author     David S. Miller <davem@davemloft.net>  2014-11-01 14:53:27 -0400
committer  David S. Miller <davem@davemloft.net>  2014-11-01 14:53:27 -0400
commit     55b42b5ca2dcf143465968697fe6c6503b05fca1 (patch)
tree       91878cd53efc44ba67244d4d3897020828c87c01 /drivers
parent     10738eeaf4ab3de092586cefcc082e7d43ca0044 (diff)
parent     ec1f1276022e4e3ca40871810217d513e39ff250 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/phy/marvell.c

Simple overlapping changes in drivers/net/phy/marvell.c

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 2
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpi_platform.c | 3
-rw-r--r--  drivers/acpi/acpica/achware.h | 2
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 4
-rw-r--r--  drivers/acpi/acpica/actables.h | 2
-rw-r--r--  drivers/acpi/acpica/amlresrc.h | 34
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 23
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 1
-rw-r--r--  drivers/acpi/acpica/evxface.c | 27
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 40
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 12
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 9
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 33
-rw-r--r--  drivers/acpi/device_pm.c | 3
-rw-r--r--  drivers/acpi/ec.c | 134
-rw-r--r--  drivers/acpi/fan.c | 338
-rw-r--r--  drivers/acpi/int340x_thermal.c | 51
-rw-r--r--  drivers/acpi/internal.h | 10
-rw-r--r--  drivers/acpi/scan.c | 73
-rw-r--r--  drivers/acpi/sysfs.c | 4
-rw-r--r--  drivers/acpi/thermal.c | 18
-rw-r--r--  drivers/acpi/utils.c | 28
-rw-r--r--  drivers/base/dma-contiguous.c | 3
-rw-r--r--  drivers/base/power/main.c | 2
-rw-r--r--  drivers/bcma/host_pci.c | 5
-rw-r--r--  drivers/bcma/main.c | 2
-rw-r--r--  drivers/block/null_blk.c | 14
-rw-r--r--  drivers/block/sunvdc.c | 9
-rw-r--r--  drivers/block/zram/zram_drv.c | 10
-rw-r--r--  drivers/char/random.c | 8
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 6
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 87
-rw-r--r--  drivers/cpufreq/cpufreq.c | 38
-rw-r--r--  drivers/cpufreq/highbank-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 110
-rw-r--r--  drivers/cpuidle/Kconfig.mips | 2
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 27
-rw-r--r--  drivers/edac/cpc925_edac.c | 2
-rw-r--r--  drivers/edac/e7xxx_edac.c | 2
-rw-r--r--  drivers/edac/i3200_edac.c | 4
-rw-r--r--  drivers/edac/i82860_edac.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 79
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 62
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 164
-rw-r--r--  drivers/firmware/efi/vars.c | 61
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 12
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs780_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 28
-rw-r--r--  drivers/hid/hid-debug.c | 6
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hid/hid-input.c | 12
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 2
-rw-r--r--  drivers/hwmon/menf21bmc_hwmon.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 10
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 6
-rw-r--r--  drivers/input/keyboard/opencores-kbd.c | 2
-rw-r--r--  drivers/input/keyboard/stmpe-keypad.c | 2
-rw-r--r--  drivers/input/misc/ims-pcu.c | 2
-rw-r--r--  drivers/input/misc/max77693-haptic.c | 5
-rw-r--r--  drivers/input/misc/soc_button_array.c | 2
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 7
-rw-r--r--  drivers/input/mouse/vsxxxaa.c | 2
-rw-r--r--  drivers/input/serio/altera_ps2.c | 4
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 297
-rw-r--r--  drivers/input/serio/i8042.c | 2
-rw-r--r--  drivers/input/touchscreen/wm97xx-core.c | 4
-rw-r--r--  drivers/leds/led-class.c | 23
-rw-r--r--  drivers/leds/led-core.c | 19
-rw-r--r--  drivers/leds/leds-gpio-register.c | 5
-rw-r--r--  drivers/leds/leds-gpio.c | 14
-rw-r--r--  drivers/leds/leds-lp3944.c | 3
-rw-r--r--  drivers/leds/trigger/ledtrig-gpio.c | 2
-rw-r--r--  drivers/mailbox/Makefile | 4
-rw-r--r--  drivers/mailbox/mailbox.c | 465
-rw-r--r--  drivers/mailbox/pl320-ipc.c | 2
-rw-r--r--  drivers/media/common/saa7146/saa7146_core.c | 6
-rw-r--r--  drivers/media/pci/cx23885/cx23885-dvb.c | 3
-rw-r--r--  drivers/media/pci/tw68/Kconfig | 1
-rw-r--r--  drivers/media/pci/tw68/tw68-core.c | 2
-rw-r--r--  drivers/media/platform/Kconfig | 6
-rw-r--r--  drivers/media/platform/exynos4-is/Kconfig | 2
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-core.c | 2
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-core.c | 6
-rw-r--r--  drivers/media/platform/s5p-tv/Kconfig | 2
-rw-r--r--  drivers/media/platform/vivid/Kconfig | 5
-rw-r--r--  drivers/media/platform/vivid/vivid-tpg.c | 2
-rw-r--r--  drivers/media/radio/wl128x/fmdrv_common.c | 2
-rw-r--r--  drivers/media/tuners/xc5000.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/af9035.c | 6
-rw-r--r--  drivers/media/usb/dvb-usb-v2/anysee.c | 6
-rw-r--r--  drivers/media/usb/em28xx/em28xx-core.c | 2
-rw-r--r--  drivers/media/usb/em28xx/em28xx-input.c | 4
-rw-r--r--  drivers/media/usb/hackrf/hackrf.c | 2
-rw-r--r--  drivers/media/usb/usbvision/usbvision-video.c | 2
-rw-r--r--  drivers/media/usb/uvc/uvc_v4l2.c | 1
-rw-r--r--  drivers/media/usb/uvc/uvc_video.c | 2
-rw-r--r--  drivers/media/usb/uvc/uvcvideo.h | 1
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-contig.c | 9
-rw-r--r--  drivers/misc/cxl/fault.c | 74
-rw-r--r--  drivers/misc/cxl/native.c | 4
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/dsa/mv88e6171.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 55
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 16
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 1
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 58
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 7
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 62
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 18
-rw-r--r--  drivers/net/macvlan.c | 10
-rw-r--r--  drivers/net/macvtap.c | 16
-rw-r--r--  drivers/net/phy/marvell.c | 6
-rw-r--r--  drivers/net/tun.c | 25
-rw-r--r--  drivers/net/usb/cdc_ether.c | 47
-rw-r--r--  drivers/net/usb/r8152.c | 17
-rw-r--r--  drivers/net/usb/usbnet.c | 20
-rw-r--r--  drivers/net/virtio_net.c | 24
-rw-r--r--  drivers/net/wireless/ath/ath.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 55
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 10
-rw-r--r--  drivers/net/wireless/ath/regd.c | 14
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 25
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-8000.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 35
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 22
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.c | 52
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.h | 2
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/core.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/def.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/sw.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 17
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/sw.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ee/hw.c | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/def.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/sw.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8821ae/phy.c | 15
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 11
-rw-r--r--  drivers/net/xen-netback/common.h | 39
-rw-r--r--  drivers/net/xen-netback/interface.c | 74
-rw-r--r--  drivers/net/xen-netback/netback.c | 319
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 22
-rw-r--r--  drivers/of/of_reserved_mem.c | 14
-rw-r--r--  drivers/pci/host/pci-imx6.c | 13
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 7
-rw-r--r--  drivers/pci/pci-sysfs.c | 8
-rw-r--r--  drivers/pci/pcie/pme.c | 6
-rw-r--r--  drivers/power/reset/at91-reset.c | 4
-rw-r--r--  drivers/pwm/Kconfig | 22
-rw-r--r--  drivers/pwm/Makefile | 2
-rw-r--r--  drivers/pwm/core.c | 31
-rw-r--r--  drivers/pwm/pwm-atmel.c | 24
-rw-r--r--  drivers/pwm/pwm-fsl-ftm.c | 90
-rw-r--r--  drivers/pwm/pwm-imx.c | 71
-rw-r--r--  drivers/pwm/pwm-lpss-pci.c | 64
-rw-r--r--  drivers/pwm/pwm-lpss-platform.c | 68
-rw-r--r--  drivers/pwm/pwm-lpss.c | 137
-rw-r--r--  drivers/pwm/pwm-lpss.h | 32
-rw-r--r--  drivers/pwm/pwm-rockchip.c | 57
-rw-r--r--  drivers/regulator/rk808-regulator.c | 2
-rw-r--r--  drivers/rtc/Kconfig | 4
-rw-r--r--  drivers/rtc/rtc-bq32k.c | 2
-rw-r--r--  drivers/rtc/rtc-efi.c | 1
-rw-r--r--  drivers/rtc/rtc-pm8xxx.c | 222
-rw-r--r--  drivers/rtc/rtc-s3c.c | 14
-rw-r--r--  drivers/s390/char/Kconfig | 2
-rw-r--r--  drivers/scsi/osd/Kbuild | 2
-rw-r--r--  drivers/scsi/osd/Kconfig | 2
-rw-r--r--  drivers/scsi/osd/osd_debug.h | 2
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 4
-rw-r--r--  drivers/scsi/osd/osd_uld.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 35
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 11
-rw-r--r--  drivers/spi/spi-dw.c | 1
-rw-r--r--  drivers/spi/spi-orion.c | 2
-rw-r--r--  drivers/spi/spi-pl022.c | 2
-rw-r--r--  drivers/spi/spi-rockchip.c | 50
-rw-r--r--  drivers/spi/spidev.c | 79
-rw-r--r--  drivers/target/Kconfig | 7
-rw-r--r--  drivers/target/Makefile | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 10
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 8
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 5
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 29
-rw-r--r--  drivers/target/target_core_alua.c | 33
-rw-r--r--  drivers/target/target_core_configfs.c | 26
-rw-r--r--  drivers/target/target_core_device.c | 48
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 13
-rw-r--r--  drivers/target/target_core_fabric_lib.c | 6
-rw-r--r--  drivers/target/target_core_file.c | 13
-rw-r--r--  drivers/target/target_core_internal.h | 6
-rw-r--r--  drivers/target/target_core_pr.c | 107
-rw-r--r--  drivers/target/target_core_pr.h | 2
-rw-r--r--  drivers/target/target_core_pscsi.c | 16
-rw-r--r--  drivers/target/target_core_sbc.c | 2
-rw-r--r--  drivers/target/target_core_tmr.c | 24
-rw-r--r--  drivers/target/target_core_tpg.c | 53
-rw-r--r--  drivers/target/target_core_transport.c | 27
-rw-r--r--  drivers/target/target_core_ua.c | 15
-rw-r--r--  drivers/target/target_core_ua.h | 1
-rw-r--r--  drivers/target/target_core_user.c | 1167
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 2
-rw-r--r--  drivers/thermal/Kconfig | 49
-rw-r--r--  drivers/thermal/Makefile | 3
-rw-r--r--  drivers/thermal/fair_share.c | 12
-rw-r--r--  drivers/thermal/gov_bang_bang.c | 131
-rw-r--r--  drivers/thermal/imx_thermal.c | 91
-rw-r--r--  drivers/thermal/int3403_thermal.c | 296
-rw-r--r--  drivers/thermal/int340x_thermal/Makefile | 4
-rw-r--r--  drivers/thermal/int340x_thermal/acpi_thermal_rel.c | 400
-rw-r--r--  drivers/thermal/int340x_thermal/acpi_thermal_rel.h | 84
-rw-r--r--  drivers/thermal/int340x_thermal/int3400_thermal.c | 271
-rw-r--r--  drivers/thermal/int340x_thermal/int3402_thermal.c | 242
-rw-r--r--  drivers/thermal/int340x_thermal/int3403_thermal.c | 477
-rw-r--r--  drivers/thermal/of-thermal.c | 12
-rw-r--r--  drivers/thermal/step_wise.c | 7
-rw-r--r--  drivers/thermal/thermal_core.c | 12
-rw-r--r--  drivers/thermal/thermal_core.h | 8
-rw-r--r--  drivers/uio/uio.c | 12
-rw-r--r--  drivers/video/console/fbcon.c | 19
-rw-r--r--  drivers/video/console/vgacon.c | 24
-rw-r--r--  drivers/video/fbdev/atmel_lcdfb.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c | 3
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/connector-dvi.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/connector-hdmi.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-dpi.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/dss/apply.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/dss/dispc.c | 8
-rw-r--r--  drivers/video/fbdev/omap2/dss/dispc.h | 3
-rw-r--r--  drivers/video/fbdev/omap2/dss/dpi.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/dss/dsi.c | 3
-rw-r--r--  drivers/video/fbdev/omap2/dss/dss.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/dss/hdmi4.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/dss/hdmi5.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/dss/hdmi_pll.c | 13
-rw-r--r--  drivers/video/fbdev/omap2/dss/rfbi.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/dss/sdi.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/dss/venc.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 18
-rw-r--r--  drivers/watchdog/Kconfig | 54
-rw-r--r--  drivers/watchdog/Makefile | 5
-rw-r--r--  drivers/watchdog/booke_wdt.c | 28
-rw-r--r--  drivers/watchdog/cadence_wdt.c | 516
-rw-r--r--  drivers/watchdog/da9063_wdt.c | 191
-rw-r--r--  drivers/watchdog/dw_wdt.c | 36
-rw-r--r--  drivers/watchdog/imx2_wdt.c | 43
-rw-r--r--  drivers/watchdog/meson_wdt.c | 236
-rw-r--r--  drivers/watchdog/of_xilinx_wdt.c | 1
-rw-r--r--  drivers/watchdog/qcom-wdt.c | 224
-rw-r--r--  drivers/watchdog/rn5t618_wdt.c | 198
-rw-r--r--  drivers/watchdog/s3c2410_wdt.c | 47
-rw-r--r--  drivers/watchdog/stmp3xxx_rtc_wdt.c | 24
-rw-r--r--  drivers/watchdog/sunxi_wdt.c | 111
-rw-r--r--  drivers/watchdog/ts72xx_wdt.c | 6
-rw-r--r--  drivers/xen/balloon.c | 3
-rw-r--r--  drivers/xen/pci.c | 27
333 files changed, 8816 insertions, 2121 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d0f3265fb85d..b23fe37f67c0 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -144,7 +144,7 @@ config ACPI_VIDEO
144 144
145config ACPI_FAN 145config ACPI_FAN
146 tristate "Fan" 146 tristate "Fan"
147 select THERMAL 147 depends on THERMAL
148 default y 148 default y
149 help 149 help
150 This driver supports ACPI fan devices, allowing user-mode 150 This driver supports ACPI fan devices, allowing user-mode
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 505d4d79fe3e..c3b2fcb729f3 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -43,6 +43,7 @@ acpi-y += pci_root.o pci_link.o pci_irq.o
43acpi-y += acpi_lpss.o 43acpi-y += acpi_lpss.o
44acpi-y += acpi_platform.o 44acpi-y += acpi_platform.o
45acpi-y += acpi_pnp.o 45acpi-y += acpi_pnp.o
46acpi-y += int340x_thermal.o
46acpi-y += power.o 47acpi-y += power.o
47acpi-y += event.o 48acpi-y += event.o
48acpi-y += sysfs.o 49acpi-y += sysfs.o
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 2bf9082f7523..6ba8beb6b9d2 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/dma-mapping.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
20 21
21#include "internal.h" 22#include "internal.h"
@@ -102,6 +103,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
102 pdevinfo.res = resources; 103 pdevinfo.res = resources;
103 pdevinfo.num_res = count; 104 pdevinfo.num_res = count;
104 pdevinfo.acpi_node.companion = adev; 105 pdevinfo.acpi_node.companion = adev;
106 pdevinfo.dma_mask = DMA_BIT_MASK(32);
105 pdev = platform_device_register_full(&pdevinfo); 107 pdev = platform_device_register_full(&pdevinfo);
106 if (IS_ERR(pdev)) 108 if (IS_ERR(pdev))
107 dev_err(&adev->dev, "platform device creation failed: %ld\n", 109 dev_err(&adev->dev, "platform device creation failed: %ld\n",
@@ -113,3 +115,4 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
113 kfree(resources); 115 kfree(resources);
114 return pdev; 116 return pdev;
115} 117}
118EXPORT_SYMBOL_GPL(acpi_create_platform_device);
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 2ad2351a9833..c318d3e27893 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -127,7 +127,7 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
127 127
128acpi_status 128acpi_status
129acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, 129acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
130 acpi_event_status * event_status); 130 acpi_event_status *event_status);
131 131
132acpi_status acpi_hw_disable_all_gpes(void); 132acpi_status acpi_hw_disable_all_gpes(void);
133 133
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 2747279fbe3c..c00e7e41ad75 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -413,8 +413,8 @@ struct acpi_gpe_handler_info {
413 acpi_gpe_handler address; /* Address of handler, if any */ 413 acpi_gpe_handler address; /* Address of handler, if any */
414 void *context; /* Context to be passed to handler */ 414 void *context; /* Context to be passed to handler */
415 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ 415 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
416 u8 original_flags; /* Original (pre-handler) GPE info */ 416 u8 original_flags; /* Original (pre-handler) GPE info */
417 u8 originally_enabled; /* True if GPE was originally enabled */ 417 u8 originally_enabled; /* True if GPE was originally enabled */
418}; 418};
419 419
420/* Notify info for implicit notify, multiple device objects */ 420/* Notify info for implicit notify, multiple device objects */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index f14882788eee..1afe46e44dac 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -49,6 +49,8 @@ acpi_status acpi_allocate_root_table(u32 initial_table_count);
49/* 49/*
50 * tbxfroot - Root pointer utilities 50 * tbxfroot - Root pointer utilities
51 */ 51 */
52u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp);
53
52acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp); 54acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
53 55
54u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length); 56u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length);
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index f3f834408441..3a0beeb86ba5 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -117,6 +117,12 @@ struct asl_resource_node {
117 struct asl_resource_node *next; 117 struct asl_resource_node *next;
118}; 118};
119 119
120struct asl_resource_info {
121 union acpi_parse_object *descriptor_type_op; /* Resource descriptor parse node */
122 union acpi_parse_object *mapping_op; /* Used for mapfile support */
123 u32 current_byte_offset; /* Offset in resource template */
124};
125
120/* Macros used to generate AML resource length fields */ 126/* Macros used to generate AML resource length fields */
121 127
122#define ACPI_AML_SIZE_LARGE(r) (sizeof (r) - sizeof (struct aml_resource_large_header)) 128#define ACPI_AML_SIZE_LARGE(r) (sizeof (r) - sizeof (struct aml_resource_large_header))
@@ -449,4 +455,32 @@ union aml_resource {
449 u8 byte_item; 455 u8 byte_item;
450}; 456};
451 457
458/* Interfaces used by both the disassembler and compiler */
459
460void
461mp_save_gpio_info(union acpi_parse_object *op,
462 union aml_resource *resource,
463 u32 pin_count, u16 *pin_list, char *device_name);
464
465void
466mp_save_serial_info(union acpi_parse_object *op,
467 union aml_resource *resource, char *device_name);
468
469char *mp_get_hid_from_parse_tree(struct acpi_namespace_node *hid_node);
470
471char *mp_get_hid_via_namestring(char *device_name);
472
473char *mp_get_connection_info(union acpi_parse_object *op,
474 u32 pin_index,
475 struct acpi_namespace_node **target_node,
476 char **target_name);
477
478char *mp_get_parent_device_hid(union acpi_parse_object *op,
479 struct acpi_namespace_node **target_node,
480 char **parent_device_name);
481
482char *mp_get_ddn_value(char *device_name);
483
484char *mp_get_hid_value(struct acpi_namespace_node *device_node);
485
452#endif 486#endif
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index e4ba4dec86af..2095dfb72bcb 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -100,13 +100,14 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
100 * 100 *
101 * FUNCTION: acpi_ev_enable_gpe 101 * FUNCTION: acpi_ev_enable_gpe
102 * 102 *
103 * PARAMETERS: gpe_event_info - GPE to enable 103 * PARAMETERS: gpe_event_info - GPE to enable
104 * 104 *
105 * RETURN: Status 105 * RETURN: Status
106 * 106 *
107 * DESCRIPTION: Clear a GPE of stale events and enable it. 107 * DESCRIPTION: Clear a GPE of stale events and enable it.
108 * 108 *
109 ******************************************************************************/ 109 ******************************************************************************/
110
110acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 111acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
111{ 112{
112 acpi_status status; 113 acpi_status status;
@@ -125,6 +126,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
125 } 126 }
126 127
127 /* Clear the GPE (of stale events) */ 128 /* Clear the GPE (of stale events) */
129
128 status = acpi_hw_clear_gpe(gpe_event_info); 130 status = acpi_hw_clear_gpe(gpe_event_info);
129 if (ACPI_FAILURE(status)) { 131 if (ACPI_FAILURE(status)) {
130 return_ACPI_STATUS(status); 132 return_ACPI_STATUS(status);
@@ -136,7 +138,6 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
136 return_ACPI_STATUS(status); 138 return_ACPI_STATUS(status);
137} 139}
138 140
139
140/******************************************************************************* 141/*******************************************************************************
141 * 142 *
142 * FUNCTION: acpi_ev_add_gpe_reference 143 * FUNCTION: acpi_ev_add_gpe_reference
@@ -212,7 +213,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
212 if (ACPI_SUCCESS(status)) { 213 if (ACPI_SUCCESS(status)) {
213 status = 214 status =
214 acpi_hw_low_set_gpe(gpe_event_info, 215 acpi_hw_low_set_gpe(gpe_event_info,
215 ACPI_GPE_DISABLE); 216 ACPI_GPE_DISABLE);
216 } 217 }
217 218
218 if (ACPI_FAILURE(status)) { 219 if (ACPI_FAILURE(status)) {
@@ -334,7 +335,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
334 * 335 *
335 ******************************************************************************/ 336 ******************************************************************************/
336 337
337u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) 338u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
338{ 339{
339 acpi_status status; 340 acpi_status status;
340 struct acpi_gpe_block_info *gpe_block; 341 struct acpi_gpe_block_info *gpe_block;
@@ -427,7 +428,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
427 428
428 /* Check if there is anything active at all in this register */ 429 /* Check if there is anything active at all in this register */
429 430
430 enabled_status_byte = (u8) (status_reg & enable_reg); 431 enabled_status_byte = (u8)(status_reg & enable_reg);
431 if (!enabled_status_byte) { 432 if (!enabled_status_byte) {
432 433
433 /* No active GPEs in this register, move on */ 434 /* No active GPEs in this register, move on */
@@ -450,7 +451,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
450 acpi_ev_gpe_dispatch(gpe_block-> 451 acpi_ev_gpe_dispatch(gpe_block->
451 node, 452 node,
452 &gpe_block-> 453 &gpe_block->
453 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); 454 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
454 } 455 }
455 } 456 }
456 } 457 }
@@ -636,7 +637,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
636 * 637 *
637 ******************************************************************************/ 638 ******************************************************************************/
638 639
639acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) 640acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info * gpe_event_info)
640{ 641{
641 acpi_status status; 642 acpi_status status;
642 643
@@ -666,9 +667,9 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
666 * 667 *
667 * FUNCTION: acpi_ev_gpe_dispatch 668 * FUNCTION: acpi_ev_gpe_dispatch
668 * 669 *
669 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 670 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
670 * gpe_event_info - Info for this GPE 671 * gpe_event_info - Info for this GPE
671 * gpe_number - Number relative to the parent GPE block 672 * gpe_number - Number relative to the parent GPE block
672 * 673 *
673 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 674 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
674 * 675 *
@@ -681,7 +682,7 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
681 682
682u32 683u32
683acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, 684acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
684 struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) 685 struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
685{ 686{
686 acpi_status status; 687 acpi_status status;
687 u32 return_value; 688 u32 return_value;
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 49fc7effd961..7be928379879 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -424,6 +424,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
424 } 424 }
425 425
426 /* Disable the GPE in case it's been enabled already. */ 426 /* Disable the GPE in case it's been enabled already. */
427
427 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); 428 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
428 429
429 /* 430 /*
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 11e5803b8b41..55a58f3ec8df 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -786,18 +786,26 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
786 handler->method_node = gpe_event_info->dispatch.method_node; 786 handler->method_node = gpe_event_info->dispatch.method_node;
787 handler->original_flags = (u8)(gpe_event_info->flags & 787 handler->original_flags = (u8)(gpe_event_info->flags &
788 (ACPI_GPE_XRUPT_TYPE_MASK | 788 (ACPI_GPE_XRUPT_TYPE_MASK |
789 ACPI_GPE_DISPATCH_MASK)); 789 ACPI_GPE_DISPATCH_MASK));
790 790
791 /* 791 /*
792 * If the GPE is associated with a method, it may have been enabled 792 * If the GPE is associated with a method, it may have been enabled
793 * automatically during initialization, in which case it has to be 793 * automatically during initialization, in which case it has to be
794 * disabled now to avoid spurious execution of the handler. 794 * disabled now to avoid spurious execution of the handler.
795 */ 795 */
796 796 if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
797 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) 797 (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
798 && gpe_event_info->runtime_count) { 798 gpe_event_info->runtime_count) {
799 handler->originally_enabled = 1; 799 handler->originally_enabled = TRUE;
800 (void)acpi_ev_remove_gpe_reference(gpe_event_info); 800 (void)acpi_ev_remove_gpe_reference(gpe_event_info);
801
802 /* Sanity check of original type against new type */
803
804 if (type !=
805 (u32)(gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
806 ACPI_WARNING((AE_INFO,
807 "GPE type mismatch (level/edge)"));
808 }
801 } 809 }
802 810
803 /* Install the handler */ 811 /* Install the handler */
@@ -808,7 +816,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
808 816
809 gpe_event_info->flags &= 817 gpe_event_info->flags &=
810 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 818 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
811 gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER); 819 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_HANDLER);
812 820
813 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 821 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
814 822
@@ -893,7 +901,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
893 901
894 gpe_event_info->dispatch.method_node = handler->method_node; 902 gpe_event_info->dispatch.method_node = handler->method_node;
895 gpe_event_info->flags &= 903 gpe_event_info->flags &=
896 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 904 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
897 gpe_event_info->flags |= handler->original_flags; 905 gpe_event_info->flags |= handler->original_flags;
898 906
899 /* 907 /*
@@ -901,7 +909,8 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
901 * enabled, it should be enabled at this point to restore the 909 * enabled, it should be enabled at this point to restore the
902 * post-initialization configuration. 910 * post-initialization configuration.
903 */ 911 */
904 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) && 912 if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
913 (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
905 handler->originally_enabled) { 914 handler->originally_enabled) {
906 (void)acpi_ev_add_gpe_reference(gpe_event_info); 915 (void)acpi_ev_add_gpe_reference(gpe_event_info);
907 } 916 }
@@ -946,7 +955,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
946 * handle is returned. 955 * handle is returned.
947 * 956 *
948 ******************************************************************************/ 957 ******************************************************************************/
949acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle) 958acpi_status acpi_acquire_global_lock(u16 timeout, u32 *handle)
950{ 959{
951 acpi_status status; 960 acpi_status status;
952 961
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index e286640ad4ff..bb8cbf5961bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -324,8 +324,9 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
324 ******************************************************************************/ 324 ******************************************************************************/
325acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) 325acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
326{ 326{
327 acpi_status status = AE_OK; 327 acpi_status status;
328 u32 value; 328 acpi_event_status local_event_status = 0;
329 u32 in_byte;
329 330
330 ACPI_FUNCTION_TRACE(acpi_get_event_status); 331 ACPI_FUNCTION_TRACE(acpi_get_event_status);
331 332
@@ -339,29 +340,40 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
339 return_ACPI_STATUS(AE_BAD_PARAMETER); 340 return_ACPI_STATUS(AE_BAD_PARAMETER);
340 } 341 }
341 342
342 /* Get the status of the requested fixed event */ 343 /* Fixed event currently can be dispatched? */
344
345 if (acpi_gbl_fixed_event_handlers[event].handler) {
346 local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
347 }
348
349 /* Fixed event currently enabled? */
343 350
344 status = 351 status =
345 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 352 acpi_read_bit_register(acpi_gbl_fixed_event_info[event].
346 enable_register_id, &value); 353 enable_register_id, &in_byte);
347 if (ACPI_FAILURE(status)) 354 if (ACPI_FAILURE(status)) {
348 return_ACPI_STATUS(status); 355 return_ACPI_STATUS(status);
356 }
349 357
350 *event_status = value; 358 if (in_byte) {
359 local_event_status |= ACPI_EVENT_FLAG_ENABLED;
360 }
361
362 /* Fixed event currently active? */
351 363
352 status = 364 status =
353 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 365 acpi_read_bit_register(acpi_gbl_fixed_event_info[event].
354 status_register_id, &value); 366 status_register_id, &in_byte);
355 if (ACPI_FAILURE(status)) 367 if (ACPI_FAILURE(status)) {
356 return_ACPI_STATUS(status); 368 return_ACPI_STATUS(status);
369 }
357 370
358 if (value) 371 if (in_byte) {
359 *event_status |= ACPI_EVENT_FLAG_SET; 372 local_event_status |= ACPI_EVENT_FLAG_SET;
360 373 }
361 if (acpi_gbl_fixed_event_handlers[event].handler)
362 *event_status |= ACPI_EVENT_FLAG_HANDLE;
363 374
364 return_ACPI_STATUS(status); 375 (*event_status) = local_event_status;
376 return_ACPI_STATUS(AE_OK);
365} 377}
366 378
367ACPI_EXPORT_SYMBOL(acpi_get_event_status) 379ACPI_EXPORT_SYMBOL(acpi_get_event_status)
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 56710a03c9b0..e889a5304abd 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -106,8 +106,8 @@ ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
106 * 106 *
107 * FUNCTION: acpi_enable_gpe 107 * FUNCTION: acpi_enable_gpe
108 * 108 *
109 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 109 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
110 * gpe_number - GPE level within the GPE block 110 * gpe_number - GPE level within the GPE block
111 * 111 *
112 * RETURN: Status 112 * RETURN: Status
113 * 113 *
@@ -115,7 +115,6 @@ ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
115 * hardware-enabled. 115 * hardware-enabled.
116 * 116 *
117 ******************************************************************************/ 117 ******************************************************************************/
118
119acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) 118acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
120{ 119{
121 acpi_status status = AE_BAD_PARAMETER; 120 acpi_status status = AE_BAD_PARAMETER;
@@ -490,8 +489,8 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
490 * 489 *
491 * FUNCTION: acpi_get_gpe_status 490 * FUNCTION: acpi_get_gpe_status
492 * 491 *
493 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 492 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
494 * gpe_number - GPE level within the GPE block 493 * gpe_number - GPE level within the GPE block
495 * event_status - Where the current status of the event 494 * event_status - Where the current status of the event
496 * will be returned 495 * will be returned
497 * 496 *
@@ -524,9 +523,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
524 523
525 status = acpi_hw_get_gpe_status(gpe_event_info, event_status); 524 status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
526 525
527 if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
528 *event_status |= ACPI_EVENT_FLAG_HANDLE;
529
530unlock_and_exit: 526unlock_and_exit:
531 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 527 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
532 return_ACPI_STATUS(status); 528 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index ea62d40fd161..48ac7b7b59cd 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -202,7 +202,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
202 202
203acpi_status 203acpi_status
204acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, 204acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
205 acpi_event_status * event_status) 205 acpi_event_status *event_status)
206{ 206{
207 u32 in_byte; 207 u32 in_byte;
208 u32 register_bit; 208 u32 register_bit;
@@ -216,6 +216,13 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
216 return (AE_BAD_PARAMETER); 216 return (AE_BAD_PARAMETER);
217 } 217 }
218 218
219 /* GPE currently handled? */
220
221 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
222 ACPI_GPE_DISPATCH_NONE) {
223 local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
224 }
225
219 /* Get the info block for the entire GPE register */ 226 /* Get the info block for the entire GPE register */
220 227
221 gpe_register_info = gpe_event_info->register_info; 228 gpe_register_info = gpe_event_info->register_info;
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 65ab8fed3d5e..43a54af2b548 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -50,6 +50,36 @@ ACPI_MODULE_NAME("tbxfroot")
50 50
51/******************************************************************************* 51/*******************************************************************************
52 * 52 *
53 * FUNCTION: acpi_tb_get_rsdp_length
54 *
55 * PARAMETERS: rsdp - Pointer to RSDP
56 *
57 * RETURN: Table length
58 *
59 * DESCRIPTION: Get the length of the RSDP
60 *
61 ******************************************************************************/
62u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp)
63{
64
65 if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {
66
67 /* BAD Signature */
68
69 return (0);
70 }
71
72 /* "Length" field is available if table version >= 2 */
73
74 if (rsdp->revision >= 2) {
75 return (rsdp->length);
76 } else {
77 return (ACPI_RSDP_CHECKSUM_LENGTH);
78 }
79}
80
81/*******************************************************************************
82 *
53 * FUNCTION: acpi_tb_validate_rsdp 83 * FUNCTION: acpi_tb_validate_rsdp
54 * 84 *
55 * PARAMETERS: rsdp - Pointer to unvalidated RSDP 85 * PARAMETERS: rsdp - Pointer to unvalidated RSDP
@@ -59,7 +89,8 @@ ACPI_MODULE_NAME("tbxfroot")
59 * DESCRIPTION: Validate the RSDP (ptr) 89 * DESCRIPTION: Validate the RSDP (ptr)
60 * 90 *
61 ******************************************************************************/ 91 ******************************************************************************/
62acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) 92
93acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
63{ 94{
64 95
65 /* 96 /*
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index bea6896be122..143ec6ea1468 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -343,6 +343,7 @@ int acpi_device_update_power(struct acpi_device *device, int *state_p)
343 343
344 return 0; 344 return 0;
345} 345}
346EXPORT_SYMBOL_GPL(acpi_device_update_power);
346 347
347int acpi_bus_update_power(acpi_handle handle, int *state_p) 348int acpi_bus_update_power(acpi_handle handle, int *state_p)
348{ 349{
@@ -710,7 +711,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
710 return -ENODEV; 711 return -ENODEV;
711 } 712 }
712 713
713 return acpi_device_wakeup(adev, enable, ACPI_STATE_S0); 714 return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
714} 715}
715EXPORT_SYMBOL(acpi_pm_device_run_wake); 716EXPORT_SYMBOL(acpi_pm_device_run_wake);
716#endif /* CONFIG_PM_RUNTIME */ 717#endif /* CONFIG_PM_RUNTIME */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index cb6066c809ea..5f9b74b9b71f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -126,14 +126,16 @@ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
126static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ 126static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
127static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ 127static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
128static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ 128static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
129static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
129 130
130/* -------------------------------------------------------------------------- 131/* --------------------------------------------------------------------------
131 Transaction Management 132 * Transaction Management
132 -------------------------------------------------------------------------- */ 133 * -------------------------------------------------------------------------- */
133 134
134static inline u8 acpi_ec_read_status(struct acpi_ec *ec) 135static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
135{ 136{
136 u8 x = inb(ec->command_addr); 137 u8 x = inb(ec->command_addr);
138
137 pr_debug("EC_SC(R) = 0x%2.2x " 139 pr_debug("EC_SC(R) = 0x%2.2x "
138 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n", 140 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
139 x, 141 x,
@@ -148,6 +150,7 @@ static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
148static inline u8 acpi_ec_read_data(struct acpi_ec *ec) 150static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
149{ 151{
150 u8 x = inb(ec->data_addr); 152 u8 x = inb(ec->data_addr);
153
151 pr_debug("EC_DATA(R) = 0x%2.2x\n", x); 154 pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
152 return x; 155 return x;
153} 156}
@@ -164,10 +167,32 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
164 outb(data, ec->data_addr); 167 outb(data, ec->data_addr);
165} 168}
166 169
170#ifdef DEBUG
171static const char *acpi_ec_cmd_string(u8 cmd)
172{
173 switch (cmd) {
174 case 0x80:
175 return "RD_EC";
176 case 0x81:
177 return "WR_EC";
178 case 0x82:
179 return "BE_EC";
180 case 0x83:
181 return "BD_EC";
182 case 0x84:
183 return "QR_EC";
184 }
185 return "UNKNOWN";
186}
187#else
188#define acpi_ec_cmd_string(cmd) "UNDEF"
189#endif
190
167static int ec_transaction_completed(struct acpi_ec *ec) 191static int ec_transaction_completed(struct acpi_ec *ec)
168{ 192{
169 unsigned long flags; 193 unsigned long flags;
170 int ret = 0; 194 int ret = 0;
195
171 spin_lock_irqsave(&ec->lock, flags); 196 spin_lock_irqsave(&ec->lock, flags);
172 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) 197 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
173 ret = 1; 198 ret = 1;
@@ -181,7 +206,8 @@ static bool advance_transaction(struct acpi_ec *ec)
181 u8 status; 206 u8 status;
182 bool wakeup = false; 207 bool wakeup = false;
183 208
184 pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK"); 209 pr_debug("===== %s (%d) =====\n",
210 in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
185 status = acpi_ec_read_status(ec); 211 status = acpi_ec_read_status(ec);
186 t = ec->curr; 212 t = ec->curr;
187 if (!t) 213 if (!t)
@@ -198,7 +224,8 @@ static bool advance_transaction(struct acpi_ec *ec)
198 if (t->rlen == t->ri) { 224 if (t->rlen == t->ri) {
199 t->flags |= ACPI_EC_COMMAND_COMPLETE; 225 t->flags |= ACPI_EC_COMMAND_COMPLETE;
200 if (t->command == ACPI_EC_COMMAND_QUERY) 226 if (t->command == ACPI_EC_COMMAND_QUERY)
201 pr_debug("hardware QR_EC completion\n"); 227 pr_debug("***** Command(%s) hardware completion *****\n",
228 acpi_ec_cmd_string(t->command));
202 wakeup = true; 229 wakeup = true;
203 } 230 }
204 } else 231 } else
@@ -210,18 +237,14 @@ static bool advance_transaction(struct acpi_ec *ec)
210 } 237 }
211 return wakeup; 238 return wakeup;
212 } else { 239 } else {
213 /* 240 if (EC_FLAGS_QUERY_HANDSHAKE &&
214 * There is firmware refusing to respond QR_EC when SCI_EVT 241 !(status & ACPI_EC_FLAG_SCI) &&
215 * is not set, for which case, we complete the QR_EC
216 * without issuing it to the firmware.
217 * https://bugzilla.kernel.org/show_bug.cgi?id=86211
218 */
219 if (!(status & ACPI_EC_FLAG_SCI) &&
220 (t->command == ACPI_EC_COMMAND_QUERY)) { 242 (t->command == ACPI_EC_COMMAND_QUERY)) {
221 t->flags |= ACPI_EC_COMMAND_POLL; 243 t->flags |= ACPI_EC_COMMAND_POLL;
222 t->rdata[t->ri++] = 0x00; 244 t->rdata[t->ri++] = 0x00;
223 t->flags |= ACPI_EC_COMMAND_COMPLETE; 245 t->flags |= ACPI_EC_COMMAND_COMPLETE;
224 pr_debug("software QR_EC completion\n"); 246 pr_debug("***** Command(%s) software completion *****\n",
247 acpi_ec_cmd_string(t->command));
225 wakeup = true; 248 wakeup = true;
226 } else if ((status & ACPI_EC_FLAG_IBF) == 0) { 249 } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
227 acpi_ec_write_cmd(ec, t->command); 250 acpi_ec_write_cmd(ec, t->command);
@@ -264,6 +287,7 @@ static int ec_poll(struct acpi_ec *ec)
264{ 287{
265 unsigned long flags; 288 unsigned long flags;
266 int repeat = 5; /* number of command restarts */ 289 int repeat = 5; /* number of command restarts */
290
267 while (repeat--) { 291 while (repeat--) {
268 unsigned long delay = jiffies + 292 unsigned long delay = jiffies +
269 msecs_to_jiffies(ec_delay); 293 msecs_to_jiffies(ec_delay);
@@ -296,18 +320,25 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
296{ 320{
297 unsigned long tmp; 321 unsigned long tmp;
298 int ret = 0; 322 int ret = 0;
323
299 if (EC_FLAGS_MSI) 324 if (EC_FLAGS_MSI)
300 udelay(ACPI_EC_MSI_UDELAY); 325 udelay(ACPI_EC_MSI_UDELAY);
301 /* start transaction */ 326 /* start transaction */
302 spin_lock_irqsave(&ec->lock, tmp); 327 spin_lock_irqsave(&ec->lock, tmp);
303 /* following two actions should be kept atomic */ 328 /* following two actions should be kept atomic */
304 ec->curr = t; 329 ec->curr = t;
330 pr_debug("***** Command(%s) started *****\n",
331 acpi_ec_cmd_string(t->command));
305 start_transaction(ec); 332 start_transaction(ec);
333 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
334 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
335 pr_debug("***** Event stopped *****\n");
336 }
306 spin_unlock_irqrestore(&ec->lock, tmp); 337 spin_unlock_irqrestore(&ec->lock, tmp);
307 ret = ec_poll(ec); 338 ret = ec_poll(ec);
308 spin_lock_irqsave(&ec->lock, tmp); 339 spin_lock_irqsave(&ec->lock, tmp);
309 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) 340 pr_debug("***** Command(%s) stopped *****\n",
310 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 341 acpi_ec_cmd_string(t->command));
311 ec->curr = NULL; 342 ec->curr = NULL;
312 spin_unlock_irqrestore(&ec->lock, tmp); 343 spin_unlock_irqrestore(&ec->lock, tmp);
313 return ret; 344 return ret;
@@ -317,6 +348,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
317{ 348{
318 int status; 349 int status;
319 u32 glk; 350 u32 glk;
351
320 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) 352 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
321 return -EINVAL; 353 return -EINVAL;
322 if (t->rdata) 354 if (t->rdata)
@@ -333,8 +365,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
333 goto unlock; 365 goto unlock;
334 } 366 }
335 } 367 }
336 pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
337 t->command, t->wdata ? t->wdata[0] : 0);
338 /* disable GPE during transaction if storm is detected */ 368 /* disable GPE during transaction if storm is detected */
339 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { 369 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
340 /* It has to be disabled, so that it doesn't trigger. */ 370 /* It has to be disabled, so that it doesn't trigger. */
@@ -355,7 +385,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
355 t->irq_count); 385 t->irq_count);
356 set_bit(EC_FLAGS_GPE_STORM, &ec->flags); 386 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
357 } 387 }
358 pr_debug("transaction end\n");
359 if (ec->global_lock) 388 if (ec->global_lock)
360 acpi_release_global_lock(glk); 389 acpi_release_global_lock(glk);
361unlock: 390unlock:
@@ -383,7 +412,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
383 acpi_ec_transaction(ec, &t) : 0; 412 acpi_ec_transaction(ec, &t) : 0;
384} 413}
385 414
386static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) 415static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
387{ 416{
388 int result; 417 int result;
389 u8 d; 418 u8 d;
@@ -419,10 +448,9 @@ int ec_read(u8 addr, u8 *val)
419 if (!err) { 448 if (!err) {
420 *val = temp_data; 449 *val = temp_data;
421 return 0; 450 return 0;
422 } else 451 }
423 return err; 452 return err;
424} 453}
425
426EXPORT_SYMBOL(ec_read); 454EXPORT_SYMBOL(ec_read);
427 455
428int ec_write(u8 addr, u8 val) 456int ec_write(u8 addr, u8 val)
@@ -436,22 +464,21 @@ int ec_write(u8 addr, u8 val)
436 464
437 return err; 465 return err;
438} 466}
439
440EXPORT_SYMBOL(ec_write); 467EXPORT_SYMBOL(ec_write);
441 468
442int ec_transaction(u8 command, 469int ec_transaction(u8 command,
443 const u8 * wdata, unsigned wdata_len, 470 const u8 *wdata, unsigned wdata_len,
444 u8 * rdata, unsigned rdata_len) 471 u8 *rdata, unsigned rdata_len)
445{ 472{
446 struct transaction t = {.command = command, 473 struct transaction t = {.command = command,
447 .wdata = wdata, .rdata = rdata, 474 .wdata = wdata, .rdata = rdata,
448 .wlen = wdata_len, .rlen = rdata_len}; 475 .wlen = wdata_len, .rlen = rdata_len};
476
449 if (!first_ec) 477 if (!first_ec)
450 return -ENODEV; 478 return -ENODEV;
451 479
452 return acpi_ec_transaction(first_ec, &t); 480 return acpi_ec_transaction(first_ec, &t);
453} 481}
454
455EXPORT_SYMBOL(ec_transaction); 482EXPORT_SYMBOL(ec_transaction);
456 483
457/* Get the handle to the EC device */ 484/* Get the handle to the EC device */
@@ -461,7 +488,6 @@ acpi_handle ec_get_handle(void)
461 return NULL; 488 return NULL;
462 return first_ec->handle; 489 return first_ec->handle;
463} 490}
464
465EXPORT_SYMBOL(ec_get_handle); 491EXPORT_SYMBOL(ec_get_handle);
466 492
467/* 493/*
@@ -525,13 +551,14 @@ void acpi_ec_unblock_transactions_early(void)
525 clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags); 551 clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags);
526} 552}
527 553
528static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data) 554static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data)
529{ 555{
530 int result; 556 int result;
531 u8 d; 557 u8 d;
532 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, 558 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
533 .wdata = NULL, .rdata = &d, 559 .wdata = NULL, .rdata = &d,
534 .wlen = 0, .rlen = 1}; 560 .wlen = 0, .rlen = 1};
561
535 if (!ec || !data) 562 if (!ec || !data)
536 return -EINVAL; 563 return -EINVAL;
537 /* 564 /*
@@ -557,6 +584,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
557{ 584{
558 struct acpi_ec_query_handler *handler = 585 struct acpi_ec_query_handler *handler =
559 kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL); 586 kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
587
560 if (!handler) 588 if (!handler)
561 return -ENOMEM; 589 return -ENOMEM;
562 590
@@ -569,12 +597,12 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
569 mutex_unlock(&ec->mutex); 597 mutex_unlock(&ec->mutex);
570 return 0; 598 return 0;
571} 599}
572
573EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); 600EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
574 601
575void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) 602void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
576{ 603{
577 struct acpi_ec_query_handler *handler, *tmp; 604 struct acpi_ec_query_handler *handler, *tmp;
605
578 mutex_lock(&ec->mutex); 606 mutex_lock(&ec->mutex);
579 list_for_each_entry_safe(handler, tmp, &ec->list, node) { 607 list_for_each_entry_safe(handler, tmp, &ec->list, node) {
580 if (query_bit == handler->query_bit) { 608 if (query_bit == handler->query_bit) {
@@ -584,20 +612,20 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
584 } 612 }
585 mutex_unlock(&ec->mutex); 613 mutex_unlock(&ec->mutex);
586} 614}
587
588EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); 615EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
589 616
590static void acpi_ec_run(void *cxt) 617static void acpi_ec_run(void *cxt)
591{ 618{
592 struct acpi_ec_query_handler *handler = cxt; 619 struct acpi_ec_query_handler *handler = cxt;
620
593 if (!handler) 621 if (!handler)
594 return; 622 return;
595 pr_debug("start query execution\n"); 623 pr_debug("##### Query(0x%02x) started #####\n", handler->query_bit);
596 if (handler->func) 624 if (handler->func)
597 handler->func(handler->data); 625 handler->func(handler->data);
598 else if (handler->handle) 626 else if (handler->handle)
599 acpi_evaluate_object(handler->handle, NULL, NULL, NULL); 627 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
600 pr_debug("stop query execution\n"); 628 pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
601 kfree(handler); 629 kfree(handler);
602} 630}
603 631
@@ -620,8 +648,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
620 if (!copy) 648 if (!copy)
621 return -ENOMEM; 649 return -ENOMEM;
622 memcpy(copy, handler, sizeof(*copy)); 650 memcpy(copy, handler, sizeof(*copy));
623 pr_debug("push query execution (0x%2x) on queue\n", 651 pr_debug("##### Query(0x%02x) scheduled #####\n",
624 value); 652 handler->query_bit);
625 return acpi_os_execute((copy->func) ? 653 return acpi_os_execute((copy->func) ?
626 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, 654 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
627 acpi_ec_run, copy); 655 acpi_ec_run, copy);
@@ -633,6 +661,7 @@ static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
633static void acpi_ec_gpe_query(void *ec_cxt) 661static void acpi_ec_gpe_query(void *ec_cxt)
634{ 662{
635 struct acpi_ec *ec = ec_cxt; 663 struct acpi_ec *ec = ec_cxt;
664
636 if (!ec) 665 if (!ec)
637 return; 666 return;
638 mutex_lock(&ec->mutex); 667 mutex_lock(&ec->mutex);
@@ -644,7 +673,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
644{ 673{
645 if (state & ACPI_EC_FLAG_SCI) { 674 if (state & ACPI_EC_FLAG_SCI) {
646 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { 675 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
647 pr_debug("push gpe query to the queue\n"); 676 pr_debug("***** Event started *****\n");
648 return acpi_os_execute(OSL_NOTIFY_HANDLER, 677 return acpi_os_execute(OSL_NOTIFY_HANDLER,
649 acpi_ec_gpe_query, ec); 678 acpi_ec_gpe_query, ec);
650 } 679 }
@@ -667,8 +696,8 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
 }
 
 /* --------------------------------------------------------------------------
-                             Address Space Management
-   -------------------------------------------------------------------------- */
+ *                           Address Space Management
+ * -------------------------------------------------------------------------- */
 
 static acpi_status
 acpi_ec_space_handler(u32 function, acpi_physical_address address,
@@ -699,27 +728,26 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
 	switch (result) {
 	case -EINVAL:
 		return AE_BAD_PARAMETER;
-		break;
 	case -ENODEV:
 		return AE_NOT_FOUND;
-		break;
 	case -ETIME:
 		return AE_TIME;
-		break;
 	default:
 		return AE_OK;
 	}
 }
 
 /* --------------------------------------------------------------------------
-                               Driver Interface
-   -------------------------------------------------------------------------- */
+ *                             Driver Interface
+ * -------------------------------------------------------------------------- */
+
 static acpi_status
 ec_parse_io_ports(struct acpi_resource *resource, void *context);
 
 static struct acpi_ec *make_acpi_ec(void)
 {
 	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
+
 	if (!ec)
 		return NULL;
 	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
@@ -742,9 +770,8 @@ acpi_ec_register_query_methods(acpi_handle handle, u32 level,
 
 	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
 
-	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1) {
+	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
 		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
-	}
 	return AE_OK;
 }
 
@@ -753,7 +780,6 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
 {
 	acpi_status status;
 	unsigned long long tmp = 0;
-
 	struct acpi_ec *ec = context;
 
 	/* clear addr values, ec_parse_io_ports depend on it */
@@ -781,6 +807,7 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
 static int ec_install_handlers(struct acpi_ec *ec)
 {
 	acpi_status status;
+
 	if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
 		return 0;
 	status = acpi_install_gpe_handler(NULL, ec->gpe,
@@ -981,6 +1008,18 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
 }
 
 /*
+ * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for
+ * which case, we complete the QR_EC without issuing it to the firmware.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=86211
+ */
+static int ec_flag_query_handshake(const struct dmi_system_id *id)
+{
+	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
+	EC_FLAGS_QUERY_HANDSHAKE = 1;
+	return 0;
+}
+
+/*
  * On some hardware it is necessary to clear events accumulated by the EC during
  * sleep. These ECs stop reporting GPEs until they are manually polled, if too
  * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
@@ -1054,6 +1093,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
 	{
 	ec_clear_on_resume, "Samsung hardware", {
 	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+	{
+	ec_flag_query_handshake, "Acer hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL},
 	{},
 };
 
@@ -1078,7 +1120,8 @@ int __init acpi_ec_ecdt_probe(void)
 	boot_ec->data_addr = ecdt_ptr->data.address;
 	boot_ec->gpe = ecdt_ptr->gpe;
 	boot_ec->handle = ACPI_ROOT_OBJECT;
-	acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
+	acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
+			&boot_ec->handle);
 	/* Don't trust ECDT, which comes from ASUSTek */
 	if (!EC_FLAGS_VALIDATE_ECDT)
 		goto install;
@@ -1162,6 +1205,5 @@ static void __exit acpi_ec_exit(void)
 {
 
 	acpi_bus_unregister_driver(&acpi_ec_driver);
-	return;
 }
 #endif	/* 0 */
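
The ec.c hunks above add a DMI-keyed quirk, ec_flag_query_handshake(), next to the existing Samsung entry in ec_dmi_table. The sketch below shows the general shape of such a quirk: a callback that flips a module-level flag plus a dmi_system_id entry, run once via dmi_check_system() at probe time, as ec.c does for ec_dmi_table. It is illustration only; the vendor string, flag and table name here are hypothetical, while the patch itself uses EC_FLAGS_QUERY_HANDSHAKE and matches DMI_SYS_VENDOR "Acer".

/* Sketch only: a DMI-keyed EC quirk in the style of ec_dmi_table. */
static int EXAMPLE_EC_QUIRK_FLAG;	/* hypothetical module-scope flag */

static int example_flag_quirk(const struct dmi_system_id *id)
{
	pr_debug("%s detected, enabling EC quirk\n", id->ident);
	EXAMPLE_EC_QUIRK_FLAG = 1;
	return 0;
}

static struct dmi_system_id example_ec_dmi_table[] __initdata = {
	{
	example_flag_quirk, "Example hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor")}, NULL},
	{},
};

A call such as dmi_check_system(example_ec_dmi_table) walks the table and invokes the callback for every matching entry.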
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 5328b1090e08..caf9b76b7ef8 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -30,22 +30,19 @@
 #include <linux/uaccess.h>
 #include <linux/thermal.h>
 #include <linux/acpi.h>
-
-#define ACPI_FAN_CLASS "fan"
-#define ACPI_FAN_FILE_STATE "state"
-
-#define _COMPONENT ACPI_FAN_COMPONENT
-ACPI_MODULE_NAME("fan");
+#include <linux/platform_device.h>
+#include <linux/sort.h>
 
 MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI Fan Driver");
 MODULE_LICENSE("GPL");
 
-static int acpi_fan_add(struct acpi_device *device);
-static int acpi_fan_remove(struct acpi_device *device);
+static int acpi_fan_probe(struct platform_device *pdev);
+static int acpi_fan_remove(struct platform_device *pdev);
 
 static const struct acpi_device_id fan_device_ids[] = {
 	{"PNP0C0B", 0},
+	{"INT3404", 0},
 	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
@@ -64,37 +61,100 @@ static struct dev_pm_ops acpi_fan_pm = {
 #define FAN_PM_OPS_PTR NULL
 #endif
 
-static struct acpi_driver acpi_fan_driver = {
-	.name = "fan",
-	.class = ACPI_FAN_CLASS,
-	.ids = fan_device_ids,
-	.ops = {
-		.add = acpi_fan_add,
-		.remove = acpi_fan_remove,
-		},
-	.drv.pm = FAN_PM_OPS_PTR,
+struct acpi_fan_fps {
+	u64 control;
+	u64 trip_point;
+	u64 speed;
+	u64 noise_level;
+	u64 power;
+};
+
+struct acpi_fan_fif {
+	u64 revision;
+	u64 fine_grain_ctrl;
+	u64 step_size;
+	u64 low_speed_notification;
+};
+
+struct acpi_fan {
+	bool acpi4;
+	struct acpi_fan_fif fif;
+	struct acpi_fan_fps *fps;
+	int fps_count;
+	struct thermal_cooling_device *cdev;
+};
+
+static struct platform_driver acpi_fan_driver = {
+	.probe = acpi_fan_probe,
+	.remove = acpi_fan_remove,
+	.driver = {
+		.name = "acpi-fan",
+		.acpi_match_table = fan_device_ids,
+		.pm = FAN_PM_OPS_PTR,
+	},
 };
 
 /* thermal cooling device callbacks */
 static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
 			     *state)
 {
-	/* ACPI fan device only support two states: ON/OFF */
-	*state = 1;
+	struct acpi_device *device = cdev->devdata;
+	struct acpi_fan *fan = acpi_driver_data(device);
+
+	if (fan->acpi4)
+		*state = fan->fps_count - 1;
+	else
+		*state = 1;
 	return 0;
 }
86 110
87static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long 111static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
88 *state) 112{
113 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
114 struct acpi_fan *fan = acpi_driver_data(device);
115 union acpi_object *obj;
116 acpi_status status;
117 int control, i;
118
119 status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
120 if (ACPI_FAILURE(status)) {
121 dev_err(&device->dev, "Get fan state failed\n");
122 return status;
123 }
124
125 obj = buffer.pointer;
126 if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
127 obj->package.count != 3 ||
128 obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
129 dev_err(&device->dev, "Invalid _FST data\n");
130 status = -EINVAL;
131 goto err;
132 }
133
134 control = obj->package.elements[1].integer.value;
135 for (i = 0; i < fan->fps_count; i++) {
136 if (control == fan->fps[i].control)
137 break;
138 }
139 if (i == fan->fps_count) {
140 dev_dbg(&device->dev, "Invalid control value returned\n");
141 status = -EINVAL;
142 goto err;
143 }
144
145 *state = i;
146
147err:
148 kfree(obj);
149 return status;
150}
151
152static int fan_get_state(struct acpi_device *device, unsigned long *state)
89{ 153{
90 struct acpi_device *device = cdev->devdata;
91 int result; 154 int result;
92 int acpi_state = ACPI_STATE_D0; 155 int acpi_state = ACPI_STATE_D0;
93 156
94 if (!device) 157 result = acpi_device_update_power(device, &acpi_state);
95 return -EINVAL;
96
97 result = acpi_bus_update_power(device->handle, &acpi_state);
98 if (result) 158 if (result)
99 return result; 159 return result;
100 160
@@ -103,21 +163,57 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
103 return 0; 163 return 0;
104} 164}
105 165
106static int 166static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
107fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) 167 *state)
108{ 168{
109 struct acpi_device *device = cdev->devdata; 169 struct acpi_device *device = cdev->devdata;
110 int result; 170 struct acpi_fan *fan = acpi_driver_data(device);
111 171
112 if (!device || (state != 0 && state != 1)) 172 if (fan->acpi4)
173 return fan_get_state_acpi4(device, state);
174 else
175 return fan_get_state(device, state);
176}
177
178static int fan_set_state(struct acpi_device *device, unsigned long state)
179{
180 if (state != 0 && state != 1)
113 return -EINVAL; 181 return -EINVAL;
114 182
115 result = acpi_bus_set_power(device->handle, 183 return acpi_device_set_power(device,
116 state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD); 184 state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
185}
117 186
118 return result; 187static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state)
188{
189 struct acpi_fan *fan = acpi_driver_data(device);
190 acpi_status status;
191
192 if (state >= fan->fps_count)
193 return -EINVAL;
194
195 status = acpi_execute_simple_method(device->handle, "_FSL",
196 fan->fps[state].control);
197 if (ACPI_FAILURE(status)) {
198 dev_dbg(&device->dev, "Failed to set state by _FSL\n");
199 return status;
200 }
201
202 return 0;
119} 203}
120 204
205static int
206fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
207{
208 struct acpi_device *device = cdev->devdata;
209 struct acpi_fan *fan = acpi_driver_data(device);
210
211 if (fan->acpi4)
212 return fan_set_state_acpi4(device, state);
213 else
214 return fan_set_state(device, state);
215 }
216
121static const struct thermal_cooling_device_ops fan_cooling_ops = { 217static const struct thermal_cooling_device_ops fan_cooling_ops = {
122 .get_max_state = fan_get_max_state, 218 .get_max_state = fan_get_max_state,
123 .get_cur_state = fan_get_cur_state, 219 .get_cur_state = fan_get_cur_state,
@@ -129,21 +225,125 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
129 * -------------------------------------------------------------------------- 225 * --------------------------------------------------------------------------
130*/ 226*/
131 227
132static int acpi_fan_add(struct acpi_device *device) 228static bool acpi_fan_is_acpi4(struct acpi_device *device)
133{ 229{
134 int result = 0; 230 return acpi_has_method(device->handle, "_FIF") &&
135 struct thermal_cooling_device *cdev; 231 acpi_has_method(device->handle, "_FPS") &&
232 acpi_has_method(device->handle, "_FSL") &&
233 acpi_has_method(device->handle, "_FST");
234}
136 235
137 if (!device) 236static int acpi_fan_get_fif(struct acpi_device *device)
138 return -EINVAL; 237{
238 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
239 struct acpi_fan *fan = acpi_driver_data(device);
240 struct acpi_buffer format = { sizeof("NNNN"), "NNNN" };
241 struct acpi_buffer fif = { sizeof(fan->fif), &fan->fif };
242 union acpi_object *obj;
243 acpi_status status;
244
245 status = acpi_evaluate_object(device->handle, "_FIF", NULL, &buffer);
246 if (ACPI_FAILURE(status))
247 return status;
248
249 obj = buffer.pointer;
250 if (!obj || obj->type != ACPI_TYPE_PACKAGE) {
251 dev_err(&device->dev, "Invalid _FIF data\n");
252 status = -EINVAL;
253 goto err;
254 }
139 255
140 strcpy(acpi_device_name(device), "Fan"); 256 status = acpi_extract_package(obj, &format, &fif);
141 strcpy(acpi_device_class(device), ACPI_FAN_CLASS); 257 if (ACPI_FAILURE(status)) {
258 dev_err(&device->dev, "Invalid _FIF element\n");
259 status = -EINVAL;
260 }
142 261
143 result = acpi_bus_update_power(device->handle, NULL); 262err:
144 if (result) { 263 kfree(obj);
145 dev_err(&device->dev, "Setting initial power state\n"); 264 return status;
146 goto end; 265}
266
267static int acpi_fan_speed_cmp(const void *a, const void *b)
268{
269 const struct acpi_fan_fps *fps1 = a;
270 const struct acpi_fan_fps *fps2 = b;
271 return fps1->speed - fps2->speed;
272}
273
274static int acpi_fan_get_fps(struct acpi_device *device)
275{
276 struct acpi_fan *fan = acpi_driver_data(device);
277 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
278 union acpi_object *obj;
279 acpi_status status;
280 int i;
281
282 status = acpi_evaluate_object(device->handle, "_FPS", NULL, &buffer);
283 if (ACPI_FAILURE(status))
284 return status;
285
286 obj = buffer.pointer;
287 if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
288 dev_err(&device->dev, "Invalid _FPS data\n");
289 status = -EINVAL;
290 goto err;
291 }
292
293 fan->fps_count = obj->package.count - 1; /* minus revision field */
294 fan->fps = devm_kzalloc(&device->dev,
295 fan->fps_count * sizeof(struct acpi_fan_fps),
296 GFP_KERNEL);
297 if (!fan->fps) {
298 dev_err(&device->dev, "Not enough memory\n");
299 status = -ENOMEM;
300 goto err;
301 }
302 for (i = 0; i < fan->fps_count; i++) {
303 struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
304 struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] };
305 status = acpi_extract_package(&obj->package.elements[i + 1],
306 &format, &fps);
307 if (ACPI_FAILURE(status)) {
308 dev_err(&device->dev, "Invalid _FPS element\n");
309 break;
310 }
311 }
312
313 /* sort the state array according to fan speed in increase order */
314 sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
315 acpi_fan_speed_cmp, NULL);
316
317err:
318 kfree(obj);
319 return status;
320}
321
322static int acpi_fan_probe(struct platform_device *pdev)
323{
324 int result = 0;
325 struct thermal_cooling_device *cdev;
326 struct acpi_fan *fan;
327 struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
328
329 fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
330 if (!fan) {
331 dev_err(&device->dev, "No memory for fan\n");
332 return -ENOMEM;
333 }
334 device->driver_data = fan;
335 platform_set_drvdata(pdev, fan);
336
337 if (acpi_fan_is_acpi4(device)) {
338 if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device))
339 goto end;
340 fan->acpi4 = true;
341 } else {
342 result = acpi_device_update_power(device, NULL);
343 if (result) {
344 dev_err(&device->dev, "Setting initial power state\n");
345 goto end;
346 }
147 } 347 }
148 348
149 cdev = thermal_cooling_device_register("Fan", device, 349 cdev = thermal_cooling_device_register("Fan", device,
@@ -153,44 +353,32 @@ static int acpi_fan_add(struct acpi_device *device)
153 goto end; 353 goto end;
154 } 354 }
155 355
156 dev_dbg(&device->dev, "registered as cooling_device%d\n", cdev->id); 356 dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id);
157 357
158 device->driver_data = cdev; 358 fan->cdev = cdev;
159 result = sysfs_create_link(&device->dev.kobj, 359 result = sysfs_create_link(&pdev->dev.kobj,
160 &cdev->device.kobj, 360 &cdev->device.kobj,
161 "thermal_cooling"); 361 "thermal_cooling");
162 if (result) 362 if (result)
163 dev_err(&device->dev, "Failed to create sysfs link " 363 dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
164 "'thermal_cooling'\n");
165 364
166 result = sysfs_create_link(&cdev->device.kobj, 365 result = sysfs_create_link(&cdev->device.kobj,
167 &device->dev.kobj, 366 &pdev->dev.kobj,
168 "device"); 367 "device");
169 if (result) 368 if (result)
170 dev_err(&device->dev, "Failed to create sysfs link 'device'\n"); 369 dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
171
172 dev_info(&device->dev, "ACPI: %s [%s] (%s)\n",
173 acpi_device_name(device), acpi_device_bid(device),
174 !device->power.state ? "on" : "off");
175 370
176end: 371end:
177 return result; 372 return result;
178} 373}
179 374
180static int acpi_fan_remove(struct acpi_device *device) 375static int acpi_fan_remove(struct platform_device *pdev)
181{ 376{
182 struct thermal_cooling_device *cdev; 377 struct acpi_fan *fan = platform_get_drvdata(pdev);
183
184 if (!device)
185 return -EINVAL;
186
187 cdev = acpi_driver_data(device);
188 if (!cdev)
189 return -EINVAL;
190 378
191 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 379 sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
192 sysfs_remove_link(&cdev->device.kobj, "device"); 380 sysfs_remove_link(&fan->cdev->device.kobj, "device");
193 thermal_cooling_device_unregister(cdev); 381 thermal_cooling_device_unregister(fan->cdev);
194 382
195 return 0; 383 return 0;
196} 384}
@@ -198,10 +386,11 @@ static int acpi_fan_remove(struct acpi_device *device)
198#ifdef CONFIG_PM_SLEEP 386#ifdef CONFIG_PM_SLEEP
199static int acpi_fan_suspend(struct device *dev) 387static int acpi_fan_suspend(struct device *dev)
200{ 388{
201 if (!dev) 389 struct acpi_fan *fan = dev_get_drvdata(dev);
202 return -EINVAL; 390 if (fan->acpi4)
391 return 0;
203 392
204 acpi_bus_set_power(to_acpi_device(dev)->handle, ACPI_STATE_D0); 393 acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D0);
205 394
206 return AE_OK; 395 return AE_OK;
207} 396}
@@ -209,11 +398,12 @@ static int acpi_fan_suspend(struct device *dev)
209static int acpi_fan_resume(struct device *dev) 398static int acpi_fan_resume(struct device *dev)
210{ 399{
211 int result; 400 int result;
401 struct acpi_fan *fan = dev_get_drvdata(dev);
212 402
213 if (!dev) 403 if (fan->acpi4)
214 return -EINVAL; 404 return 0;
215 405
216 result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); 406 result = acpi_device_update_power(ACPI_COMPANION(dev), NULL);
217 if (result) 407 if (result)
218 dev_err(dev, "Error updating fan power state\n"); 408 dev_err(dev, "Error updating fan power state\n");
219 409
@@ -221,4 +411,4 @@ static int acpi_fan_resume(struct device *dev)
 }
 #endif
 
-module_acpi_driver(acpi_fan_driver);
+module_platform_driver(acpi_fan_driver);
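
For the ACPI 4.0 path added above, acpi_fan_get_fps() relies on acpi_extract_package() to flatten each _FPS entry (Control, TripPoint, Speed, NoiseLevel, Power) into struct acpi_fan_fps through the "NNNNN" format string. Below is a minimal sketch of that single step in isolation, assuming the element pointer comes from an already validated _FPS package as in the hunk; the function name is made up and nothing here is extra driver code.

/* Sketch: decode one _FPS entry into the driver's acpi_fan_fps layout. */
static acpi_status example_decode_fps(union acpi_object *element,
				      struct acpi_fan_fps *out)
{
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer fps = { sizeof(*out), out };

	/* Each 'N' copies the next integer element into the next u64 field. */
	return acpi_extract_package(element, &format, &fps);
}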
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
new file mode 100644
index 000000000000..a27d31d1ba24
--- /dev/null
+++ b/drivers/acpi/int340x_thermal.c
@@ -0,0 +1,51 @@
1/*
2 * ACPI support for int340x thermal drivers
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Zhang Rui <rui.zhang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/acpi.h>
13#include <linux/module.h>
14
15#include "internal.h"
16
17#define DO_ENUMERATION 0x01
18static const struct acpi_device_id int340x_thermal_device_ids[] = {
19 {"INT3400", DO_ENUMERATION },
20 {"INT3401"},
21 {"INT3402"},
22 {"INT3403"},
23 {"INT3404"},
24 {"INT3406"},
25 {"INT3407"},
26 {"INT3408"},
27 {"INT3409"},
28 {"INT340A"},
29 {"INT340B"},
30 {""},
31};
32
33static int int340x_thermal_handler_attach(struct acpi_device *adev,
34 const struct acpi_device_id *id)
35{
36#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
37 if (id->driver_data == DO_ENUMERATION)
38 acpi_create_platform_device(adev);
39#endif
40 return 1;
41}
42
43static struct acpi_scan_handler int340x_thermal_handler = {
44 .ids = int340x_thermal_device_ids,
45 .attach = int340x_thermal_handler_attach,
46};
47
48void __init acpi_int340x_thermal_init(void)
49{
50 acpi_scan_add_handler(&int340x_thermal_handler);
51}
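
This scan handler only enumerates the INT3400 object as a platform device; an actual thermal driver is expected to bind to it through its ACPI match table, the same way the reworked fan driver binds to INT3404. A hedged sketch of that consumer side follows, with a placeholder driver name and probe body; none of it is part of this series.

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct acpi_device_id example_ids[] = {
	{ "INT3400", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_ids);

static int example_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	if (!adev)
		return -ENODEV;
	dev_info(&pdev->dev, "bound to ACPI node %s\n", acpi_device_bid(adev));
	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "int3400-example",
		.acpi_match_table = example_ids,
	},
};
module_platform_driver(example_driver);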
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4c5cf77e7576..447f6d679b29 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -31,6 +31,7 @@ void acpi_pci_link_init(void);
31void acpi_processor_init(void); 31void acpi_processor_init(void);
32void acpi_platform_init(void); 32void acpi_platform_init(void);
33void acpi_pnp_init(void); 33void acpi_pnp_init(void);
34void acpi_int340x_thermal_init(void);
34int acpi_sysfs_init(void); 35int acpi_sysfs_init(void);
35void acpi_container_init(void); 36void acpi_container_init(void);
36void acpi_memory_hotplug_init(void); 37void acpi_memory_hotplug_init(void);
@@ -103,8 +104,6 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
103int acpi_power_on_resources(struct acpi_device *device, int state); 104int acpi_power_on_resources(struct acpi_device *device, int state);
104int acpi_power_transition(struct acpi_device *device, int state); 105int acpi_power_transition(struct acpi_device *device, int state);
105 106
106int acpi_device_update_power(struct acpi_device *device, int *state_p);
107
108int acpi_wakeup_device_init(void); 107int acpi_wakeup_device_init(void);
109 108
110#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC 109#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
@@ -168,13 +167,6 @@ static inline void suspend_nvs_restore(void) {}
168#endif 167#endif
169 168
170/*-------------------------------------------------------------------------- 169/*--------------------------------------------------------------------------
171 Platform bus support
172 -------------------------------------------------------------------------- */
173struct platform_device;
174
175struct platform_device *acpi_create_platform_device(struct acpi_device *adev);
176
177/*--------------------------------------------------------------------------
178 Video 170 Video
179 -------------------------------------------------------------------------- */ 171 -------------------------------------------------------------------------- */
180#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) 172#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ae44d8654c82..0476e90b2091 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -142,6 +142,53 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
142} 142}
143 143
144/* 144/*
145 * acpi_companion_match() - Can we match via ACPI companion device
146 * @dev: Device in question
147 *
148 * Check if the given device has an ACPI companion and if that companion has
149 * a valid list of PNP IDs, and if the device is the first (primary) physical
150 * device associated with it.
151 *
152 * If multiple physical devices are attached to a single ACPI companion, we need
153 * to be careful. The usage scenario for this kind of relationship is that all
154 * of the physical devices in question use resources provided by the ACPI
155 * companion. A typical case is an MFD device where all the sub-devices share
156 * the parent's ACPI companion. In such cases we can only allow the primary
157 * (first) physical device to be matched with the help of the companion's PNP
158 * IDs.
159 *
160 * Additional physical devices sharing the ACPI companion can still use
161 * resources available from it but they will be matched normally using functions
162 * provided by their bus types (and analogously for their modalias).
163 */
164static bool acpi_companion_match(const struct device *dev)
165{
166 struct acpi_device *adev;
167 bool ret;
168
169 adev = ACPI_COMPANION(dev);
170 if (!adev)
171 return false;
172
173 if (list_empty(&adev->pnp.ids))
174 return false;
175
176 mutex_lock(&adev->physical_node_lock);
177 if (list_empty(&adev->physical_node_list)) {
178 ret = false;
179 } else {
180 const struct acpi_device_physical_node *node;
181
182 node = list_first_entry(&adev->physical_node_list,
183 struct acpi_device_physical_node, node);
184 ret = node->dev == dev;
185 }
186 mutex_unlock(&adev->physical_node_lock);
187
188 return ret;
189}
190
191/*
145 * Creates uevent modalias field for ACPI enumerated devices. 192 * Creates uevent modalias field for ACPI enumerated devices.
146 * Because the other buses does not support ACPI HIDs & CIDs. 193 * Because the other buses does not support ACPI HIDs & CIDs.
147 * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get: 194 * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
@@ -149,20 +196,14 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
149 */ 196 */
150int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) 197int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
151{ 198{
152 struct acpi_device *acpi_dev;
153 int len; 199 int len;
154 200
155 acpi_dev = ACPI_COMPANION(dev); 201 if (!acpi_companion_match(dev))
156 if (!acpi_dev)
157 return -ENODEV;
158
159 /* Fall back to bus specific way of modalias exporting */
160 if (list_empty(&acpi_dev->pnp.ids))
161 return -ENODEV; 202 return -ENODEV;
162 203
163 if (add_uevent_var(env, "MODALIAS=")) 204 if (add_uevent_var(env, "MODALIAS="))
164 return -ENOMEM; 205 return -ENOMEM;
165 len = create_modalias(acpi_dev, &env->buf[env->buflen - 1], 206 len = create_modalias(ACPI_COMPANION(dev), &env->buf[env->buflen - 1],
166 sizeof(env->buf) - env->buflen); 207 sizeof(env->buf) - env->buflen);
167 if (len <= 0) 208 if (len <= 0)
168 return len; 209 return len;
@@ -179,18 +220,12 @@ EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
179 */ 220 */
180int acpi_device_modalias(struct device *dev, char *buf, int size) 221int acpi_device_modalias(struct device *dev, char *buf, int size)
181{ 222{
182 struct acpi_device *acpi_dev;
183 int len; 223 int len;
184 224
185 acpi_dev = ACPI_COMPANION(dev); 225 if (!acpi_companion_match(dev))
186 if (!acpi_dev)
187 return -ENODEV; 226 return -ENODEV;
188 227
189 /* Fall back to bus specific way of modalias exporting */ 228 len = create_modalias(ACPI_COMPANION(dev), buf, size -1);
190 if (list_empty(&acpi_dev->pnp.ids))
191 return -ENODEV;
192
193 len = create_modalias(acpi_dev, buf, size -1);
194 if (len <= 0) 229 if (len <= 0)
195 return len; 230 return len;
196 buf[len++] = '\n'; 231 buf[len++] = '\n';
@@ -853,6 +888,9 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
853 if (!ids || !handle || acpi_bus_get_device(handle, &adev)) 888 if (!ids || !handle || acpi_bus_get_device(handle, &adev))
854 return NULL; 889 return NULL;
855 890
891 if (!acpi_companion_match(dev))
892 return NULL;
893
856 return __acpi_match_device(adev, ids); 894 return __acpi_match_device(adev, ids);
857} 895}
858EXPORT_SYMBOL_GPL(acpi_match_device); 896EXPORT_SYMBOL_GPL(acpi_match_device);
@@ -1470,7 +1508,7 @@ static void acpi_wakeup_gpe_init(struct acpi_device *device)
1470 if (ACPI_FAILURE(status)) 1508 if (ACPI_FAILURE(status))
1471 return; 1509 return;
1472 1510
1473 wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HANDLE); 1511 wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HAS_HANDLER);
1474} 1512}
1475 1513
1476static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 1514static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
@@ -2315,6 +2353,7 @@ int __init acpi_scan_init(void)
2315 acpi_container_init(); 2353 acpi_container_init();
2316 acpi_memory_hotplug_init(); 2354 acpi_memory_hotplug_init();
2317 acpi_pnp_init(); 2355 acpi_pnp_init();
2356 acpi_int340x_thermal_init();
2318 2357
2319 mutex_lock(&acpi_scan_lock); 2358 mutex_lock(&acpi_scan_lock);
2320 /* 2359 /*
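
With acpi_companion_match() now gating modalias generation and acpi_match_device(), only the first (primary) physical device attached to an ACPI companion matches by ACPI ID; secondary devices fall back to their bus's native matching. The caller side is unchanged, as in this hedged sketch where the ID string and probe are placeholders.

static const struct acpi_device_id example_acpi_ids[] = {
	{ "ABCD0001", 0 },
	{ }
};

static int example_probe(struct device *dev)
{
	const struct acpi_device_id *id;

	/* Returns NULL for secondary physical devices sharing the
	 * companion, because acpi_companion_match() rejects them. */
	id = acpi_match_device(example_acpi_ids, dev);
	if (!id)
		return -ENODEV;
	return 0;
}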
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 38cb9782d4b8..13e577c80201 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -537,7 +537,7 @@ static ssize_t counter_show(struct kobject *kobj,
537 if (result) 537 if (result)
538 goto end; 538 goto end;
539 539
540 if (!(status & ACPI_EVENT_FLAG_HANDLE)) 540 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
541 size += sprintf(buf + size, " invalid"); 541 size += sprintf(buf + size, " invalid");
542 else if (status & ACPI_EVENT_FLAG_ENABLED) 542 else if (status & ACPI_EVENT_FLAG_ENABLED)
543 size += sprintf(buf + size, " enabled"); 543 size += sprintf(buf + size, " enabled");
@@ -581,7 +581,7 @@ static ssize_t counter_set(struct kobject *kobj,
581 if (result) 581 if (result)
582 goto end; 582 goto end;
583 583
584 if (!(status & ACPI_EVENT_FLAG_HANDLE)) { 584 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
585 printk(KERN_WARNING PREFIX 585 printk(KERN_WARNING PREFIX
586 "Can not change Invalid GPE/Fixed Event status\n"); 586 "Can not change Invalid GPE/Fixed Event status\n");
587 return -EINVAL; 587 return -EINVAL;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 112817e963e0..d24fa1964eb8 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -528,7 +528,6 @@ static void acpi_thermal_check(void *data)
528} 528}
529 529
530/* sys I/F for generic thermal sysfs support */ 530/* sys I/F for generic thermal sysfs support */
531#define KELVIN_TO_MILLICELSIUS(t, off) (((t) - (off)) * 100)
532 531
533static int thermal_get_temp(struct thermal_zone_device *thermal, 532static int thermal_get_temp(struct thermal_zone_device *thermal,
534 unsigned long *temp) 533 unsigned long *temp)
@@ -543,7 +542,8 @@ static int thermal_get_temp(struct thermal_zone_device *thermal,
543 if (result) 542 if (result)
544 return result; 543 return result;
545 544
546 *temp = KELVIN_TO_MILLICELSIUS(tz->temperature, tz->kelvin_offset); 545 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(tz->temperature,
546 tz->kelvin_offset);
547 return 0; 547 return 0;
548} 548}
549 549
@@ -647,7 +647,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
647 647
648 if (tz->trips.critical.flags.valid) { 648 if (tz->trips.critical.flags.valid) {
649 if (!trip) { 649 if (!trip) {
650 *temp = KELVIN_TO_MILLICELSIUS( 650 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
651 tz->trips.critical.temperature, 651 tz->trips.critical.temperature,
652 tz->kelvin_offset); 652 tz->kelvin_offset);
653 return 0; 653 return 0;
@@ -657,7 +657,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
657 657
658 if (tz->trips.hot.flags.valid) { 658 if (tz->trips.hot.flags.valid) {
659 if (!trip) { 659 if (!trip) {
660 *temp = KELVIN_TO_MILLICELSIUS( 660 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
661 tz->trips.hot.temperature, 661 tz->trips.hot.temperature,
662 tz->kelvin_offset); 662 tz->kelvin_offset);
663 return 0; 663 return 0;
@@ -667,7 +667,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
667 667
668 if (tz->trips.passive.flags.valid) { 668 if (tz->trips.passive.flags.valid) {
669 if (!trip) { 669 if (!trip) {
670 *temp = KELVIN_TO_MILLICELSIUS( 670 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
671 tz->trips.passive.temperature, 671 tz->trips.passive.temperature,
672 tz->kelvin_offset); 672 tz->kelvin_offset);
673 return 0; 673 return 0;
@@ -678,7 +678,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
678 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && 678 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
679 tz->trips.active[i].flags.valid; i++) { 679 tz->trips.active[i].flags.valid; i++) {
680 if (!trip) { 680 if (!trip) {
681 *temp = KELVIN_TO_MILLICELSIUS( 681 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
682 tz->trips.active[i].temperature, 682 tz->trips.active[i].temperature,
683 tz->kelvin_offset); 683 tz->kelvin_offset);
684 return 0; 684 return 0;
@@ -694,7 +694,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
694 struct acpi_thermal *tz = thermal->devdata; 694 struct acpi_thermal *tz = thermal->devdata;
695 695
696 if (tz->trips.critical.flags.valid) { 696 if (tz->trips.critical.flags.valid) {
697 *temperature = KELVIN_TO_MILLICELSIUS( 697 *temperature = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
698 tz->trips.critical.temperature, 698 tz->trips.critical.temperature,
699 tz->kelvin_offset); 699 tz->kelvin_offset);
700 return 0; 700 return 0;
@@ -714,8 +714,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
714 714
715 if (type == THERMAL_TRIP_ACTIVE) { 715 if (type == THERMAL_TRIP_ACTIVE) {
716 unsigned long trip_temp; 716 unsigned long trip_temp;
717 unsigned long temp = KELVIN_TO_MILLICELSIUS(tz->temperature, 717 unsigned long temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
718 tz->kelvin_offset); 718 tz->temperature, tz->kelvin_offset);
719 if (thermal_get_trip_temp(thermal, trip, &trip_temp)) 719 if (thermal_get_trip_temp(thermal, trip, &trip_temp))
720 return -EINVAL; 720 return -EINVAL;
721 721
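
The thermal.c hunks drop the driver-private KELVIN_TO_MILLICELSIUS() macro, defined above as (((t) - (off)) * 100), in favour of the common DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET() helper, used here with the same deci-Kelvin inputs and offset. A quick worked example, assuming the zone's usual kelvin_offset of 2732 deci-Kelvin:

/* _TMP reports 3032 dK with an offset of 2732 dK (273.2 K):
 * (3032 - 2732) * 100 = 30000 millidegrees Celsius, i.e. 30.0 C. */
unsigned long mc = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(3032, 2732);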
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 834f35c4bf8d..371ac12d25b1 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -149,6 +149,21 @@ acpi_extract_package(union acpi_object *package,
149 break; 149 break;
150 } 150 }
151 break; 151 break;
152 case ACPI_TYPE_LOCAL_REFERENCE:
153 switch (format_string[i]) {
154 case 'R':
155 size_required += sizeof(void *);
156 tail_offset += sizeof(void *);
157 break;
158 default:
159 printk(KERN_WARNING PREFIX "Invalid package element"
160 " [%d] got reference,"
161 " expecting [%c]\n",
162 i, format_string[i]);
163 return AE_BAD_DATA;
164 break;
165 }
166 break;
152 167
153 case ACPI_TYPE_PACKAGE: 168 case ACPI_TYPE_PACKAGE:
154 default: 169 default:
@@ -247,7 +262,18 @@ acpi_extract_package(union acpi_object *package,
247 break; 262 break;
248 } 263 }
249 break; 264 break;
250 265 case ACPI_TYPE_LOCAL_REFERENCE:
266 switch (format_string[i]) {
267 case 'R':
268 *(void **)head =
269 (void *)element->reference.handle;
270 head += sizeof(void *);
271 break;
272 default:
273 /* Should never get here */
274 break;
275 }
276 break;
251 case ACPI_TYPE_PACKAGE: 277 case ACPI_TYPE_PACKAGE:
252 /* TBD: handle nested packages... */ 278 /* TBD: handle nested packages... */
253 default: 279 default:
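
The utils.c hunks teach acpi_extract_package() a new 'R' conversion that copies an ACPI_TYPE_LOCAL_REFERENCE element out as its handle, one pointer-sized slot at a time. A hedged sketch of a caller of the new specifier; the package shape, struct and function are invented purely for illustration.

struct example_ref_entry {
	u64 value;		/* filled by an 'N' integer element */
	acpi_handle target;	/* filled by the new 'R' reference element */
};

static acpi_status example_extract(union acpi_object *pkg,
				   struct example_ref_entry *out)
{
	struct acpi_buffer format = { sizeof("NR"), "NR" };
	struct acpi_buffer buf = { sizeof(*out), out };

	return acpi_extract_package(pkg, &format, &buf);
}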
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 473ff4892401..950fff9ce453 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -223,9 +223,10 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
223#undef pr_fmt 223#undef pr_fmt
224#define pr_fmt(fmt) fmt 224#define pr_fmt(fmt) fmt
225 225
226static void rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev) 226static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
227{ 227{
228 dev_set_cma_area(dev, rmem->priv); 228 dev_set_cma_area(dev, rmem->priv);
229 return 0;
229} 230}
230 231
231static void rmem_cma_device_release(struct reserved_mem *rmem, 232static void rmem_cma_device_release(struct reserved_mem *rmem,
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 44973196d3fd..9717d5f20139 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1266,6 +1266,8 @@ int dpm_suspend_late(pm_message_t state)
1266 } 1266 }
1267 mutex_unlock(&dpm_list_mtx); 1267 mutex_unlock(&dpm_list_mtx);
1268 async_synchronize_full(); 1268 async_synchronize_full();
1269 if (!error)
1270 error = async_error;
1269 if (error) { 1271 if (error) {
1270 suspend_stats.failed_suspend_late++; 1272 suspend_stats.failed_suspend_late++;
1271 dpm_save_failed_step(SUSPEND_SUSPEND_LATE); 1273 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
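
The dpm_suspend_late() hunk makes the synchronous path pick up failures recorded by asynchronous suspend callbacks: after async_synchronize_full() returns, a shared async_error is promoted to the local error if nothing failed synchronously. A stripped-down sketch of that pattern with the kernel async API; the work helper, item list and error variable are placeholders.

static int example_async_error;	/* set by any failing async callback */

static int example_do_work(void *data)	/* stand-in for per-device work */
{
	return data ? 0 : -EIO;
}

static void example_async_op(void *data, async_cookie_t cookie)
{
	int err = example_do_work(data);

	if (err && !example_async_error)
		example_async_error = err;
}

static int example_run_all(void **items, int n)
{
	int i, error = 0;

	for (i = 0; i < n; i++)
		async_schedule(example_async_op, items[i]);

	async_synchronize_full();
	if (!error)
		error = example_async_error;	/* mirror of the fix above */
	return error;
}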
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 1e5ac0a79696..cd9161a8b3a1 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -275,7 +275,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
275static const struct pci_device_id bcma_pci_bridge_tbl[] = { 275static const struct pci_device_id bcma_pci_bridge_tbl[] = {
276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, 276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
277 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) }, 277 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
278 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) }, 278 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) }, /* 0xa8d8 */
279 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 279 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
280 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 280 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
281 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, 281 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
@@ -285,7 +285,8 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
285 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, 285 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
286 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, 286 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
287 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, 287 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
288 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xA8DB */ 288 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xa8db, BCM43217 (sic!) */
289 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) }, /* 0xa8dc */
289 { 0, }, 290 { 0, },
290}; 291};
291MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl); 292MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index d1656c2f70af..1000955ce09d 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -132,7 +132,7 @@ static bool bcma_is_core_needed_early(u16 core_id)
132 return false; 132 return false;
133} 133}
134 134
135#ifdef CONFIG_OF 135#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
136static struct device_node *bcma_of_find_child_device(struct platform_device *parent, 136static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
137 struct bcma_device *core) 137 struct bcma_device *core)
138{ 138{
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 2671a3f02f0c..8001e812018b 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -450,14 +450,10 @@ static int init_driver_queues(struct nullb *nullb)
450 450
451 ret = setup_commands(nq); 451 ret = setup_commands(nq);
452 if (ret) 452 if (ret)
453 goto err_queue; 453 return ret;
454 nullb->nr_queues++; 454 nullb->nr_queues++;
455 } 455 }
456
457 return 0; 456 return 0;
458err_queue:
459 cleanup_queues(nullb);
460 return ret;
461} 457}
462 458
463static int null_add_dev(void) 459static int null_add_dev(void)
@@ -507,7 +503,9 @@ static int null_add_dev(void)
507 goto out_cleanup_queues; 503 goto out_cleanup_queues;
508 } 504 }
509 blk_queue_make_request(nullb->q, null_queue_bio); 505 blk_queue_make_request(nullb->q, null_queue_bio);
510 init_driver_queues(nullb); 506 rv = init_driver_queues(nullb);
507 if (rv)
508 goto out_cleanup_blk_queue;
511 } else { 509 } else {
512 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); 510 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
513 if (!nullb->q) { 511 if (!nullb->q) {
@@ -516,7 +514,9 @@ static int null_add_dev(void)
516 } 514 }
517 blk_queue_prep_rq(nullb->q, null_rq_prep_fn); 515 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
518 blk_queue_softirq_done(nullb->q, null_softirq_done_fn); 516 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
519 init_driver_queues(nullb); 517 rv = init_driver_queues(nullb);
518 if (rv)
519 goto out_cleanup_blk_queue;
520 } 520 }
521 521
522 nullb->q->queuedata = nullb; 522 nullb->q->queuedata = nullb;
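
With this change init_driver_queues() no longer tears anything down on failure; it just propagates the error, and both call sites in null_add_dev() unwind through their existing cleanup labels. Reduced to a sketch, the caller-owned-cleanup idiom looks like this; every name below is a placeholder stub, not null_blk code.

static int example_setup(void) { return 0; }
static int example_init_queues(void) { return -ENOMEM; }	/* pretend failure */
static void example_cleanup(void) { }

static int example_add_dev(void)
{
	int rv;

	rv = example_setup();		/* hypothetical first stage */
	if (rv)
		goto out;

	rv = example_init_queues();	/* now returns instead of cleaning up */
	if (rv)
		goto out_cleanup;

	return 0;

out_cleanup:
	example_cleanup();		/* single owner of the unwind */
out:
	return rv;
}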
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 756b8ec00f16..0ebadf93b6c5 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -69,8 +69,6 @@ struct vdc_port {
69 u8 vdisk_mtype; 69 u8 vdisk_mtype;
70 70
71 char disk_name[32]; 71 char disk_name[32];
72
73 struct vio_disk_vtoc label;
74}; 72};
75 73
76static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) 74static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -710,13 +708,6 @@ static int probe_disk(struct vdc_port *port)
710 if (comp.err) 708 if (comp.err)
711 return comp.err; 709 return comp.err;
712 710
713 err = generic_request(port, VD_OP_GET_VTOC,
714 &port->label, sizeof(port->label));
715 if (err < 0) {
716 printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
717 return err;
718 }
719
720 if (vdc_version_supported(port, 1, 1)) { 711 if (vdc_version_supported(port, 1, 1)) {
721 /* vdisk_size should be set during the handshake, if it wasn't 712 /* vdisk_size should be set during the handshake, if it wasn't
722 * then the underlying disk is reserved by another system 713 * then the underlying disk is reserved by another system
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0e63e8aa8279..2ad0b5bce44b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -99,11 +99,12 @@ static ssize_t mem_used_total_show(struct device *dev,
99{ 99{
100 u64 val = 0; 100 u64 val = 0;
101 struct zram *zram = dev_to_zram(dev); 101 struct zram *zram = dev_to_zram(dev);
102 struct zram_meta *meta = zram->meta;
103 102
104 down_read(&zram->init_lock); 103 down_read(&zram->init_lock);
105 if (init_done(zram)) 104 if (init_done(zram)) {
105 struct zram_meta *meta = zram->meta;
106 val = zs_get_total_pages(meta->mem_pool); 106 val = zs_get_total_pages(meta->mem_pool);
107 }
107 up_read(&zram->init_lock); 108 up_read(&zram->init_lock);
108 109
109 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); 110 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
@@ -173,16 +174,17 @@ static ssize_t mem_used_max_store(struct device *dev,
173 int err; 174 int err;
174 unsigned long val; 175 unsigned long val;
175 struct zram *zram = dev_to_zram(dev); 176 struct zram *zram = dev_to_zram(dev);
176 struct zram_meta *meta = zram->meta;
177 177
178 err = kstrtoul(buf, 10, &val); 178 err = kstrtoul(buf, 10, &val);
179 if (err || val != 0) 179 if (err || val != 0)
180 return -EINVAL; 180 return -EINVAL;
181 181
182 down_read(&zram->init_lock); 182 down_read(&zram->init_lock);
183 if (init_done(zram)) 183 if (init_done(zram)) {
184 struct zram_meta *meta = zram->meta;
184 atomic_long_set(&zram->stats.max_used_pages, 185 atomic_long_set(&zram->stats.max_used_pages,
185 zs_get_total_pages(meta->mem_pool)); 186 zs_get_total_pages(meta->mem_pool));
187 }
186 up_read(&zram->init_lock); 188 up_read(&zram->init_lock);
187 189
188 return len; 190 return len;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 82759cef9043..04645c09fe5e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1106,7 +1106,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
1106 __mix_pool_bytes(r, hash.w, sizeof(hash.w)); 1106 __mix_pool_bytes(r, hash.w, sizeof(hash.w));
1107 spin_unlock_irqrestore(&r->lock, flags); 1107 spin_unlock_irqrestore(&r->lock, flags);
1108 1108
1109 memset(workspace, 0, sizeof(workspace)); 1109 memzero_explicit(workspace, sizeof(workspace));
1110 1110
1111 /* 1111 /*
1112 * In case the hash function has some recognizable output 1112 * In case the hash function has some recognizable output
@@ -1118,7 +1118,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
1118 hash.w[2] ^= rol32(hash.w[2], 16); 1118 hash.w[2] ^= rol32(hash.w[2], 16);
1119 1119
1120 memcpy(out, &hash, EXTRACT_SIZE); 1120 memcpy(out, &hash, EXTRACT_SIZE);
1121 memset(&hash, 0, sizeof(hash)); 1121 memzero_explicit(&hash, sizeof(hash));
1122} 1122}
1123 1123
1124/* 1124/*
@@ -1175,7 +1175,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1175 } 1175 }
1176 1176
1177 /* Wipe data just returned from memory */ 1177 /* Wipe data just returned from memory */
1178 memset(tmp, 0, sizeof(tmp)); 1178 memzero_explicit(tmp, sizeof(tmp));
1179 1179
1180 return ret; 1180 return ret;
1181} 1181}
@@ -1218,7 +1218,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1218 } 1218 }
1219 1219
1220 /* Wipe data just returned from memory */ 1220 /* Wipe data just returned from memory */
1221 memset(tmp, 0, sizeof(tmp)); 1221 memzero_explicit(tmp, sizeof(tmp));
1222 1222
1223 return ret; 1223 return ret;
1224} 1224}
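
The random.c hunks switch the wipes of soon-dead stack buffers from memset() to memzero_explicit(), because a store that is provably never read again may be removed by the compiler as a dead store, leaving key material in memory. The sketch below shows the problem and one common way to pin the store, an asm statement that pretends the buffer escapes; it illustrates the idea and is not the kernel's memzero_explicit() implementation.

static void example_wipe(u8 *tmp, size_t len)
{
	/* A bare memset(tmp, 0, len) right before tmp dies may be elided. */
	memset(tmp, 0, len);
	/* Telling the compiler the pointer is still used keeps the store. */
	__asm__ __volatile__("" : : "r" (tmp) : "memory");
}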
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 2133f9d59d06..43005d4d3348 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -660,11 +660,11 @@ static bool __init
 arch_timer_probed(int type, const struct of_device_id *matches)
 {
 	struct device_node *dn;
-	bool probed = false;
+	bool probed = true;
 
 	dn = of_find_matching_node(NULL, matches);
-	if (dn && of_device_is_available(dn) && (arch_timers_present & type))
-		probed = true;
+	if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
+		probed = false;
 	of_node_put(dn);
 
 	return probed;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 6bbb8b913446..23aaf40cf37f 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -18,6 +18,7 @@
18#include <linux/cpu.h> 18#include <linux/cpu.h>
19#include <linux/cpu_cooling.h> 19#include <linux/cpu_cooling.h>
20#include <linux/cpufreq.h> 20#include <linux/cpufreq.h>
21#include <linux/cpufreq-dt.h>
21#include <linux/cpumask.h> 22#include <linux/cpumask.h>
22#include <linux/err.h> 23#include <linux/err.h>
23#include <linux/module.h> 24#include <linux/module.h>
@@ -146,8 +147,8 @@ try_again:
146 goto try_again; 147 goto try_again;
147 } 148 }
148 149
149 dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n", 150 dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
150 cpu, PTR_ERR(cpu_reg)); 151 cpu, PTR_ERR(cpu_reg));
151 } 152 }
152 153
153 cpu_clk = clk_get(cpu_dev, NULL); 154 cpu_clk = clk_get(cpu_dev, NULL);
@@ -178,6 +179,7 @@ try_again:
178 179
179static int cpufreq_init(struct cpufreq_policy *policy) 180static int cpufreq_init(struct cpufreq_policy *policy)
180{ 181{
182 struct cpufreq_dt_platform_data *pd;
181 struct cpufreq_frequency_table *freq_table; 183 struct cpufreq_frequency_table *freq_table;
182 struct thermal_cooling_device *cdev; 184 struct thermal_cooling_device *cdev;
183 struct device_node *np; 185 struct device_node *np;
@@ -185,6 +187,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
185 struct device *cpu_dev; 187 struct device *cpu_dev;
186 struct regulator *cpu_reg; 188 struct regulator *cpu_reg;
187 struct clk *cpu_clk; 189 struct clk *cpu_clk;
190 unsigned long min_uV = ~0, max_uV = 0;
188 unsigned int transition_latency; 191 unsigned int transition_latency;
189 int ret; 192 int ret;
190 193
@@ -204,16 +207,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
204 /* OPPs might be populated at runtime, don't check for error here */ 207 /* OPPs might be populated at runtime, don't check for error here */
205 of_init_opp_table(cpu_dev); 208 of_init_opp_table(cpu_dev);
206 209
207 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
208 if (ret) {
209 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
210 goto out_put_node;
211 }
212
213 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 210 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
214 if (!priv) { 211 if (!priv) {
215 ret = -ENOMEM; 212 ret = -ENOMEM;
216 goto out_free_table; 213 goto out_put_node;
217 } 214 }
218 215
219 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); 216 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -222,30 +219,51 @@ static int cpufreq_init(struct cpufreq_policy *policy)
222 transition_latency = CPUFREQ_ETERNAL; 219 transition_latency = CPUFREQ_ETERNAL;
223 220
224 if (!IS_ERR(cpu_reg)) { 221 if (!IS_ERR(cpu_reg)) {
225 struct dev_pm_opp *opp; 222 unsigned long opp_freq = 0;
226 unsigned long min_uV, max_uV;
227 int i;
228 223
229 /* 224 /*
230 * OPP is maintained in order of increasing frequency, and 225 * Disable any OPPs where the connected regulator isn't able to
231 * freq_table initialised from OPP is therefore sorted in the 226 * provide the specified voltage and record minimum and maximum
232 * same order. 227 * voltage levels.
233 */ 228 */
234 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) 229 while (1) {
235 ; 230 struct dev_pm_opp *opp;
236 rcu_read_lock(); 231 unsigned long opp_uV, tol_uV;
237 opp = dev_pm_opp_find_freq_exact(cpu_dev, 232
238 freq_table[0].frequency * 1000, true); 233 rcu_read_lock();
239 min_uV = dev_pm_opp_get_voltage(opp); 234 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
240 opp = dev_pm_opp_find_freq_exact(cpu_dev, 235 if (IS_ERR(opp)) {
241 freq_table[i-1].frequency * 1000, true); 236 rcu_read_unlock();
242 max_uV = dev_pm_opp_get_voltage(opp); 237 break;
243 rcu_read_unlock(); 238 }
239 opp_uV = dev_pm_opp_get_voltage(opp);
240 rcu_read_unlock();
241
242 tol_uV = opp_uV * priv->voltage_tolerance / 100;
243 if (regulator_is_supported_voltage(cpu_reg, opp_uV,
244 opp_uV + tol_uV)) {
245 if (opp_uV < min_uV)
246 min_uV = opp_uV;
247 if (opp_uV > max_uV)
248 max_uV = opp_uV;
249 } else {
250 dev_pm_opp_disable(cpu_dev, opp_freq);
251 }
252
253 opp_freq++;
254 }
255
244 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); 256 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
245 if (ret > 0) 257 if (ret > 0)
246 transition_latency += ret * 1000; 258 transition_latency += ret * 1000;
247 } 259 }
248 260
261 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
262 if (ret) {
263 pr_err("failed to init cpufreq table: %d\n", ret);
264 goto out_free_priv;
265 }
266
249 /* 267 /*
250 * For now, just loading the cooling device; 268 * For now, just loading the cooling device;
251 * thermal DT code takes care of matching them. 269 * thermal DT code takes care of matching them.
@@ -265,9 +283,18 @@ static int cpufreq_init(struct cpufreq_policy *policy)
265 policy->driver_data = priv; 283 policy->driver_data = priv;
266 284
267 policy->clk = cpu_clk; 285 policy->clk = cpu_clk;
268 ret = cpufreq_generic_init(policy, freq_table, transition_latency); 286 ret = cpufreq_table_validate_and_show(policy, freq_table);
269 if (ret) 287 if (ret) {
288 dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
289 ret);
270 goto out_cooling_unregister; 290 goto out_cooling_unregister;
291 }
292
293 policy->cpuinfo.transition_latency = transition_latency;
294
295 pd = cpufreq_get_driver_data();
296 if (!pd || !pd->independent_clocks)
297 cpumask_setall(policy->cpus);
271 298
272 of_node_put(np); 299 of_node_put(np);
273 300
@@ -275,9 +302,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
275 302
276out_cooling_unregister: 303out_cooling_unregister:
277 cpufreq_cooling_unregister(priv->cdev); 304 cpufreq_cooling_unregister(priv->cdev);
278 kfree(priv);
279out_free_table:
280 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 305 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
306out_free_priv:
307 kfree(priv);
281out_put_node: 308out_put_node:
282 of_node_put(np); 309 of_node_put(np);
283out_put_reg_clk: 310out_put_reg_clk:
@@ -335,6 +362,8 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
335 if (!IS_ERR(cpu_reg)) 362 if (!IS_ERR(cpu_reg))
336 regulator_put(cpu_reg); 363 regulator_put(cpu_reg);
337 364
365 dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
366
338 ret = cpufreq_register_driver(&dt_cpufreq_driver); 367 ret = cpufreq_register_driver(&dt_cpufreq_driver);
339 if (ret) 368 if (ret)
340 dev_err(cpu_dev, "failed register driver: %d\n", ret); 369 dev_err(cpu_dev, "failed register driver: %d\n", ret);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 24bf76fba141..644b54e1e7d1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -512,7 +512,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
512show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); 512show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
513show_one(scaling_min_freq, min); 513show_one(scaling_min_freq, min);
514show_one(scaling_max_freq, max); 514show_one(scaling_max_freq, max);
515show_one(scaling_cur_freq, cur); 515
516static ssize_t show_scaling_cur_freq(
517 struct cpufreq_policy *policy, char *buf)
518{
519 ssize_t ret;
520
521 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
522 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
523 else
524 ret = sprintf(buf, "%u\n", policy->cur);
525 return ret;
526}
516 527
517static int cpufreq_set_policy(struct cpufreq_policy *policy, 528static int cpufreq_set_policy(struct cpufreq_policy *policy,
518 struct cpufreq_policy *new_policy); 529 struct cpufreq_policy *new_policy);
@@ -906,11 +917,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
906 if (ret) 917 if (ret)
907 goto err_out_kobj_put; 918 goto err_out_kobj_put;
908 } 919 }
909 if (has_target()) { 920
910 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 921 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
911 if (ret) 922 if (ret)
912 goto err_out_kobj_put; 923 goto err_out_kobj_put;
913 } 924
914 if (cpufreq_driver->bios_limit) { 925 if (cpufreq_driver->bios_limit) {
915 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 926 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
916 if (ret) 927 if (ret)
@@ -1731,6 +1742,21 @@ const char *cpufreq_get_current_driver(void)
1731} 1742}
1732EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 1743EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1733 1744
1745/**
1746 * cpufreq_get_driver_data - return current driver data
1747 *
1748 * Return the private data of the currently loaded cpufreq
1749 * driver, or NULL if no cpufreq driver is loaded.
1750 */
1751void *cpufreq_get_driver_data(void)
1752{
1753 if (cpufreq_driver)
1754 return cpufreq_driver->driver_data;
1755
1756 return NULL;
1757}
1758EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1759
1734/********************************************************************* 1760/*********************************************************************
1735 * NOTIFIER LISTS INTERFACE * 1761 * NOTIFIER LISTS INTERFACE *
1736 *********************************************************************/ 1762 *********************************************************************/
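
Taken together, the cpufreq-dt and cpufreq core hunks let a platform hand struct cpufreq_dt_platform_data, with its independent_clocks flag, to the generic driver through the platform device, and let the driver read it back via the new cpufreq_get_driver_data(). A sketch of the producer side, assuming the platform registers the "cpufreq-dt" device itself; only the independent_clocks field is taken from the hunks, the rest is illustrative.

static struct cpufreq_dt_platform_data example_pdata = {
	.independent_clocks = true,	/* each CPU scales on its own clock */
};

static int __init example_register_cpufreq_dt(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(NULL, "cpufreq-dt", -1,
					     &example_pdata,
					     sizeof(example_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(example_register_cpufreq_dt);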
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index ec399ad2f059..1608f7105c9f 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -19,7 +19,7 @@
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/mailbox.h> 22#include <linux/pl320-ipc.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24 24
25#define HB_CPUFREQ_CHANGE_NOTE 0x80000001 25#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0668b389c516..27bb6d3877ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -52,6 +52,17 @@ static inline int32_t div_fp(int32_t x, int32_t y)
52 return div_s64((int64_t)x << FRAC_BITS, y); 52 return div_s64((int64_t)x << FRAC_BITS, y);
53} 53}
54 54
55static inline int ceiling_fp(int32_t x)
56{
57 int mask, ret;
58
59 ret = fp_toint(x);
60 mask = (1 << FRAC_BITS) - 1;
61 if (x & mask)
62 ret += 1;
63 return ret;
64}
65
55struct sample { 66struct sample {
56 int32_t core_pct_busy; 67 int32_t core_pct_busy;
57 u64 aperf; 68 u64 aperf;
@@ -64,6 +75,7 @@ struct pstate_data {
64 int current_pstate; 75 int current_pstate;
65 int min_pstate; 76 int min_pstate;
66 int max_pstate; 77 int max_pstate;
78 int scaling;
67 int turbo_pstate; 79 int turbo_pstate;
68}; 80};
69 81
@@ -113,6 +125,7 @@ struct pstate_funcs {
113 int (*get_max)(void); 125 int (*get_max)(void);
114 int (*get_min)(void); 126 int (*get_min)(void);
115 int (*get_turbo)(void); 127 int (*get_turbo)(void);
128 int (*get_scaling)(void);
116 void (*set)(struct cpudata*, int pstate); 129 void (*set)(struct cpudata*, int pstate);
117 void (*get_vid)(struct cpudata *); 130 void (*get_vid)(struct cpudata *);
118}; 131};
@@ -138,6 +151,7 @@ struct perf_limits {
138 151
139static struct perf_limits limits = { 152static struct perf_limits limits = {
140 .no_turbo = 0, 153 .no_turbo = 0,
154 .turbo_disabled = 0,
141 .max_perf_pct = 100, 155 .max_perf_pct = 100,
142 .max_perf = int_tofp(1), 156 .max_perf = int_tofp(1),
143 .min_perf_pct = 0, 157 .min_perf_pct = 0,
@@ -218,6 +232,18 @@ static inline void intel_pstate_reset_all_pid(void)
218 } 232 }
219} 233}
220 234
235static inline void update_turbo_state(void)
236{
237 u64 misc_en;
238 struct cpudata *cpu;
239
240 cpu = all_cpu_data[0];
241 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
242 limits.turbo_disabled =
243 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
244 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
245}
246
221/************************** debugfs begin ************************/ 247/************************** debugfs begin ************************/
222static int pid_param_set(void *data, u64 val) 248static int pid_param_set(void *data, u64 val)
223{ 249{
@@ -274,6 +300,20 @@ static void __init intel_pstate_debug_expose_params(void)
274 return sprintf(buf, "%u\n", limits.object); \ 300 return sprintf(buf, "%u\n", limits.object); \
275 } 301 }
276 302
303static ssize_t show_no_turbo(struct kobject *kobj,
304 struct attribute *attr, char *buf)
305{
306 ssize_t ret;
307
308 update_turbo_state();
309 if (limits.turbo_disabled)
310 ret = sprintf(buf, "%u\n", limits.turbo_disabled);
311 else
312 ret = sprintf(buf, "%u\n", limits.no_turbo);
313
314 return ret;
315}
316
277static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 317static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
278 const char *buf, size_t count) 318 const char *buf, size_t count)
279{ 319{
@@ -283,11 +323,14 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
283 ret = sscanf(buf, "%u", &input); 323 ret = sscanf(buf, "%u", &input);
284 if (ret != 1) 324 if (ret != 1)
285 return -EINVAL; 325 return -EINVAL;
286 limits.no_turbo = clamp_t(int, input, 0 , 1); 326
327 update_turbo_state();
287 if (limits.turbo_disabled) { 328 if (limits.turbo_disabled) {
288 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 329 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
289 limits.no_turbo = limits.turbo_disabled; 330 return -EPERM;
290 } 331 }
332 limits.no_turbo = clamp_t(int, input, 0, 1);
333
291 return count; 334 return count;
292} 335}
293 336
@@ -323,7 +366,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
323 return count; 366 return count;
324} 367}
325 368
326show_one(no_turbo, no_turbo);
327show_one(max_perf_pct, max_perf_pct); 369show_one(max_perf_pct, max_perf_pct);
328show_one(min_perf_pct, min_perf_pct); 370show_one(min_perf_pct, min_perf_pct);
329 371
@@ -394,7 +436,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
394 cpudata->vid.ratio); 436 cpudata->vid.ratio);
395 437
396 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 438 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
397 vid = fp_toint(vid_fp); 439 vid = ceiling_fp(vid_fp);
398 440
399 if (pstate > cpudata->pstate.max_pstate) 441 if (pstate > cpudata->pstate.max_pstate)
400 vid = cpudata->vid.turbo; 442 vid = cpudata->vid.turbo;
@@ -404,6 +446,22 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
404 wrmsrl(MSR_IA32_PERF_CTL, val); 446 wrmsrl(MSR_IA32_PERF_CTL, val);
405} 447}
406 448
449#define BYT_BCLK_FREQS 5
450static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
451
452static int byt_get_scaling(void)
453{
454 u64 value;
455 int i;
456
457 rdmsrl(MSR_FSB_FREQ, value);
458 i = value & 0x3;
459
460 BUG_ON(i > BYT_BCLK_FREQS);
461
462 return byt_freq_table[i] * 100;
463}
464
407static void byt_get_vid(struct cpudata *cpudata) 465static void byt_get_vid(struct cpudata *cpudata)
408{ 466{
409 u64 value; 467 u64 value;
@@ -449,6 +507,11 @@ static int core_get_turbo_pstate(void)
449 return ret; 507 return ret;
450} 508}
451 509
510static inline int core_get_scaling(void)
511{
512 return 100000;
513}
514
452static void core_set_pstate(struct cpudata *cpudata, int pstate) 515static void core_set_pstate(struct cpudata *cpudata, int pstate)
453{ 516{
454 u64 val; 517 u64 val;
@@ -473,6 +536,7 @@ static struct cpu_defaults core_params = {
473 .get_max = core_get_max_pstate, 536 .get_max = core_get_max_pstate,
474 .get_min = core_get_min_pstate, 537 .get_min = core_get_min_pstate,
475 .get_turbo = core_get_turbo_pstate, 538 .get_turbo = core_get_turbo_pstate,
539 .get_scaling = core_get_scaling,
476 .set = core_set_pstate, 540 .set = core_set_pstate,
477 }, 541 },
478}; 542};
@@ -491,6 +555,7 @@ static struct cpu_defaults byt_params = {
491 .get_min = byt_get_min_pstate, 555 .get_min = byt_get_min_pstate,
492 .get_turbo = byt_get_turbo_pstate, 556 .get_turbo = byt_get_turbo_pstate,
493 .set = byt_set_pstate, 557 .set = byt_set_pstate,
558 .get_scaling = byt_get_scaling,
494 .get_vid = byt_get_vid, 559 .get_vid = byt_get_vid,
495 }, 560 },
496}; 561};
@@ -501,7 +566,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
501 int max_perf_adj; 566 int max_perf_adj;
502 int min_perf; 567 int min_perf;
503 568
504 if (limits.no_turbo) 569 if (limits.no_turbo || limits.turbo_disabled)
505 max_perf = cpu->pstate.max_pstate; 570 max_perf = cpu->pstate.max_pstate;
506 571
507 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 572 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
@@ -516,6 +581,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
516{ 581{
517 int max_perf, min_perf; 582 int max_perf, min_perf;
518 583
584 update_turbo_state();
585
519 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 586 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
520 587
521 pstate = clamp_t(int, pstate, min_perf, max_perf); 588 pstate = clamp_t(int, pstate, min_perf, max_perf);
@@ -523,7 +590,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
523 if (pstate == cpu->pstate.current_pstate) 590 if (pstate == cpu->pstate.current_pstate)
524 return; 591 return;
525 592
526 trace_cpu_frequency(pstate * 100000, cpu->cpu); 593 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
527 594
528 cpu->pstate.current_pstate = pstate; 595 cpu->pstate.current_pstate = pstate;
529 596
@@ -535,6 +602,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
535 cpu->pstate.min_pstate = pstate_funcs.get_min(); 602 cpu->pstate.min_pstate = pstate_funcs.get_min();
536 cpu->pstate.max_pstate = pstate_funcs.get_max(); 603 cpu->pstate.max_pstate = pstate_funcs.get_max();
537 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 604 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
605 cpu->pstate.scaling = pstate_funcs.get_scaling();
538 606
539 if (pstate_funcs.get_vid) 607 if (pstate_funcs.get_vid)
540 pstate_funcs.get_vid(cpu); 608 pstate_funcs.get_vid(cpu);
@@ -550,7 +618,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
550 core_pct = div64_u64(core_pct, int_tofp(sample->mperf)); 618 core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
551 619
552 sample->freq = fp_toint( 620 sample->freq = fp_toint(
553 mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); 621 mul_fp(int_tofp(
622 cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
623 core_pct));
554 624
555 sample->core_pct_busy = (int32_t)core_pct; 625 sample->core_pct_busy = (int32_t)core_pct;
556} 626}
@@ -671,7 +741,9 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
671{ 741{
672 struct cpudata *cpu; 742 struct cpudata *cpu;
673 743
674 all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); 744 if (!all_cpu_data[cpunum])
745 all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
746 GFP_KERNEL);
675 if (!all_cpu_data[cpunum]) 747 if (!all_cpu_data[cpunum])
676 return -ENOMEM; 748 return -ENOMEM;
677 749
@@ -714,9 +786,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
714 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 786 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
715 limits.min_perf_pct = 100; 787 limits.min_perf_pct = 100;
716 limits.min_perf = int_tofp(1); 788 limits.min_perf = int_tofp(1);
789 limits.max_policy_pct = 100;
717 limits.max_perf_pct = 100; 790 limits.max_perf_pct = 100;
718 limits.max_perf = int_tofp(1); 791 limits.max_perf = int_tofp(1);
719 limits.no_turbo = limits.turbo_disabled; 792 limits.no_turbo = 0;
720 return 0; 793 return 0;
721 } 794 }
722 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 795 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -751,15 +824,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
751 824
752 del_timer_sync(&all_cpu_data[cpu_num]->timer); 825 del_timer_sync(&all_cpu_data[cpu_num]->timer);
753 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 826 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
754 kfree(all_cpu_data[cpu_num]);
755 all_cpu_data[cpu_num] = NULL;
756} 827}
757 828
758static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 829static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
759{ 830{
760 struct cpudata *cpu; 831 struct cpudata *cpu;
761 int rc; 832 int rc;
762 u64 misc_en;
763 833
764 rc = intel_pstate_init_cpu(policy->cpu); 834 rc = intel_pstate_init_cpu(policy->cpu);
765 if (rc) 835 if (rc)
@@ -767,23 +837,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
767 837
768 cpu = all_cpu_data[policy->cpu]; 838 cpu = all_cpu_data[policy->cpu];
769 839
770 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
771 if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
772 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
773 limits.turbo_disabled = 1;
774 limits.no_turbo = 1;
775 }
776 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) 840 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
777 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 841 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
778 else 842 else
779 policy->policy = CPUFREQ_POLICY_POWERSAVE; 843 policy->policy = CPUFREQ_POLICY_POWERSAVE;
780 844
781 policy->min = cpu->pstate.min_pstate * 100000; 845 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
782 policy->max = cpu->pstate.turbo_pstate * 100000; 846 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
783 847
784 /* cpuinfo and default policy values */ 848 /* cpuinfo and default policy values */
785 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; 849 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
786 policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000; 850 policy->cpuinfo.max_freq =
851 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
787 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 852 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
788 cpumask_set_cpu(policy->cpu, policy->cpus); 853 cpumask_set_cpu(policy->cpu, policy->cpus);
789 854
@@ -841,6 +906,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
841 pstate_funcs.get_max = funcs->get_max; 906 pstate_funcs.get_max = funcs->get_max;
842 pstate_funcs.get_min = funcs->get_min; 907 pstate_funcs.get_min = funcs->get_min;
843 pstate_funcs.get_turbo = funcs->get_turbo; 908 pstate_funcs.get_turbo = funcs->get_turbo;
909 pstate_funcs.get_scaling = funcs->get_scaling;
844 pstate_funcs.set = funcs->set; 910 pstate_funcs.set = funcs->set;
845 pstate_funcs.get_vid = funcs->get_vid; 911 pstate_funcs.get_vid = funcs->get_vid;
846} 912}
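A worked example of the new scaling factor, using only values visible in the hunks above (the chosen P-state number is illustrative): on Baytrail, byt_get_scaling() reads the low bits of MSR_FSB_FREQ; an index of 0 selects 833 from byt_freq_table, giving 833 * 100 = 83300 kHz per P-state unit, so requesting P-state 20 is traced and reported as 20 * 83300 = 1666000 kHz (about 1.67 GHz). Core parts keep core_get_scaling() = 100000, so the same P-state 20 still maps to 2000000 kHz, matching the old behaviour that hard-coded a multiplier of 100000 everywhere.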
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
index 0e70ee28a5ca..4102be01d06a 100644
--- a/drivers/cpuidle/Kconfig.mips
+++ b/drivers/cpuidle/Kconfig.mips
@@ -3,7 +3,7 @@
3# 3#
4config MIPS_CPS_CPUIDLE 4config MIPS_CPS_CPUIDLE
5 bool "CPU Idle driver for MIPS CPS platforms" 5 bool "CPU Idle driver for MIPS CPS platforms"
6 depends on CPU_IDLE 6 depends on CPU_IDLE && MIPS_CPS
7 depends on SYS_SUPPORTS_MIPS_CPS 7 depends on SYS_SUPPORTS_MIPS_CPS
8 select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT 8 select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
9 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 9 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index a64be578dab2..7d3a3497dd4c 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -163,7 +163,8 @@ static int powernv_add_idle_states(void)
163 int nr_idle_states = 1; /* Snooze */ 163 int nr_idle_states = 1; /* Snooze */
164 int dt_idle_states; 164 int dt_idle_states;
165 const __be32 *idle_state_flags; 165 const __be32 *idle_state_flags;
166 u32 len_flags, flags; 166 const __be32 *idle_state_latency;
167 u32 len_flags, flags, latency_ns;
167 int i; 168 int i;
168 169
169 /* Currently we have snooze statically defined */ 170 /* Currently we have snooze statically defined */
@@ -180,18 +181,32 @@ static int powernv_add_idle_states(void)
180 return nr_idle_states; 181 return nr_idle_states;
181 } 182 }
182 183
184 idle_state_latency = of_get_property(power_mgt,
185 "ibm,cpu-idle-state-latencies-ns", NULL);
186 if (!idle_state_latency) {
187 pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
188 return nr_idle_states;
189 }
190
183 dt_idle_states = len_flags / sizeof(u32); 191 dt_idle_states = len_flags / sizeof(u32);
184 192
185 for (i = 0; i < dt_idle_states; i++) { 193 for (i = 0; i < dt_idle_states; i++) {
186 194
187 flags = be32_to_cpu(idle_state_flags[i]); 195 flags = be32_to_cpu(idle_state_flags[i]);
196
197 /* Cpuidle accepts exit_latency in us and we estimate
198 * target residency to be 10x exit_latency
199 */
200 latency_ns = be32_to_cpu(idle_state_latency[i]);
188 if (flags & IDLE_USE_INST_NAP) { 201 if (flags & IDLE_USE_INST_NAP) {
189 /* Add NAP state */ 202 /* Add NAP state */
190 strcpy(powernv_states[nr_idle_states].name, "Nap"); 203 strcpy(powernv_states[nr_idle_states].name, "Nap");
191 strcpy(powernv_states[nr_idle_states].desc, "Nap"); 204 strcpy(powernv_states[nr_idle_states].desc, "Nap");
192 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID; 205 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID;
193 powernv_states[nr_idle_states].exit_latency = 10; 206 powernv_states[nr_idle_states].exit_latency =
194 powernv_states[nr_idle_states].target_residency = 100; 207 ((unsigned int)latency_ns) / 1000;
208 powernv_states[nr_idle_states].target_residency =
209 ((unsigned int)latency_ns / 100);
195 powernv_states[nr_idle_states].enter = &nap_loop; 210 powernv_states[nr_idle_states].enter = &nap_loop;
196 nr_idle_states++; 211 nr_idle_states++;
197 } 212 }
@@ -202,8 +217,10 @@ static int powernv_add_idle_states(void)
202 strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); 217 strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
203 powernv_states[nr_idle_states].flags = 218 powernv_states[nr_idle_states].flags =
204 CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP; 219 CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP;
205 powernv_states[nr_idle_states].exit_latency = 300; 220 powernv_states[nr_idle_states].exit_latency =
206 powernv_states[nr_idle_states].target_residency = 1000000; 221 ((unsigned int)latency_ns) / 1000;
222 powernv_states[nr_idle_states].target_residency =
223 ((unsigned int)latency_ns / 100);
207 powernv_states[nr_idle_states].enter = &fastsleep_loop; 224 powernv_states[nr_idle_states].enter = &fastsleep_loop;
208 nr_idle_states++; 225 nr_idle_states++;
209 } 226 }
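To make the conversion above concrete: the device tree supplies latencies in nanoseconds, cpuidle expects exit_latency in microseconds, and the driver estimates target_residency as ten times the exit latency. An "ibm,cpu-idle-state-latencies-ns" entry of 100000 (a value picked purely for illustration) therefore yields exit_latency = 100000 / 1000 = 100 us and target_residency = 100000 / 100 = 1000 us, replacing the previously hard-coded 10/100 (Nap) and 300/1000000 (FastSleep) values.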
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index df6575f1430d..682288ced4ac 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
562 562
563 if (apiexcp & UECC_EXCP_DETECTED) { 563 if (apiexcp & UECC_EXCP_DETECTED) {
564 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); 564 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
565 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 565 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
566 pfn, offset, 0, 566 pfn, offset, 0,
567 csrow, -1, -1, 567 csrow, -1, -1,
568 mci->ctl_name, ""); 568 mci->ctl_name, "");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 3cda79bc8b00..ece3aef16bb1 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
226static void process_ce_no_info(struct mem_ctl_info *mci) 226static void process_ce_no_info(struct mem_ctl_info *mci)
227{ 227{
228 edac_dbg(3, "\n"); 228 edac_dbg(3, "\n");
229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, 229 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
230 "e7xxx CE log register overflow", ""); 230 "e7xxx CE log register overflow", "");
231} 231}
232 232
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 022a70273ada..aa98b136f5d0 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
242 -1, -1, 242 -1, -1,
243 "i3000 UE", ""); 243 "i3000 UE", "");
244 } else if (log & I3200_ECCERRLOG_CE) { 244 } else if (log & I3200_ECCERRLOG_CE) {
245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 245 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
246 0, 0, eccerrlog_syndrome(log), 246 0, 0, eccerrlog_syndrome(log),
247 eccerrlog_row(channel, log), 247 eccerrlog_row(channel, log),
248 -1, -1, 248 -1, -1,
249 "i3000 UE", ""); 249 "i3000 CE", "");
250 } 250 }
251 } 251 }
252} 252}
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 3382f6344e42..4382343a7c60 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
124 dimm->location[0], dimm->location[1], -1, 124 dimm->location[0], dimm->location[1], -1,
125 "i82860 UE", ""); 125 "i82860 UE", "");
126 else 126 else
127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 127 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
128 info->eap, 0, info->derrsyn, 128 info->eap, 0, info->derrsyn,
129 dimm->location[0], dimm->location[1], -1, 129 dimm->location[0], dimm->location[1], -1,
130 "i82860 CE", ""); 130 "i82860 CE", "");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 64ecbb501c50..8590099ac148 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -41,6 +41,28 @@ struct efi __read_mostly efi = {
41}; 41};
42EXPORT_SYMBOL(efi); 42EXPORT_SYMBOL(efi);
43 43
44static bool disable_runtime;
45static int __init setup_noefi(char *arg)
46{
47 disable_runtime = true;
48 return 0;
49}
50early_param("noefi", setup_noefi);
51
52bool efi_runtime_disabled(void)
53{
54 return disable_runtime;
55}
56
57static int __init parse_efi_cmdline(char *str)
58{
59 if (parse_option_str(str, "noruntime"))
60 disable_runtime = true;
61
62 return 0;
63}
64early_param("efi", parse_efi_cmdline);
65
44static struct kobject *efi_kobj; 66static struct kobject *efi_kobj;
45static struct kobject *efivars_kobj; 67static struct kobject *efivars_kobj;
46 68
@@ -423,3 +445,60 @@ int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
423 return ret; 445 return ret;
424} 446}
425#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ 447#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
448
449static __initdata char memory_type_name[][20] = {
450 "Reserved",
451 "Loader Code",
452 "Loader Data",
453 "Boot Code",
454 "Boot Data",
455 "Runtime Code",
456 "Runtime Data",
457 "Conventional Memory",
458 "Unusable Memory",
459 "ACPI Reclaim Memory",
460 "ACPI Memory NVS",
461 "Memory Mapped I/O",
462 "MMIO Port Space",
463 "PAL Code"
464};
465
466char * __init efi_md_typeattr_format(char *buf, size_t size,
467 const efi_memory_desc_t *md)
468{
469 char *pos;
470 int type_len;
471 u64 attr;
472
473 pos = buf;
474 if (md->type >= ARRAY_SIZE(memory_type_name))
475 type_len = snprintf(pos, size, "[type=%u", md->type);
476 else
477 type_len = snprintf(pos, size, "[%-*s",
478 (int)(sizeof(memory_type_name[0]) - 1),
479 memory_type_name[md->type]);
480 if (type_len >= size)
481 return buf;
482
483 pos += type_len;
484 size -= type_len;
485
486 attr = md->attribute;
487 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
488 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_WP |
489 EFI_MEMORY_RP | EFI_MEMORY_XP | EFI_MEMORY_RUNTIME))
490 snprintf(pos, size, "|attr=0x%016llx]",
491 (unsigned long long)attr);
492 else
493 snprintf(pos, size, "|%3s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
494 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
495 attr & EFI_MEMORY_XP ? "XP" : "",
496 attr & EFI_MEMORY_RP ? "RP" : "",
497 attr & EFI_MEMORY_WP ? "WP" : "",
498 attr & EFI_MEMORY_UCE ? "UCE" : "",
499 attr & EFI_MEMORY_WB ? "WB" : "",
500 attr & EFI_MEMORY_WT ? "WT" : "",
501 attr & EFI_MEMORY_WC ? "WC" : "",
502 attr & EFI_MEMORY_UC ? "UC" : "");
503 return buf;
504}
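Given an efi_memory_desc_t *md, a caller walking the memory map could use the new formatter roughly like this (an illustrative sketch only, assuming the prototype ends up visible via <linux/efi.h>; the 64-byte buffer is an arbitrary "large enough" guess):

	#include <linux/efi.h>
	#include <linux/printk.h>

	static void __init dump_md(const efi_memory_desc_t *md)
	{
		char buf[64];	/* arbitrary size for the formatted string */

		pr_info("  [mem %#018llx-%#018llx] %s\n",
			md->phys_addr,
			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
			efi_md_typeattr_format(buf, sizeof(buf), md));
	}

The helper falls back to a raw "[type=NN" or "|attr=0x..." form whenever the type index or attribute bits fall outside the ranges it knows about, so the output stays readable even on firmware that sets unknown bits.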
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 480339b6b110..75ee05964cbc 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -226,6 +226,10 @@ unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table,
226 goto fail_free_image; 226 goto fail_free_image;
227 } 227 }
228 228
229 status = efi_parse_options(cmdline_ptr);
230 if (status != EFI_SUCCESS)
231 pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
232
229 /* 233 /*
230 * Unauthenticated device tree data is a security hazard, so 234 * Unauthenticated device tree data is a security hazard, so
231 * ignore 'dtb=' unless UEFI Secure Boot is disabled. 235 * ignore 'dtb=' unless UEFI Secure Boot is disabled.
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 32d5cca30f49..a920fec8fe88 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -15,8 +15,23 @@
15 15
16#include "efistub.h" 16#include "efistub.h"
17 17
18/*
19 * Some firmware implementations have problems reading files in one go.
20 * A read chunk size of 1MB seems to work for most platforms.
21 *
22 * Unfortunately, reading files in chunks triggers *other* bugs on some
23 * platforms, so we provide a way to disable this workaround, which can
24 * be done by passing "efi=nochunk" on the EFI boot stub command line.
25 *
26 * If you experience issues with initrd images being corrupt it's worth
27 * trying efi=nochunk, but chunking is enabled by default because there
28 * are far more machines that require the workaround than those that
29 * break with it enabled.
30 */
18#define EFI_READ_CHUNK_SIZE (1024 * 1024) 31#define EFI_READ_CHUNK_SIZE (1024 * 1024)
19 32
33static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
34
20struct file_info { 35struct file_info {
21 efi_file_handle_t *handle; 36 efi_file_handle_t *handle;
22 u64 size; 37 u64 size;
@@ -281,6 +296,49 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
281 efi_call_early(free_pages, addr, nr_pages); 296 efi_call_early(free_pages, addr, nr_pages);
282} 297}
283 298
299/*
300 * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
301 * option, e.g. efi=nochunk.
302 *
303 * It should be noted that efi= is parsed in two very different
304 * environments, first in the early boot environment of the EFI boot
305 * stub, and subsequently during the kernel boot.
306 */
307efi_status_t efi_parse_options(char *cmdline)
308{
309 char *str;
310
311 /*
312 * If no EFI parameters were specified on the cmdline we've got
313 * nothing to do.
314 */
315 str = strstr(cmdline, "efi=");
316 if (!str)
317 return EFI_SUCCESS;
318
319 /* Skip ahead to first argument */
320 str += strlen("efi=");
321
322 /*
323 * Remember, because efi= is also used by the kernel we need to
324 * skip over arguments we don't understand.
325 */
326 while (*str) {
327 if (!strncmp(str, "nochunk", 7)) {
328 str += strlen("nochunk");
329 __chunk_size = -1UL;
330 }
331
332 /* Group words together, delimited by "," */
333 while (*str && *str != ',')
334 str++;
335
336 if (*str == ',')
337 str++;
338 }
339
340 return EFI_SUCCESS;
341}
284 342
285/* 343/*
 286 * Check the cmdline for LILO-style file= arguments. 344 * Check the cmdline for LILO-style file= arguments.
@@ -423,8 +481,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
423 size = files[j].size; 481 size = files[j].size;
424 while (size) { 482 while (size) {
425 unsigned long chunksize; 483 unsigned long chunksize;
426 if (size > EFI_READ_CHUNK_SIZE) 484 if (size > __chunk_size)
427 chunksize = EFI_READ_CHUNK_SIZE; 485 chunksize = __chunk_size;
428 else 486 else
429 chunksize = size; 487 chunksize = size;
430 488
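Pulling the pieces together: the chunking workaround stays on by default, and a user who hits initrd corruption only has to append efi=nochunk to the kernel command line, however it is handed to the stub (the boot-loader line below is purely an illustration):

	linux /vmlinuz-3.18 root=/dev/sda2 ro quiet efi=nochunk

efi_parse_options() then sets __chunk_size to -1UL, so the size > __chunk_size test in handle_cmdline_files() is never true and each file is read back in a single call instead of 1 MB chunks.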
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 10daa4bbb258..228bbf910461 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -14,11 +14,80 @@
14 * This file is released under the GPLv2. 14 * This file is released under the GPLv2.
15 */ 15 */
16 16
17#include <linux/bug.h>
17#include <linux/efi.h> 18#include <linux/efi.h>
18#include <linux/spinlock.h> /* spinlock_t */ 19#include <linux/mutex.h>
20#include <linux/spinlock.h>
19#include <asm/efi.h> 21#include <asm/efi.h>
20 22
21/* 23/*
24 * According to section 7.1 of the UEFI spec, Runtime Services are not fully
25 * reentrant, and there are particular combinations of calls that need to be
26 * serialized. (source: UEFI Specification v2.4A)
27 *
28 * Table 31. Rules for Reentry Into Runtime Services
29 * +------------------------------------+-------------------------------+
30 * | If previous call is busy in | Forbidden to call |
31 * +------------------------------------+-------------------------------+
32 * | Any | SetVirtualAddressMap() |
33 * +------------------------------------+-------------------------------+
34 * | ConvertPointer() | ConvertPointer() |
35 * +------------------------------------+-------------------------------+
36 * | SetVariable() | ResetSystem() |
37 * | UpdateCapsule() | |
38 * | SetTime() | |
39 * | SetWakeupTime() | |
40 * | GetNextHighMonotonicCount() | |
41 * +------------------------------------+-------------------------------+
42 * | GetVariable() | GetVariable() |
43 * | GetNextVariableName() | GetNextVariableName() |
44 * | SetVariable() | SetVariable() |
45 * | QueryVariableInfo() | QueryVariableInfo() |
46 * | UpdateCapsule() | UpdateCapsule() |
47 * | QueryCapsuleCapabilities() | QueryCapsuleCapabilities() |
48 * | GetNextHighMonotonicCount() | GetNextHighMonotonicCount() |
49 * +------------------------------------+-------------------------------+
50 * | GetTime() | GetTime() |
51 * | SetTime() | SetTime() |
52 * | GetWakeupTime() | GetWakeupTime() |
53 * | SetWakeupTime() | SetWakeupTime() |
54 * +------------------------------------+-------------------------------+
55 *
56 * Due to the fact that the EFI pstore may write to the variable store in
57 * interrupt context, we need to use a spinlock for at least the groups that
58 * contain SetVariable() and QueryVariableInfo(). That leaves little else, as
59 * none of the remaining functions are actually ever called at runtime.
60 * So let's just use a single spinlock to serialize all Runtime Services calls.
61 */
62static DEFINE_SPINLOCK(efi_runtime_lock);
63
64/*
65 * Some runtime services calls can be reentrant under NMI, even if the table
66 * above says they are not. (source: UEFI Specification v2.4A)
67 *
68 * Table 32. Functions that may be called after Machine Check, INIT and NMI
69 * +----------------------------+------------------------------------------+
70 * | Function | Called after Machine Check, INIT and NMI |
71 * +----------------------------+------------------------------------------+
72 * | GetTime() | Yes, even if previously busy. |
73 * | GetVariable() | Yes, even if previously busy |
74 * | GetNextVariableName() | Yes, even if previously busy |
75 * | QueryVariableInfo() | Yes, even if previously busy |
76 * | SetVariable() | Yes, even if previously busy |
77 * | UpdateCapsule() | Yes, even if previously busy |
78 * | QueryCapsuleCapabilities() | Yes, even if previously busy |
79 * | ResetSystem() | Yes, even if previously busy |
80 * +----------------------------+------------------------------------------+
81 *
82 * In order to prevent deadlocks under NMI, the wrappers for these functions
83 * may only grab the efi_runtime_lock or rtc_lock spinlocks if !efi_in_nmi().
84 * However, not all of the services listed are reachable through NMI code paths,
85 * so the the special handling as suggested by the UEFI spec is only implemented
86 * for QueryVariableInfo() and SetVariable(), as these can be reached in NMI
87 * context through efi_pstore_write().
88 */
89
90/*
22 * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"), 91 * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"),
23 * the EFI specification requires that callers of the time related runtime 92 * the EFI specification requires that callers of the time related runtime
24 * functions serialize with other CMOS accesses in the kernel, as the EFI time 93 * functions serialize with other CMOS accesses in the kernel, as the EFI time
@@ -32,7 +101,9 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
32 efi_status_t status; 101 efi_status_t status;
33 102
34 spin_lock_irqsave(&rtc_lock, flags); 103 spin_lock_irqsave(&rtc_lock, flags);
104 spin_lock(&efi_runtime_lock);
35 status = efi_call_virt(get_time, tm, tc); 105 status = efi_call_virt(get_time, tm, tc);
106 spin_unlock(&efi_runtime_lock);
36 spin_unlock_irqrestore(&rtc_lock, flags); 107 spin_unlock_irqrestore(&rtc_lock, flags);
37 return status; 108 return status;
38} 109}
@@ -43,7 +114,9 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
43 efi_status_t status; 114 efi_status_t status;
44 115
45 spin_lock_irqsave(&rtc_lock, flags); 116 spin_lock_irqsave(&rtc_lock, flags);
117 spin_lock(&efi_runtime_lock);
46 status = efi_call_virt(set_time, tm); 118 status = efi_call_virt(set_time, tm);
119 spin_unlock(&efi_runtime_lock);
47 spin_unlock_irqrestore(&rtc_lock, flags); 120 spin_unlock_irqrestore(&rtc_lock, flags);
48 return status; 121 return status;
49} 122}
@@ -56,7 +129,9 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
56 efi_status_t status; 129 efi_status_t status;
57 130
58 spin_lock_irqsave(&rtc_lock, flags); 131 spin_lock_irqsave(&rtc_lock, flags);
132 spin_lock(&efi_runtime_lock);
59 status = efi_call_virt(get_wakeup_time, enabled, pending, tm); 133 status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
134 spin_unlock(&efi_runtime_lock);
60 spin_unlock_irqrestore(&rtc_lock, flags); 135 spin_unlock_irqrestore(&rtc_lock, flags);
61 return status; 136 return status;
62} 137}
@@ -67,7 +142,9 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
67 efi_status_t status; 142 efi_status_t status;
68 143
69 spin_lock_irqsave(&rtc_lock, flags); 144 spin_lock_irqsave(&rtc_lock, flags);
145 spin_lock(&efi_runtime_lock);
70 status = efi_call_virt(set_wakeup_time, enabled, tm); 146 status = efi_call_virt(set_wakeup_time, enabled, tm);
147 spin_unlock(&efi_runtime_lock);
71 spin_unlock_irqrestore(&rtc_lock, flags); 148 spin_unlock_irqrestore(&rtc_lock, flags);
72 return status; 149 return status;
73} 150}
@@ -78,14 +155,27 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
78 unsigned long *data_size, 155 unsigned long *data_size,
79 void *data) 156 void *data)
80{ 157{
81 return efi_call_virt(get_variable, name, vendor, attr, data_size, data); 158 unsigned long flags;
159 efi_status_t status;
160
161 spin_lock_irqsave(&efi_runtime_lock, flags);
162 status = efi_call_virt(get_variable, name, vendor, attr, data_size,
163 data);
164 spin_unlock_irqrestore(&efi_runtime_lock, flags);
165 return status;
82} 166}
83 167
84static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, 168static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
85 efi_char16_t *name, 169 efi_char16_t *name,
86 efi_guid_t *vendor) 170 efi_guid_t *vendor)
87{ 171{
88 return efi_call_virt(get_next_variable, name_size, name, vendor); 172 unsigned long flags;
173 efi_status_t status;
174
175 spin_lock_irqsave(&efi_runtime_lock, flags);
176 status = efi_call_virt(get_next_variable, name_size, name, vendor);
177 spin_unlock_irqrestore(&efi_runtime_lock, flags);
178 return status;
89} 179}
90 180
91static efi_status_t virt_efi_set_variable(efi_char16_t *name, 181static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -94,24 +184,61 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
94 unsigned long data_size, 184 unsigned long data_size,
95 void *data) 185 void *data)
96{ 186{
97 return efi_call_virt(set_variable, name, vendor, attr, data_size, data); 187 unsigned long flags;
188 efi_status_t status;
189
190 spin_lock_irqsave(&efi_runtime_lock, flags);
191 status = efi_call_virt(set_variable, name, vendor, attr, data_size,
192 data);
193 spin_unlock_irqrestore(&efi_runtime_lock, flags);
194 return status;
98} 195}
99 196
197static efi_status_t
198virt_efi_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
199 u32 attr, unsigned long data_size,
200 void *data)
201{
202 unsigned long flags;
203 efi_status_t status;
204
205 if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
206 return EFI_NOT_READY;
207
208 status = efi_call_virt(set_variable, name, vendor, attr, data_size,
209 data);
210 spin_unlock_irqrestore(&efi_runtime_lock, flags);
211 return status;
212}
213
214
100static efi_status_t virt_efi_query_variable_info(u32 attr, 215static efi_status_t virt_efi_query_variable_info(u32 attr,
101 u64 *storage_space, 216 u64 *storage_space,
102 u64 *remaining_space, 217 u64 *remaining_space,
103 u64 *max_variable_size) 218 u64 *max_variable_size)
104{ 219{
220 unsigned long flags;
221 efi_status_t status;
222
105 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 223 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
106 return EFI_UNSUPPORTED; 224 return EFI_UNSUPPORTED;
107 225
108 return efi_call_virt(query_variable_info, attr, storage_space, 226 spin_lock_irqsave(&efi_runtime_lock, flags);
109 remaining_space, max_variable_size); 227 status = efi_call_virt(query_variable_info, attr, storage_space,
228 remaining_space, max_variable_size);
229 spin_unlock_irqrestore(&efi_runtime_lock, flags);
230 return status;
110} 231}
111 232
112static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) 233static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
113{ 234{
114 return efi_call_virt(get_next_high_mono_count, count); 235 unsigned long flags;
236 efi_status_t status;
237
238 spin_lock_irqsave(&efi_runtime_lock, flags);
239 status = efi_call_virt(get_next_high_mono_count, count);
240 spin_unlock_irqrestore(&efi_runtime_lock, flags);
241 return status;
115} 242}
116 243
117static void virt_efi_reset_system(int reset_type, 244static void virt_efi_reset_system(int reset_type,
@@ -119,17 +246,27 @@ static void virt_efi_reset_system(int reset_type,
119 unsigned long data_size, 246 unsigned long data_size,
120 efi_char16_t *data) 247 efi_char16_t *data)
121{ 248{
249 unsigned long flags;
250
251 spin_lock_irqsave(&efi_runtime_lock, flags);
122 __efi_call_virt(reset_system, reset_type, status, data_size, data); 252 __efi_call_virt(reset_system, reset_type, status, data_size, data);
253 spin_unlock_irqrestore(&efi_runtime_lock, flags);
123} 254}
124 255
125static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules, 256static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
126 unsigned long count, 257 unsigned long count,
127 unsigned long sg_list) 258 unsigned long sg_list)
128{ 259{
260 unsigned long flags;
261 efi_status_t status;
262
129 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 263 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
130 return EFI_UNSUPPORTED; 264 return EFI_UNSUPPORTED;
131 265
132 return efi_call_virt(update_capsule, capsules, count, sg_list); 266 spin_lock_irqsave(&efi_runtime_lock, flags);
267 status = efi_call_virt(update_capsule, capsules, count, sg_list);
268 spin_unlock_irqrestore(&efi_runtime_lock, flags);
269 return status;
133} 270}
134 271
135static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules, 272static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
@@ -137,11 +274,17 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
137 u64 *max_size, 274 u64 *max_size,
138 int *reset_type) 275 int *reset_type)
139{ 276{
277 unsigned long flags;
278 efi_status_t status;
279
140 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 280 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
141 return EFI_UNSUPPORTED; 281 return EFI_UNSUPPORTED;
142 282
143 return efi_call_virt(query_capsule_caps, capsules, count, max_size, 283 spin_lock_irqsave(&efi_runtime_lock, flags);
144 reset_type); 284 status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
285 reset_type);
286 spin_unlock_irqrestore(&efi_runtime_lock, flags);
287 return status;
145} 288}
146 289
147void efi_native_runtime_setup(void) 290void efi_native_runtime_setup(void)
@@ -153,6 +296,7 @@ void efi_native_runtime_setup(void)
153 efi.get_variable = virt_efi_get_variable; 296 efi.get_variable = virt_efi_get_variable;
154 efi.get_next_variable = virt_efi_get_next_variable; 297 efi.get_next_variable = virt_efi_get_next_variable;
155 efi.set_variable = virt_efi_set_variable; 298 efi.set_variable = virt_efi_set_variable;
299 efi.set_variable_nonblocking = virt_efi_set_variable_nonblocking;
156 efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; 300 efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
157 efi.reset_system = virt_efi_reset_system; 301 efi.reset_system = virt_efi_reset_system;
158 efi.query_variable_info = virt_efi_query_variable_info; 302 efi.query_variable_info = virt_efi_query_variable_info;
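All of the blocking wrappers in this file now share one shape; the sketch below is not one of the patched functions but shows that shape with a hypothetical runtime service foo() (hypothetical name, single argument assumed):

	static efi_status_t virt_efi_foo(unsigned long arg)
	{
		unsigned long flags;
		efi_status_t status;

		spin_lock_irqsave(&efi_runtime_lock, flags);
		status = efi_call_virt(foo, arg);
		spin_unlock_irqrestore(&efi_runtime_lock, flags);
		return status;
	}

The time and wakeup wrappers additionally nest this inside rtc_lock, and the new virt_efi_set_variable_nonblocking() replaces spin_lock_irqsave() with spin_trylock_irqsave(), returning EFI_NOT_READY when the lock is contended so that NMI/panic-time callers never spin.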
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 5abe943e3404..70a0fb10517f 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -321,11 +321,11 @@ static unsigned long var_name_strnsize(efi_char16_t *variable_name,
321 * Print a warning when duplicate EFI variables are encountered and 321 * Print a warning when duplicate EFI variables are encountered and
322 * disable the sysfs workqueue since the firmware is buggy. 322 * disable the sysfs workqueue since the firmware is buggy.
323 */ 323 */
324static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid, 324static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
325 unsigned long len16) 325 unsigned long len16)
326{ 326{
327 size_t i, len8 = len16 / sizeof(efi_char16_t); 327 size_t i, len8 = len16 / sizeof(efi_char16_t);
328 char *s8; 328 char *str8;
329 329
330 /* 330 /*
331 * Disable the workqueue since the algorithm it uses for 331 * Disable the workqueue since the algorithm it uses for
@@ -334,16 +334,16 @@ static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
334 */ 334 */
335 efivar_wq_enabled = false; 335 efivar_wq_enabled = false;
336 336
337 s8 = kzalloc(len8, GFP_KERNEL); 337 str8 = kzalloc(len8, GFP_KERNEL);
338 if (!s8) 338 if (!str8)
339 return; 339 return;
340 340
341 for (i = 0; i < len8; i++) 341 for (i = 0; i < len8; i++)
342 s8[i] = s16[i]; 342 str8[i] = str16[i];
343 343
344 printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", 344 printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
345 s8, vendor_guid); 345 str8, vendor_guid);
346 kfree(s8); 346 kfree(str8);
347} 347}
348 348
349/** 349/**
@@ -595,6 +595,39 @@ int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
595} 595}
596EXPORT_SYMBOL_GPL(efivar_entry_set); 596EXPORT_SYMBOL_GPL(efivar_entry_set);
597 597
598/*
599 * efivar_entry_set_nonblocking - call set_variable_nonblocking()
600 *
601 * This function is guaranteed to not block and is suitable for calling
602 * from crash/panic handlers.
603 *
604 * Crucially, this function will not block if it cannot acquire
605 * __efivars->lock. Instead, it returns -EBUSY.
606 */
607static int
608efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
609 u32 attributes, unsigned long size, void *data)
610{
611 const struct efivar_operations *ops = __efivars->ops;
612 unsigned long flags;
613 efi_status_t status;
614
615 if (!spin_trylock_irqsave(&__efivars->lock, flags))
616 return -EBUSY;
617
618 status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
619 if (status != EFI_SUCCESS) {
620 spin_unlock_irqrestore(&__efivars->lock, flags);
621 return -ENOSPC;
622 }
623
624 status = ops->set_variable_nonblocking(name, &vendor, attributes,
625 size, data);
626
627 spin_unlock_irqrestore(&__efivars->lock, flags);
628 return efi_status_to_err(status);
629}
630
598/** 631/**
599 * efivar_entry_set_safe - call set_variable() if enough space in firmware 632 * efivar_entry_set_safe - call set_variable() if enough space in firmware
600 * @name: buffer containing the variable name 633 * @name: buffer containing the variable name
@@ -622,6 +655,20 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
622 if (!ops->query_variable_store) 655 if (!ops->query_variable_store)
623 return -ENOSYS; 656 return -ENOSYS;
624 657
658 /*
659 * If the EFI variable backend provides a non-blocking
660 * ->set_variable() operation and we're in a context where we
661 * cannot block, then we need to use it to avoid live-locks,
662 * since the implication is that the regular ->set_variable()
663 * will block.
664 *
665 * If no ->set_variable_nonblocking() is provided then
666 * ->set_variable() is assumed to be non-blocking.
667 */
668 if (!block && ops->set_variable_nonblocking)
669 return efivar_entry_set_nonblocking(name, vendor, attributes,
670 size, data);
671
625 if (!block) { 672 if (!block) {
626 if (!spin_trylock_irqsave(&__efivars->lock, flags)) 673 if (!spin_trylock_irqsave(&__efivars->lock, flags))
627 return -EBUSY; 674 return -EBUSY;
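From the caller's side, the point of the new branch is that code which must not sleep or spin (the EFI pstore write path during a panic, for instance) passes block == false and either gets the variable written or an immediate -EBUSY. A minimal sketch, assuming the efivar_entry_set_safe() parameter order suggested by the hunk header above (name, vendor, attributes, block, size, data), with name, vendor, size and data describing the variable to be written:

	ret = efivar_entry_set_safe(name, vendor,
				    EFI_VARIABLE_NON_VOLATILE |
				    EFI_VARIABLE_BOOTSERVICE_ACCESS |
				    EFI_VARIABLE_RUNTIME_ACCESS,
				    false /* block */, size, data);
	if (ret == -EBUSY)
		pr_warn("efivars: variable store busy, record dropped\n");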
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index e705335101a5..c2a1cba1e984 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -32,6 +32,8 @@ static struct drm_driver driver;
32static const struct pci_device_id pciidlist[] = { 32static const struct pci_device_id pciidlist[] = {
33 { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0, 33 { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
34 0, 0 }, 34 0, 0 },
35 { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
36 0x0001, 0, 0, 0 },
35 {0,} 37 {0,}
36}; 38};
37 39
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3201986bf25e..f66392b6e287 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1711,7 +1711,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1711#define HPD_STORM_DETECT_PERIOD 1000 1711#define HPD_STORM_DETECT_PERIOD 1000
1712#define HPD_STORM_THRESHOLD 5 1712#define HPD_STORM_THRESHOLD 5
1713 1713
1714static int ilk_port_to_hotplug_shift(enum port port) 1714static int pch_port_to_hotplug_shift(enum port port)
1715{ 1715{
1716 switch (port) { 1716 switch (port) {
1717 case PORT_A: 1717 case PORT_A:
@@ -1727,7 +1727,7 @@ static int ilk_port_to_hotplug_shift(enum port port)
1727 } 1727 }
1728} 1728}
1729 1729
1730static int g4x_port_to_hotplug_shift(enum port port) 1730static int i915_port_to_hotplug_shift(enum port port)
1731{ 1731{
1732 switch (port) { 1732 switch (port) {
1733 case PORT_A: 1733 case PORT_A:
@@ -1785,12 +1785,12 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1785 if (port && dev_priv->hpd_irq_port[port]) { 1785 if (port && dev_priv->hpd_irq_port[port]) {
1786 bool long_hpd; 1786 bool long_hpd;
1787 1787
1788 if (IS_G4X(dev)) { 1788 if (HAS_PCH_SPLIT(dev)) {
1789 dig_shift = g4x_port_to_hotplug_shift(port); 1789 dig_shift = pch_port_to_hotplug_shift(port);
1790 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1791 } else {
1792 dig_shift = ilk_port_to_hotplug_shift(port);
1793 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1790 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1791 } else {
1792 dig_shift = i915_port_to_hotplug_shift(port);
1793 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1794 } 1794 }
1795 1795
1796 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", 1796 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
@@ -3458,12 +3458,13 @@ static void gen8_irq_reset(struct drm_device *dev)
3458void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3458void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3459{ 3459{
3460 unsigned long irqflags; 3460 unsigned long irqflags;
3461 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3461 3462
3462 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3463 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3463 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3464 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3464 ~dev_priv->de_irq_mask[PIPE_B]); 3465 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3465 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3466 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3466 ~dev_priv->de_irq_mask[PIPE_C]); 3467 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3467 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3468 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3468} 3469}
3469 3470
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 507370513f3d..c9e220963a78 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,9 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
73 DRM_FORMAT_ARGB8888, 73 DRM_FORMAT_ARGB8888,
74}; 74};
75 75
76#define DIV_ROUND_CLOSEST_ULL(ll, d) \
77({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
78
79static void intel_increase_pllclock(struct drm_device *dev, 76static void intel_increase_pllclock(struct drm_device *dev,
80 enum pipe pipe); 77 enum pipe pipe);
81static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 78static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -12357,27 +12354,36 @@ static void intel_setup_outputs(struct drm_device *dev)
12357 if (I915_READ(PCH_DP_D) & DP_DETECTED) 12354 if (I915_READ(PCH_DP_D) & DP_DETECTED)
12358 intel_dp_init(dev, PCH_DP_D, PORT_D); 12355 intel_dp_init(dev, PCH_DP_D, PORT_D);
12359 } else if (IS_VALLEYVIEW(dev)) { 12356 } else if (IS_VALLEYVIEW(dev)) {
12360 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { 12357 /*
12358 * The DP_DETECTED bit is the latched state of the DDC
12359 * SDA pin at boot. However since eDP doesn't require DDC
12360 * (no way to plug in a DP->HDMI dongle) the DDC pins for
12361 * eDP ports may have been muxed to an alternate function.
12362 * Thus we can't rely on the DP_DETECTED bit alone to detect
12363 * eDP ports. Consult the VBT as well as DP_DETECTED to
12364 * detect eDP ports.
12365 */
12366 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
12361 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, 12367 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
12362 PORT_B); 12368 PORT_B);
12363 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) 12369 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
12364 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 12370 intel_dp_is_edp(dev, PORT_B))
12365 } 12371 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
12366 12372
12367 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { 12373 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
12368 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 12374 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
12369 PORT_C); 12375 PORT_C);
12370 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 12376 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
12371 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 12377 intel_dp_is_edp(dev, PORT_C))
12372 } 12378 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
12373 12379
12374 if (IS_CHERRYVIEW(dev)) { 12380 if (IS_CHERRYVIEW(dev)) {
12375 if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) { 12381 if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
12376 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID, 12382 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
12377 PORT_D); 12383 PORT_D);
12378 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED) 12384 /* eDP not supported on port D, so don't check VBT */
12379 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D); 12385 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
12380 } 12386 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
12381 } 12387 }
12382 12388
12383 intel_dsi_init(dev); 12389 intel_dsi_init(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 07ce04683c30..ba715229a540 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -35,6 +35,9 @@
35#include <drm/drm_fb_helper.h> 35#include <drm/drm_fb_helper.h>
36#include <drm/drm_dp_mst_helper.h> 36#include <drm/drm_dp_mst_helper.h>
37 37
38#define DIV_ROUND_CLOSEST_ULL(ll, d) \
39({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
40
38/** 41/**
39 * _wait_for - magic (register) wait macro 42 * _wait_for - magic (register) wait macro
40 * 43 *
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 18784470a760..0e018cb49147 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -419,9 +419,8 @@ static uint32_t scale(uint32_t source_val,
419 source_val = clamp(source_val, source_min, source_max); 419 source_val = clamp(source_val, source_min, source_max);
420 420
421 /* avoid overflows */ 421 /* avoid overflows */
422 target_val = (uint64_t)(source_val - source_min) * 422 target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
423 (target_max - target_min); 423 (target_max - target_min), source_max - source_min);
424 do_div(target_val, source_max - source_min);
425 target_val += target_min; 424 target_val += target_min;
426 425
427 return target_val; 426 return target_val;
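The only behavioural difference in the intel_panel change above is the rounding: do_div() truncates, while DIV_ROUND_CLOSEST_ULL() (the macro moved into intel_drv.h earlier in this diff) adds half of the divisor first. With made-up numbers, scaling source_val = 50 from a 0..100 range onto a 0..255 range gives 50 * 255 = 12750; truncating division by 100 used to return 127, whereas (12750 + 50) / 100 = 128, i.e. the closest result rather than the floor.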
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
index 552fdbd45ebe..1d0e33fb5f61 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -113,6 +113,8 @@
113#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) 113#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
114#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac) 114#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
115 115
116#include <subdev/fb.h>
117
116/* 118/*
117 * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's 119 * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
118 * the GPU itself that does context-switching, but it needs a special 120 * the GPU itself that does context-switching, but it needs a special
@@ -569,8 +571,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
569 gr_def(ctx, 0x407d08, 0x00010040); 571 gr_def(ctx, 0x407d08, 0x00010040);
570 else if (device->chipset < 0xa0) 572 else if (device->chipset < 0xa0)
571 gr_def(ctx, 0x407d08, 0x00390040); 573 gr_def(ctx, 0x407d08, 0x00390040);
572 else 574 else {
573 gr_def(ctx, 0x407d08, 0x003d0040); 575 if (nouveau_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
576 gr_def(ctx, 0x407d08, 0x003d0040);
577 else
578 gr_def(ctx, 0x407d08, 0x003c0040);
579 }
574 gr_def(ctx, 0x407d0c, 0x00000022); 580 gr_def(ctx, 0x407d0c, 0x00000022);
575 } 581 }
576 582
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 589dbb582da2..fd3dbd59d73e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -400,15 +400,20 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
400 struct nouveau_channel **pchan) 400 struct nouveau_channel **pchan)
401{ 401{
402 struct nouveau_cli *cli = (void *)nvif_client(&device->base); 402 struct nouveau_cli *cli = (void *)nvif_client(&device->base);
403 bool super;
403 int ret; 404 int ret;
404 405
406 /* hack until fencenv50 is fixed, and agp access relaxed */
407 super = cli->base.super;
408 cli->base.super = true;
409
405 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan); 410 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
406 if (ret) { 411 if (ret) {
407 NV_PRINTK(debug, cli, "ib channel create, %d\n", ret); 412 NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
408 ret = nouveau_channel_dma(drm, device, handle, pchan); 413 ret = nouveau_channel_dma(drm, device, handle, pchan);
409 if (ret) { 414 if (ret) {
410 NV_PRINTK(debug, cli, "dma channel create, %d\n", ret); 415 NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
411 return ret; 416 goto done;
412 } 417 }
413 } 418 }
414 419
@@ -416,8 +421,9 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
416 if (ret) { 421 if (ret) {
417 NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret); 422 NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
418 nouveau_channel_del(pchan); 423 nouveau_channel_del(pchan);
419 return ret;
420 } 424 }
421 425
422 return 0; 426done:
427 cli->base.super = super;
428 return ret;
423} 429}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index af9e78546688..0d1396266857 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -572,7 +572,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
572 struct qxl_framebuffer *qfb; 572 struct qxl_framebuffer *qfb;
573 struct qxl_bo *bo, *old_bo = NULL; 573 struct qxl_bo *bo, *old_bo = NULL;
574 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); 574 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
575 uint32_t width, height, base_offset;
576 bool recreate_primary = false; 575 bool recreate_primary = false;
577 int ret; 576 int ret;
578 int surf_id; 577 int surf_id;
@@ -602,9 +601,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
602 if (qcrtc->index == 0) 601 if (qcrtc->index == 0)
603 recreate_primary = true; 602 recreate_primary = true;
604 603
605 width = mode->hdisplay; 604 if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
606 height = mode->vdisplay; 605 DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
607 base_offset = 0; 606 return -EINVAL;
607 }
608 608
609 ret = qxl_bo_reserve(bo, false); 609 ret = qxl_bo_reserve(bo, false);
610 if (ret != 0) 610 if (ret != 0)
@@ -618,10 +618,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
618 if (recreate_primary) { 618 if (recreate_primary) {
619 qxl_io_destroy_primary(qdev); 619 qxl_io_destroy_primary(qdev);
620 qxl_io_log(qdev, 620 qxl_io_log(qdev,
621 "recreate primary: %dx%d (was %dx%d,%d,%d)\n", 621 "recreate primary: %dx%d,%d,%d\n",
622 width, height, bo->surf.width, 622 bo->surf.width, bo->surf.height,
623 bo->surf.height, bo->surf.stride, bo->surf.format); 623 bo->surf.stride, bo->surf.format);
624 qxl_io_create_primary(qdev, base_offset, bo); 624 qxl_io_create_primary(qdev, 0, bo);
625 bo->is_primary = true; 625 bo->is_primary = true;
626 } 626 }
627 627
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 300d971187c4..0b2929de9f41 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "btcd.h" 28#include "btcd.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "cypress_dpm.h" 30#include "cypress_dpm.h"
@@ -1170,6 +1171,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
1170 { 25000, 30000, RADEON_SCLK_UP } 1171 { 25000, 30000, RADEON_SCLK_UP }
1171}; 1172};
1172 1173
1174void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
1175 u32 *max_clock)
1176{
1177 u32 i, clock = 0;
1178
1179 if ((table == NULL) || (table->count == 0)) {
1180 *max_clock = clock;
1181 return;
1182 }
1183
1184 for (i = 0; i < table->count; i++) {
1185 if (clock < table->entries[i].clk)
1186 clock = table->entries[i].clk;
1187 }
1188 *max_clock = clock;
1189}
1190
1173void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1191void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1174 u32 clock, u16 max_voltage, u16 *voltage) 1192 u32 clock, u16 max_voltage, u16 *voltage)
1175{ 1193{
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
46 struct rv7xx_pl *pl); 46 struct rv7xx_pl *pl);
47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
48 u32 clock, u16 max_voltage, u16 *voltage); 48 u32 clock, u16 max_voltage, u16 *voltage);
49void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
50 u32 *max_clock);
49void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 51void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
50 u16 max_vddc, u16 max_vddci, 52 u16 max_vddc, u16 max_vddci,
51 u16 *vddc, u16 *vddci); 53 u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index f5c8c0445a94..11a55e9dad7f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -24,6 +24,7 @@
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "radeon_ucode.h" 28#include "radeon_ucode.h"
28#include "cikd.h" 29#include "cikd.h"
29#include "r600_dpm.h" 30#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index c77dad1a4576..4e8432d07f15 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -611,16 +611,19 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
611{ 611{
612 unsigned i; 612 unsigned i;
613 int r; 613 int r;
614 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 614 unsigned index;
615 u32 tmp; 615 u32 tmp;
616 u64 gpu_addr;
616 617
617 if (!ptr) { 618 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
618 DRM_ERROR("invalid vram scratch pointer\n"); 619 index = R600_WB_DMA_RING_TEST_OFFSET;
619 return -EINVAL; 620 else
620 } 621 index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
622
623 gpu_addr = rdev->wb.gpu_addr + index;
621 624
622 tmp = 0xCAFEDEAD; 625 tmp = 0xCAFEDEAD;
623 writel(tmp, ptr); 626 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
624 627
625 r = radeon_ring_lock(rdev, ring, 5); 628 r = radeon_ring_lock(rdev, ring, 5);
626 if (r) { 629 if (r) {
@@ -628,14 +631,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
628 return r; 631 return r;
629 } 632 }
630 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); 633 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
631 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 634 radeon_ring_write(ring, lower_32_bits(gpu_addr));
632 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); 635 radeon_ring_write(ring, upper_32_bits(gpu_addr));
633 radeon_ring_write(ring, 1); /* number of DWs to follow */ 636 radeon_ring_write(ring, 1); /* number of DWs to follow */
634 radeon_ring_write(ring, 0xDEADBEEF); 637 radeon_ring_write(ring, 0xDEADBEEF);
635 radeon_ring_unlock_commit(rdev, ring, false); 638 radeon_ring_unlock_commit(rdev, ring, false);
636 639
637 for (i = 0; i < rdev->usec_timeout; i++) { 640 for (i = 0; i < rdev->usec_timeout; i++) {
638 tmp = readl(ptr); 641 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
639 if (tmp == 0xDEADBEEF) 642 if (tmp == 0xDEADBEEF)
640 break; 643 break;
641 DRM_UDELAY(1); 644 DRM_UDELAY(1);
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 47d31e915758..9aad0327e4d1 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "evergreend.h" 28#include "evergreend.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "cypress_dpm.h" 30#include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 950af153f30e..2fe8cfc966d9 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -32,7 +32,7 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
32 struct drm_connector *connector; 32 struct drm_connector *connector;
33 struct radeon_connector *radeon_connector = NULL; 33 struct radeon_connector *radeon_connector = NULL;
34 u32 tmp; 34 u32 tmp;
35 u8 *sadb; 35 u8 *sadb = NULL;
36 int sad_count; 36 int sad_count;
37 37
38 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 38 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
@@ -49,8 +49,8 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
49 49
50 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 50 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
51 if (sad_count < 0) { 51 if (sad_count < 0) {
52 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 52 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
53 return; 53 sad_count = 0;
54 } 54 }
55 55
56 /* program the speaker allocation */ 56 /* program the speaker allocation */
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index c0bbf68dbc27..f312edf4d50e 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -155,7 +155,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
155 struct drm_connector *connector; 155 struct drm_connector *connector;
156 struct radeon_connector *radeon_connector = NULL; 156 struct radeon_connector *radeon_connector = NULL;
157 u32 offset, tmp; 157 u32 offset, tmp;
158 u8 *sadb; 158 u8 *sadb = NULL;
159 int sad_count; 159 int sad_count;
160 160
161 if (!dig || !dig->afmt || !dig->afmt->pin) 161 if (!dig || !dig->afmt || !dig->afmt->pin)
@@ -176,9 +176,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
176 } 176 }
177 177
178 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); 178 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
179 if (sad_count <= 0) { 179 if (sad_count < 0) {
180 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 180 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
181 return; 181 sad_count = 0;
182 } 182 }
183 183
184 /* program the speaker allocation */ 184 /* program the speaker allocation */
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 2514d659b1ba..53abd9b17a50 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -133,7 +133,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
133 struct drm_connector *connector; 133 struct drm_connector *connector;
134 struct radeon_connector *radeon_connector = NULL; 134 struct radeon_connector *radeon_connector = NULL;
135 u32 tmp; 135 u32 tmp;
136 u8 *sadb; 136 u8 *sadb = NULL;
137 int sad_count; 137 int sad_count;
138 138
139 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 139 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
@@ -149,9 +149,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
149 } 149 }
150 150
151 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); 151 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
152 if (sad_count <= 0) { 152 if (sad_count < 0) {
153 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 153 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
154 return; 154 sad_count = 0;
155 } 155 }
156 156
157 /* program the speaker allocation */ 157 /* program the speaker allocation */
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 715b181c6243..6d2f16cf2c1c 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -23,6 +23,7 @@
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h"
26#include "nid.h" 27#include "nid.h"
27#include "r600_dpm.h" 28#include "r600_dpm.h"
28#include "ni_dpm.h" 29#include "ni_dpm.h"
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 100189ec5fa8..aabc343b9a8f 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -232,16 +232,19 @@ int r600_dma_ring_test(struct radeon_device *rdev,
232{ 232{
233 unsigned i; 233 unsigned i;
234 int r; 234 int r;
235 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 235 unsigned index;
236 u32 tmp; 236 u32 tmp;
237 u64 gpu_addr;
237 238
238 if (!ptr) { 239 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
239 DRM_ERROR("invalid vram scratch pointer\n"); 240 index = R600_WB_DMA_RING_TEST_OFFSET;
240 return -EINVAL; 241 else
241 } 242 index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
243
244 gpu_addr = rdev->wb.gpu_addr + index;
242 245
243 tmp = 0xCAFEDEAD; 246 tmp = 0xCAFEDEAD;
244 writel(tmp, ptr); 247 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
245 248
246 r = radeon_ring_lock(rdev, ring, 4); 249 r = radeon_ring_lock(rdev, ring, 4);
247 if (r) { 250 if (r) {
@@ -249,13 +252,13 @@ int r600_dma_ring_test(struct radeon_device *rdev,
249 return r; 252 return r;
250 } 253 }
251 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 254 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
252 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 255 radeon_ring_write(ring, lower_32_bits(gpu_addr));
253 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); 256 radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
254 radeon_ring_write(ring, 0xDEADBEEF); 257 radeon_ring_write(ring, 0xDEADBEEF);
255 radeon_ring_unlock_commit(rdev, ring, false); 258 radeon_ring_unlock_commit(rdev, ring, false);
256 259
257 for (i = 0; i < rdev->usec_timeout; i++) { 260 for (i = 0; i < rdev->usec_timeout; i++) {
258 tmp = readl(ptr); 261 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
259 if (tmp == 0xDEADBEEF) 262 if (tmp == 0xDEADBEEF)
260 break; 263 break;
261 DRM_UDELAY(1); 264 DRM_UDELAY(1);
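
Note: both DMA ring tests above stop poking the VRAM scratch register and instead seed a per-ring slot in the GPU writeback page with 0xCAFEDEAD, queue a write of 0xDEADBEEF to it, and poll for the magic value. A rough standalone sketch of that seed/kick/poll shape follows; the "engine" here is just a stub and the timeout handling is simplified.

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t wb_slot;	/* stands in for one writeback dword */

/* Stub for the DMA engine; in the driver the write is queued on the ring. */
static void engine_write(volatile uint32_t *dst, uint32_t val)
{
	*dst = val;
}

static int ring_test(unsigned int timeout)
{
	unsigned int i;

	wb_slot = 0xCAFEDEAD;			/* seed with a sentinel */
	engine_write(&wb_slot, 0xDEADBEEF);	/* request the magic value */

	for (i = 0; i < timeout; i++) {		/* poll until it lands */
		if (wb_slot == 0xDEADBEEF)
			return 0;
		/* the driver delays 1us per iteration here */
	}
	return -1;
}

int main(void)
{
	printf("ring test %s\n", ring_test(100) == 0 ? "ok" : "timed out");
	return 0;
}
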
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 9c61b74ef441..f6309bd23e01 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "r600d.h" 28#include "r600d.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "atom.h" 30#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f7c4b226a284..a9717b3fbf1b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1133,6 +1133,8 @@ struct radeon_wb {
1133#define R600_WB_EVENT_OFFSET 3072 1133#define R600_WB_EVENT_OFFSET 3072
1134#define CIK_WB_CP1_WPTR_OFFSET 3328 1134#define CIK_WB_CP1_WPTR_OFFSET 3328
1135#define CIK_WB_CP2_WPTR_OFFSET 3584 1135#define CIK_WB_CP2_WPTR_OFFSET 3584
1136#define R600_WB_DMA_RING_TEST_OFFSET 3588
1137#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
1136 1138
1137/** 1139/**
1138 * struct radeon_pm - power management datas 1140 * struct radeon_pm - power management datas
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f41cc1538e48..ea2676954dde 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1130,7 +1130,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1130 if (radeon_vm_block_size == -1) { 1130 if (radeon_vm_block_size == -1) {
1131 1131
1132 /* Total bits covered by PD + PTs */ 1132 /* Total bits covered by PD + PTs */
1133 unsigned bits = ilog2(radeon_vm_size) + 17; 1133 unsigned bits = ilog2(radeon_vm_size) + 18;
1134 1134
1135 /* Make sure the PD is 4K in size up to 8GB address space. 1135 /* Make sure the PD is 4K in size up to 8GB address space.
1136 Above that split equal between PD and PTs */ 1136 Above that split equal between PD and PTs */
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 02f7710de470..9031f4b69824 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "rs780d.h" 28#include "rs780d.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "rs780_dpm.h" 30#include "rs780_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index e7045b085715..6a5c233361e9 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "rv6xxd.h" 28#include "rv6xxd.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "rv6xx_dpm.h" 30#include "rv6xx_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 3c76e1dcdf04..755a8f96fe46 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "rv770d.h" 28#include "rv770d.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "rv770_dpm.h" 30#include "rv770_dpm.h"
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 9e4d5d7d348f..a53c2e79d9cb 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -23,6 +23,7 @@
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h"
26#include "sid.h" 27#include "sid.h"
27#include "r600_dpm.h" 28#include "r600_dpm.h"
28#include "si_dpm.h" 29#include "si_dpm.h"
@@ -2916,6 +2917,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2916 bool disable_sclk_switching = false; 2917 bool disable_sclk_switching = false;
2917 u32 mclk, sclk; 2918 u32 mclk, sclk;
2918 u16 vddc, vddci; 2919 u16 vddc, vddci;
2920 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2919 int i; 2921 int i;
2920 2922
2921 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2923 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2949,6 +2951,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2949 } 2951 }
2950 } 2952 }
2951 2953
2954 /* limit clocks to max supported clocks based on voltage dependency tables */
2955 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2956 &max_sclk_vddc);
2957 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2958 &max_mclk_vddci);
2959 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2960 &max_mclk_vddc);
2961
2962 for (i = 0; i < ps->performance_level_count; i++) {
2963 if (max_sclk_vddc) {
2964 if (ps->performance_levels[i].sclk > max_sclk_vddc)
2965 ps->performance_levels[i].sclk = max_sclk_vddc;
2966 }
2967 if (max_mclk_vddci) {
2968 if (ps->performance_levels[i].mclk > max_mclk_vddci)
2969 ps->performance_levels[i].mclk = max_mclk_vddci;
2970 }
2971 if (max_mclk_vddc) {
2972 if (ps->performance_levels[i].mclk > max_mclk_vddc)
2973 ps->performance_levels[i].mclk = max_mclk_vddc;
2974 }
2975 }
2976
2952 /* XXX validate the min clocks required for display */ 2977 /* XXX validate the min clocks required for display */
2953 2978
2954 if (disable_mclk_switching) { 2979 if (disable_mclk_switching) {
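
Note: the new btc_get_max_clock_from_voltage_dependency_table() helper scans a voltage-dependency table for its highest clock, and the si_dpm.c hunk above then caps each performance level's sclk/mclk to those maxima (a cap of 0, i.e. an empty table, leaves the level untouched). Below is a simplified standalone sketch of the same scan-and-clamp logic, with made-up table types and values in place of the radeon structures.

#include <stdio.h>

struct dep_entry { unsigned int clk; unsigned short v; };
struct dep_table { unsigned int count; const struct dep_entry *entries; };

/* Highest clock listed in the table; 0 if the table is missing or empty. */
static unsigned int max_clock(const struct dep_table *t)
{
	unsigned int i, clock = 0;

	if (!t || !t->count)
		return 0;
	for (i = 0; i < t->count; i++)
		if (t->entries[i].clk > clock)
			clock = t->entries[i].clk;
	return clock;
}

/* Clamp a requested clock; cap == 0 means "no table data, don't touch it". */
static unsigned int clamp_clock(unsigned int clk, unsigned int cap)
{
	return (cap && clk > cap) ? cap : clk;
}

int main(void)
{
	const struct dep_entry entries[] = {
		{ 30000, 900 }, { 60000, 1000 }, { 80000, 1100 },
	};
	const struct dep_table sclk_table = { 3, entries };
	unsigned int cap = max_clock(&sclk_table);

	printf("cap=%u, 90000 -> %u, 50000 -> %u\n",
	       cap, clamp_clock(90000, cap), clamp_clock(50000, cap));
	return 0;
}
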
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 3f0e8d7b8dbe..1f8a8833e1be 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -23,6 +23,7 @@
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h"
26#include "sumod.h" 27#include "sumod.h"
27#include "r600_dpm.h" 28#include "r600_dpm.h"
28#include "cypress_dpm.h" 29#include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 57f780053b3e..b4ec5c4e7969 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -23,6 +23,7 @@
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h"
26#include "trinityd.h" 27#include "trinityd.h"
27#include "r600_dpm.h" 28#include "r600_dpm.h"
28#include "trinity_dpm.h" 29#include "trinity_dpm.h"
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8f5cec67c47d..d395b0bef73b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -709,6 +709,7 @@ out:
709 709
710static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 710static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
711 uint32_t mem_type, 711 uint32_t mem_type,
712 const struct ttm_place *place,
712 bool interruptible, 713 bool interruptible,
713 bool no_wait_gpu) 714 bool no_wait_gpu)
714{ 715{
@@ -720,8 +721,21 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
720 spin_lock(&glob->lru_lock); 721 spin_lock(&glob->lru_lock);
721 list_for_each_entry(bo, &man->lru, lru) { 722 list_for_each_entry(bo, &man->lru, lru) {
722 ret = __ttm_bo_reserve(bo, false, true, false, NULL); 723 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
723 if (!ret) 724 if (!ret) {
725 if (place && (place->fpfn || place->lpfn)) {
726 /* Don't evict this BO if it's outside of the
727 * requested placement range
728 */
729 if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
730 (place->lpfn && place->lpfn <= bo->mem.start)) {
731 __ttm_bo_unreserve(bo);
732 ret = -EBUSY;
733 continue;
734 }
735 }
736
724 break; 737 break;
738 }
725 } 739 }
726 740
727 if (ret) { 741 if (ret) {
@@ -782,7 +796,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
782 return ret; 796 return ret;
783 if (mem->mm_node) 797 if (mem->mm_node)
784 break; 798 break;
785 ret = ttm_mem_evict_first(bdev, mem_type, 799 ret = ttm_mem_evict_first(bdev, mem_type, place,
786 interruptible, no_wait_gpu); 800 interruptible, no_wait_gpu);
787 if (unlikely(ret != 0)) 801 if (unlikely(ret != 0))
788 return ret; 802 return ret;
@@ -994,9 +1008,9 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
994 1008
995 for (i = 0; i < placement->num_placement; i++) { 1009 for (i = 0; i < placement->num_placement; i++) {
996 const struct ttm_place *heap = &placement->placement[i]; 1010 const struct ttm_place *heap = &placement->placement[i];
997 if (mem->mm_node && heap->lpfn != 0 && 1011 if (mem->mm_node &&
998 (mem->start < heap->fpfn || 1012 (mem->start < heap->fpfn ||
999 mem->start + mem->num_pages > heap->lpfn)) 1013 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1000 continue; 1014 continue;
1001 1015
1002 *new_flags = heap->flags; 1016 *new_flags = heap->flags;
@@ -1007,9 +1021,9 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1007 1021
1008 for (i = 0; i < placement->num_busy_placement; i++) { 1022 for (i = 0; i < placement->num_busy_placement; i++) {
1009 const struct ttm_place *heap = &placement->busy_placement[i]; 1023 const struct ttm_place *heap = &placement->busy_placement[i];
1010 if (mem->mm_node && heap->lpfn != 0 && 1024 if (mem->mm_node &&
1011 (mem->start < heap->fpfn || 1025 (mem->start < heap->fpfn ||
1012 mem->start + mem->num_pages > heap->lpfn)) 1026 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1013 continue; 1027 continue;
1014 1028
1015 *new_flags = heap->flags; 1029 *new_flags = heap->flags;
@@ -1233,7 +1247,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1233 spin_lock(&glob->lru_lock); 1247 spin_lock(&glob->lru_lock);
1234 while (!list_empty(&man->lru)) { 1248 while (!list_empty(&man->lru)) {
1235 spin_unlock(&glob->lru_lock); 1249 spin_unlock(&glob->lru_lock);
1236 ret = ttm_mem_evict_first(bdev, mem_type, false, false); 1250 ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
1237 if (ret) { 1251 if (ret) {
1238 if (allow_errors) { 1252 if (allow_errors) {
1239 return ret; 1253 return ret;
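
Note: the ttm_mem_evict_first() change above passes the requested placement down so that buffers lying entirely outside the [fpfn, lpfn) window are skipped rather than evicted for nothing. A tiny standalone sketch of that overlap test follows; units are simplified (in TTM these are page frame numbers, and lpfn == 0 means no upper bound).

#include <stdio.h>

/* Would evicting a buffer occupying [start, end) help satisfy a request
 * constrained to [fpfn, lpfn)?  lpfn == 0 means "no upper limit". */
static int worth_evicting(unsigned long start, unsigned long end,
			  unsigned long fpfn, unsigned long lpfn)
{
	if (fpfn >= end)		/* buffer ends below the window */
		return 0;
	if (lpfn && lpfn <= start)	/* buffer starts above the window */
		return 0;
	return 1;
}

int main(void)
{
	/* window is pages [256, 512) */
	printf("%d %d %d\n",
	       worth_evicting(0, 128, 256, 512),     /* 0: wholly below */
	       worth_evicting(600, 700, 256, 512),   /* 0: wholly above */
	       worth_evicting(200, 300, 256, 512));  /* 1: overlaps */
	return 0;
}
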
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 84c3cb15ccdd..8bf61d295ffd 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -946,6 +946,12 @@ static const char *keys[KEY_MAX + 1] = {
946 [KEY_BRIGHTNESS_MIN] = "BrightnessMin", 946 [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
947 [KEY_BRIGHTNESS_MAX] = "BrightnessMax", 947 [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
948 [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", 948 [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
949 [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev",
950 [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext",
951 [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup",
952 [KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup",
953 [KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept",
954 [KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel",
949}; 955};
950 956
951static const char *relatives[REL_MAX + 1] = { 957static const char *relatives[REL_MAX + 1] = {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index cd9c9e96cf0e..e23ab8b30626 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -298,6 +298,8 @@
298 298
299#define USB_VENDOR_ID_ELAN 0x04f3 299#define USB_VENDOR_ID_ELAN 0x04f3
300#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 300#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
301#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
302#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
301 303
302#define USB_VENDOR_ID_ELECOM 0x056e 304#define USB_VENDOR_ID_ELECOM 0x056e
303#define USB_DEVICE_ID_ELECOM_BM084 0x0061 305#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 2df7fddbd119..725f22ca47fc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -695,7 +695,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
695 break; 695 break;
696 696
697 case 0x5b: /* TransducerSerialNumber */ 697 case 0x5b: /* TransducerSerialNumber */
698 set_bit(MSC_SERIAL, input->mscbit); 698 usage->type = EV_MSC;
699 usage->code = MSC_SERIAL;
700 bit = input->mscbit;
701 max = MSC_MAX;
699 break; 702 break;
700 703
701 default: goto unknown; 704 default: goto unknown;
@@ -862,6 +865,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
862 case 0x28b: map_key_clear(KEY_FORWARDMAIL); break; 865 case 0x28b: map_key_clear(KEY_FORWARDMAIL); break;
863 case 0x28c: map_key_clear(KEY_SEND); break; 866 case 0x28c: map_key_clear(KEY_SEND); break;
864 867
868 case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
869 case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
870 case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
871 case 0x2ca: map_key_clear(KEY_KBDINPUTASSIST_NEXTGROUP); break;
872 case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
873 case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
874
865 default: goto ignore; 875 default: goto ignore;
866 } 876 }
867 break; 877 break;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index f3cb5b0a4345..5014bb567b29 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -71,6 +71,8 @@ static const struct hid_blacklist {
71 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, 71 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
72 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 72 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
73 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, 73 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
74 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
74 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 76 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 77 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
76 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
diff --git a/drivers/hwmon/menf21bmc_hwmon.c b/drivers/hwmon/menf21bmc_hwmon.c
index c92229d321c9..afc6b58eaa62 100644
--- a/drivers/hwmon/menf21bmc_hwmon.c
+++ b/drivers/hwmon/menf21bmc_hwmon.c
@@ -21,6 +21,7 @@
21#include <linux/jiffies.h> 21#include <linux/jiffies.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/i2c.h> 23#include <linux/i2c.h>
24#include <linux/err.h>
24 25
25#define DRV_NAME "menf21bmc_hwmon" 26#define DRV_NAME "menf21bmc_hwmon"
26 27
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bda5994ceb68..8b72cf392b34 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1173,18 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1173 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], 1173 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1174 &mflow->reg_id[i]); 1174 &mflow->reg_id[i]);
1175 if (err) 1175 if (err)
1176 goto err_free; 1176 goto err_create_flow;
1177 i++; 1177 i++;
1178 } 1178 }
1179 1179
1180 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { 1180 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1181 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]); 1181 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
1182 if (err) 1182 if (err)
1183 goto err_free; 1183 goto err_create_flow;
1184 i++;
1184 } 1185 }
1185 1186
1186 return &mflow->ibflow; 1187 return &mflow->ibflow;
1187 1188
1189err_create_flow:
1190 while (i) {
1191 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
1192 i--;
1193 }
1188err_free: 1194err_free:
1189 kfree(mflow); 1195 kfree(mflow);
1190 return ERR_PTR(err); 1196 return ERR_PTR(err);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 0bea5776bcbc..3effa931fce2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2185,7 +2185,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2185 isert_cmd->tx_desc.num_sge = 2; 2185 isert_cmd->tx_desc.num_sge = 2;
2186 } 2186 }
2187 2187
2188 isert_init_send_wr(isert_conn, isert_cmd, send_wr, true); 2188 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2189 2189
2190 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2190 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2191 2191
@@ -2871,7 +2871,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2871 &isert_cmd->tx_desc.iscsi_header); 2871 &isert_cmd->tx_desc.iscsi_header);
2872 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2872 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2873 isert_init_send_wr(isert_conn, isert_cmd, 2873 isert_init_send_wr(isert_conn, isert_cmd,
2874 &isert_cmd->tx_desc.send_wr, true); 2874 &isert_cmd->tx_desc.send_wr, false);
2875 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; 2875 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2876 wr->send_wr_num += 1; 2876 wr->send_wr_num += 1;
2877 } 2877 }
@@ -3140,7 +3140,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3140 3140
3141accept_wait: 3141accept_wait:
3142 ret = down_interruptible(&isert_np->np_sem); 3142 ret = down_interruptible(&isert_np->np_sem);
3143 if (max_accept > 5) 3143 if (ret || max_accept > 5)
3144 return -ENODEV; 3144 return -ENODEV;
3145 3145
3146 spin_lock_bh(&np->np_thread_lock); 3146 spin_lock_bh(&np->np_thread_lock);
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index 62abe2c16670..f8502bb29176 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -70,7 +70,7 @@ static int opencores_kbd_probe(struct platform_device *pdev)
70 70
71 opencores_kbd->addr = devm_ioremap_resource(&pdev->dev, res); 71 opencores_kbd->addr = devm_ioremap_resource(&pdev->dev, res);
72 if (IS_ERR(opencores_kbd->addr)) 72 if (IS_ERR(opencores_kbd->addr))
73 error = PTR_ERR(opencores_kbd->addr); 73 return PTR_ERR(opencores_kbd->addr);
74 74
75 input->name = pdev->name; 75 input->name = pdev->name;
76 input->phys = "opencores-kbd/input0"; 76 input->phys = "opencores-kbd/input0";
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index c6727dda68f2..ef5e67fb567e 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -86,7 +86,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
86 .max_cols = 8, 86 .max_cols = 8,
87 .max_rows = 12, 87 .max_rows = 12,
88 .col_gpios = 0x0000ff, /* GPIO 0 - 7*/ 88 .col_gpios = 0x0000ff, /* GPIO 0 - 7*/
89 .row_gpios = 0x1fef00, /* GPIO 8-14, 16-20 */ 89 .row_gpios = 0x1f7f00, /* GPIO 8-14, 16-20 */
90 }, 90 },
91 [STMPE2403] = { 91 [STMPE2403] = {
92 .auto_increment = true, 92 .auto_increment = true,
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 719410feb84b..afed8e2b2f94 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1381,7 +1381,7 @@ static ssize_t ims_pcu_ofn_reg_addr_store(struct device *dev,
1381 pcu->ofn_reg_addr = value; 1381 pcu->ofn_reg_addr = value;
1382 mutex_unlock(&pcu->cmd_mutex); 1382 mutex_unlock(&pcu->cmd_mutex);
1383 1383
1384 return error ?: count; 1384 return count;
1385} 1385}
1386 1386
1387static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR, 1387static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR,
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 7b1fde93799e..ef6a9d650d69 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -194,7 +194,7 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
194 struct ff_effect *effect) 194 struct ff_effect *effect)
195{ 195{
196 struct max77693_haptic *haptic = input_get_drvdata(dev); 196 struct max77693_haptic *haptic = input_get_drvdata(dev);
197 uint64_t period_mag_multi; 197 u64 period_mag_multi;
198 198
199 haptic->magnitude = effect->u.rumble.strong_magnitude; 199 haptic->magnitude = effect->u.rumble.strong_magnitude;
200 if (!haptic->magnitude) 200 if (!haptic->magnitude)
@@ -205,8 +205,7 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
205 * The formula to convert magnitude to pwm_duty as follows: 205 * The formula to convert magnitude to pwm_duty as follows:
206 * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF) 206 * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF)
207 */ 207 */
208 period_mag_multi = (int64_t)(haptic->pwm_dev->period * 208 period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude;
209 haptic->magnitude);
210 haptic->pwm_duty = (unsigned int)(period_mag_multi >> 209 haptic->pwm_duty = (unsigned int)(period_mag_multi >>
211 MAX_MAGNITUDE_SHIFT); 210 MAX_MAGNITUDE_SHIFT);
212 211
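
Note: the max77693-haptic change above computes pwm_duty = (magnitude * pwm_period) >> 16 with the cast applied before the multiplication, so the product is formed in 64 bits instead of overflowing a 32-bit intermediate for large periods. A quick worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t period = 38000;	/* hypothetical PWM period in ns */
	uint32_t magnitude = 0x8000;	/* half of the 0xFFFF full scale */

	/* cast one operand first so the multiply happens in 64 bits */
	uint64_t duty = ((uint64_t)period * magnitude) >> 16;

	printf("duty = %llu ns (half of the period)\n",
	       (unsigned long long)duty);	/* prints 19000 */
	return 0;
}
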
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 735604753568..e097f1ab427f 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -55,7 +55,7 @@ static int soc_button_lookup_gpio(struct device *dev, int acpi_index)
55 struct gpio_desc *desc; 55 struct gpio_desc *desc;
56 int gpio; 56 int gpio;
57 57
58 desc = gpiod_get_index(dev, KBUILD_MODNAME, acpi_index); 58 desc = gpiod_get_index(dev, KBUILD_MODNAME, acpi_index, GPIOD_ASIS);
59 if (IS_ERR(desc)) 59 if (IS_ERR(desc))
60 return PTR_ERR(desc); 60 return PTR_ERR(desc);
61 61
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 26994f6a2b2a..95a3a6e2faf6 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1536,16 +1536,9 @@ static int psmouse_reconnect(struct serio *serio)
1536{ 1536{
1537 struct psmouse *psmouse = serio_get_drvdata(serio); 1537 struct psmouse *psmouse = serio_get_drvdata(serio);
1538 struct psmouse *parent = NULL; 1538 struct psmouse *parent = NULL;
1539 struct serio_driver *drv = serio->drv;
1540 unsigned char type; 1539 unsigned char type;
1541 int rc = -1; 1540 int rc = -1;
1542 1541
1543 if (!drv || !psmouse) {
1544 psmouse_dbg(psmouse,
1545 "reconnect request, but serio is disconnected, ignoring...\n");
1546 return -1;
1547 }
1548
1549 mutex_lock(&psmouse_mutex); 1542 mutex_lock(&psmouse_mutex);
1550 1543
1551 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { 1544 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index 38298232124f..abd494411e69 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -128,7 +128,7 @@ static void vsxxxaa_drop_bytes(struct vsxxxaa *mouse, int num)
128 if (num >= mouse->count) { 128 if (num >= mouse->count) {
129 mouse->count = 0; 129 mouse->count = 0;
130 } else { 130 } else {
131 memmove(mouse->buf, mouse->buf + num - 1, BUFLEN - num); 131 memmove(mouse->buf, mouse->buf + num, BUFLEN - num);
132 mouse->count -= num; 132 mouse->count -= num;
133 } 133 }
134} 134}
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index cce69d6b9587..58781c8a8aec 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -37,7 +37,7 @@ static irqreturn_t altera_ps2_rxint(int irq, void *dev_id)
37{ 37{
38 struct ps2if *ps2if = dev_id; 38 struct ps2if *ps2if = dev_id;
39 unsigned int status; 39 unsigned int status;
40 int handled = IRQ_NONE; 40 irqreturn_t handled = IRQ_NONE;
41 41
42 while ((status = readl(ps2if->base)) & 0xffff0000) { 42 while ((status = readl(ps2if->base)) & 0xffff0000) {
43 serio_interrupt(ps2if->io, status & 0xff, 0); 43 serio_interrupt(ps2if->io, status & 0xff, 0);
@@ -74,7 +74,7 @@ static void altera_ps2_close(struct serio *io)
74{ 74{
75 struct ps2if *ps2if = io->port_data; 75 struct ps2if *ps2if = io->port_data;
76 76
77 writel(0, ps2if->base); /* disable rx irq */ 77 writel(0, ps2if->base + 4); /* disable rx irq */
78} 78}
79 79
80/* 80/*
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a0bcbb64d06d..faeeb1372462 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -207,17 +207,282 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
207}; 207};
208 208
209/* 209/*
210 * Some laptops do implement active multiplexing mode correctly; 210 * Some Fujitsu notebooks are having trouble with touchpads if
211 * unfortunately they are in minority. 211 * active multiplexing mode is activated. Luckily they don't have
212 * external PS/2 ports so we can safely disable it.
213 * ... apparently some Toshibas don't like MUX mode either and
214 * die horrible death on reboot.
212 */ 215 */
213static const struct dmi_system_id __initconst i8042_dmi_mux_table[] = { 216static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
217 {
218 /* Fujitsu Lifebook P7010/P7010D */
219 .matches = {
220 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
221 DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
222 },
223 },
224 {
225 /* Fujitsu Lifebook P7010 */
226 .matches = {
227 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
228 DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
229 },
230 },
231 {
232 /* Fujitsu Lifebook P5020D */
233 .matches = {
234 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
235 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
236 },
237 },
238 {
239 /* Fujitsu Lifebook S2000 */
240 .matches = {
241 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
242 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
243 },
244 },
245 {
246 /* Fujitsu Lifebook S6230 */
247 .matches = {
248 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
249 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
250 },
251 },
252 {
253 /* Fujitsu T70H */
254 .matches = {
255 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
256 DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
257 },
258 },
259 {
260 /* Fujitsu-Siemens Lifebook T3010 */
261 .matches = {
262 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
263 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
264 },
265 },
266 {
267 /* Fujitsu-Siemens Lifebook E4010 */
268 .matches = {
269 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
270 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
271 },
272 },
273 {
274 /* Fujitsu-Siemens Amilo Pro 2010 */
275 .matches = {
276 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
277 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
278 },
279 },
280 {
281 /* Fujitsu-Siemens Amilo Pro 2030 */
282 .matches = {
283 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
284 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
285 },
286 },
287 {
288 /*
289 * No data is coming from the touchscreen unless KBC
290 * is in legacy mode.
291 */
292 /* Panasonic CF-29 */
293 .matches = {
294 DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
295 DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
296 },
297 },
298 {
299 /*
300 * HP Pavilion DV4017EA -
301 * errors on MUX ports are reported without raising AUXDATA
302 * causing "spurious NAK" messages.
303 */
304 .matches = {
305 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
306 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
307 },
308 },
309 {
310 /*
311 * HP Pavilion ZT1000 -
312 * like DV4017EA does not raise AUXERR for errors on MUX ports.
313 */
314 .matches = {
315 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
316 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
317 DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
318 },
319 },
320 {
321 /*
322 * HP Pavilion DV4270ca -
323 * like DV4017EA does not raise AUXERR for errors on MUX ports.
324 */
325 .matches = {
326 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
327 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
328 },
329 },
330 {
331 .matches = {
332 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
333 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
334 },
335 },
336 {
337 .matches = {
338 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
339 DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
340 },
341 },
342 {
343 .matches = {
344 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
345 DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
346 },
347 },
348 {
349 .matches = {
350 DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
351 DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
352 },
353 },
354 {
355 /* Sharp Actius MM20 */
356 .matches = {
357 DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
358 DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
359 },
360 },
361 {
362 /* Sony Vaio FS-115b */
363 .matches = {
364 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
365 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
366 },
367 },
368 {
369 /*
370 * Sony Vaio FZ-240E -
371 * reset and GET ID commands issued via KBD port are
372 * sometimes being delivered to AUX3.
373 */
374 .matches = {
375 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
376 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
377 },
378 },
214 { 379 {
215 /* 380 /*
216 * Panasonic CF-18 needs to be in MUX mode since the 381 * Most (all?) VAIOs do not have external PS/2 ports nor
217 * touchscreen is on serio3 and it also has touchpad. 382 * they implement active multiplexing properly, and
383 * MUX discovery usually messes up keyboard/touchpad.
218 */ 384 */
219 .matches = { 385 .matches = {
220 DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), 386 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
387 DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
388 },
389 },
390 {
391 /* Amoi M636/A737 */
392 .matches = {
393 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
394 DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
395 },
396 },
397 {
398 /* Lenovo 3000 n100 */
399 .matches = {
400 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
401 DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
402 },
403 },
404 {
405 .matches = {
406 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
407 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
408 },
409 },
410 {
411 /* Acer Aspire 5710 */
412 .matches = {
413 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
414 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
415 },
416 },
417 {
418 /* Gericom Bellagio */
419 .matches = {
420 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
421 DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
422 },
423 },
424 {
425 /* IBM 2656 */
426 .matches = {
427 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
428 DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
429 },
430 },
431 {
432 /* Dell XPS M1530 */
433 .matches = {
434 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
435 DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
436 },
437 },
438 {
439 /* Compal HEL80I */
440 .matches = {
441 DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
442 DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
443 },
444 },
445 {
446 /* Dell Vostro 1510 */
447 .matches = {
448 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
449 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
450 },
451 },
452 {
453 /* Acer Aspire 5536 */
454 .matches = {
455 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
456 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
457 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
458 },
459 },
460 {
461 /* Dell Vostro V13 */
462 .matches = {
463 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
464 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
465 },
466 },
467 {
468 /* Newer HP Pavilion dv4 models */
469 .matches = {
470 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
471 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
472 },
473 },
474 {
475 /* Asus X450LCP */
476 .matches = {
477 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
478 DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
479 },
480 },
481 {
482 /* Avatar AVIU-145A6 */
483 .matches = {
484 DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
485 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
221 }, 486 },
222 }, 487 },
223 { } 488 { }
@@ -364,6 +629,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
364 }, 629 },
365 }, 630 },
366 { 631 {
632 /* Fujitsu A544 laptop */
633 /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
634 .matches = {
635 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
636 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
637 },
638 },
639 {
640 /* Fujitsu AH544 laptop */
641 /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
642 .matches = {
643 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
644 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
645 },
646 },
647 {
367 /* Fujitsu U574 laptop */ 648 /* Fujitsu U574 laptop */
368 /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ 649 /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
369 .matches = { 650 .matches = {
@@ -740,8 +1021,8 @@ static int __init i8042_platform_init(void)
740 if (dmi_check_system(i8042_dmi_noloop_table)) 1021 if (dmi_check_system(i8042_dmi_noloop_table))
741 i8042_noloop = true; 1022 i8042_noloop = true;
742 1023
743 if (dmi_check_system(i8042_dmi_mux_table)) 1024 if (dmi_check_system(i8042_dmi_nomux_table))
744 i8042_nomux = false; 1025 i8042_nomux = true;
745 1026
746 if (dmi_check_system(i8042_dmi_notimeout_table)) 1027 if (dmi_check_system(i8042_dmi_notimeout_table))
747 i8042_notimeout = true; 1028 i8042_notimeout = true;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 9a97c2b10926..f5a98af3b325 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -39,7 +39,7 @@ static bool i8042_noaux;
39module_param_named(noaux, i8042_noaux, bool, 0); 39module_param_named(noaux, i8042_noaux, bool, 0);
40MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port."); 40MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port.");
41 41
42static bool i8042_nomux = true; 42static bool i8042_nomux;
43module_param_named(nomux, i8042_nomux, bool, 0); 43module_param_named(nomux, i8042_nomux, bool, 0);
44MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present."); 44MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present.");
45 45
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index d0ef91fc87d1..b1ae77995968 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -70,11 +70,11 @@
70 * Documentation/input/input-programming.txt for more details. 70 * Documentation/input/input-programming.txt for more details.
71 */ 71 */
72 72
73static int abs_x[3] = {350, 3900, 5}; 73static int abs_x[3] = {150, 4000, 5};
74module_param_array(abs_x, int, NULL, 0); 74module_param_array(abs_x, int, NULL, 0);
75MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz"); 75MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz");
76 76
77static int abs_y[3] = {320, 3750, 40}; 77static int abs_y[3] = {200, 4000, 40};
78module_param_array(abs_y, int, NULL, 0); 78module_param_array(abs_y, int, NULL, 0);
79MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz"); 79MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz");
80 80
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index aa29198fca3e..7440c58b8e6f 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -9,26 +9,21 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/ctype.h>
13#include <linux/kernel.h> 13#include <linux/device.h>
14#include <linux/err.h>
14#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/leds.h>
15#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/module.h>
20#include <linux/slab.h>
16#include <linux/spinlock.h> 21#include <linux/spinlock.h>
17#include <linux/device.h>
18#include <linux/timer.h> 22#include <linux/timer.h>
19#include <linux/err.h>
20#include <linux/ctype.h>
21#include <linux/leds.h>
22#include "leds.h" 23#include "leds.h"
23 24
24static struct class *leds_class; 25static struct class *leds_class;
25 26
26static void led_update_brightness(struct led_classdev *led_cdev)
27{
28 if (led_cdev->brightness_get)
29 led_cdev->brightness = led_cdev->brightness_get(led_cdev);
30}
31
32static ssize_t brightness_show(struct device *dev, 27static ssize_t brightness_show(struct device *dev,
33 struct device_attribute *attr, char *buf) 28 struct device_attribute *attr, char *buf)
34{ 29{
@@ -59,14 +54,14 @@ static ssize_t brightness_store(struct device *dev,
59} 54}
60static DEVICE_ATTR_RW(brightness); 55static DEVICE_ATTR_RW(brightness);
61 56
62static ssize_t led_max_brightness_show(struct device *dev, 57static ssize_t max_brightness_show(struct device *dev,
63 struct device_attribute *attr, char *buf) 58 struct device_attribute *attr, char *buf)
64{ 59{
65 struct led_classdev *led_cdev = dev_get_drvdata(dev); 60 struct led_classdev *led_cdev = dev_get_drvdata(dev);
66 61
67 return sprintf(buf, "%u\n", led_cdev->max_brightness); 62 return sprintf(buf, "%u\n", led_cdev->max_brightness);
68} 63}
69static DEVICE_ATTR(max_brightness, 0444, led_max_brightness_show, NULL); 64static DEVICE_ATTR_RO(max_brightness);
70 65
71#ifdef CONFIG_LEDS_TRIGGERS 66#ifdef CONFIG_LEDS_TRIGGERS
72static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store); 67static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 71b40d3bf776..aaa8eba9099f 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -12,10 +12,11 @@
12 */ 12 */
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/leds.h>
15#include <linux/list.h> 16#include <linux/list.h>
16#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/mutex.h>
17#include <linux/rwsem.h> 19#include <linux/rwsem.h>
18#include <linux/leds.h>
19#include "leds.h" 20#include "leds.h"
20 21
21DECLARE_RWSEM(leds_list_lock); 22DECLARE_RWSEM(leds_list_lock);
@@ -126,3 +127,19 @@ void led_set_brightness(struct led_classdev *led_cdev,
126 __led_set_brightness(led_cdev, brightness); 127 __led_set_brightness(led_cdev, brightness);
127} 128}
128EXPORT_SYMBOL(led_set_brightness); 129EXPORT_SYMBOL(led_set_brightness);
130
131int led_update_brightness(struct led_classdev *led_cdev)
132{
133 int ret = 0;
134
135 if (led_cdev->brightness_get) {
136 ret = led_cdev->brightness_get(led_cdev);
137 if (ret >= 0) {
138 led_cdev->brightness = ret;
139 return 0;
140 }
141 }
142
143 return ret;
144}
145EXPORT_SYMBOL(led_update_brightness);
diff --git a/drivers/leds/leds-gpio-register.c b/drivers/leds/leds-gpio-register.c
index 1c4ed5510f35..75717ba68ae0 100644
--- a/drivers/leds/leds-gpio-register.c
+++ b/drivers/leds/leds-gpio-register.c
@@ -7,9 +7,9 @@
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/err.h> 9#include <linux/err.h>
10#include <linux/leds.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
12#include <linux/leds.h>
13 13
14/** 14/**
15 * gpio_led_register_device - register a gpio-led device 15 * gpio_led_register_device - register a gpio-led device
@@ -28,6 +28,9 @@ struct platform_device *__init gpio_led_register_device(
28 struct platform_device *ret; 28 struct platform_device *ret;
29 struct gpio_led_platform_data _pdata = *pdata; 29 struct gpio_led_platform_data _pdata = *pdata;
30 30
31 if (!pdata->num_leds)
32 return ERR_PTR(-EINVAL);
33
31 _pdata.leds = kmemdup(pdata->leds, 34 _pdata.leds = kmemdup(pdata->leds,
32 pdata->num_leds * sizeof(*pdata->leds), GFP_KERNEL); 35 pdata->num_leds * sizeof(*pdata->leds), GFP_KERNEL);
33 if (!_pdata.leds) 36 if (!_pdata.leds)
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 57ff20fecf57..b4518c8751c8 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -10,17 +10,17 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 * 11 *
12 */ 12 */
13#include <linux/kernel.h> 13#include <linux/err.h>
14#include <linux/platform_device.h>
15#include <linux/gpio.h> 14#include <linux/gpio.h>
15#include <linux/kernel.h>
16#include <linux/leds.h> 16#include <linux/leds.h>
17#include <linux/module.h>
17#include <linux/of.h> 18#include <linux/of.h>
18#include <linux/of_platform.h>
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20#include <linux/of_platform.h>
21#include <linux/platform_device.h>
20#include <linux/slab.h> 22#include <linux/slab.h>
21#include <linux/workqueue.h> 23#include <linux/workqueue.h>
22#include <linux/module.h>
23#include <linux/err.h>
24 24
25struct gpio_led_data { 25struct gpio_led_data {
26 struct led_classdev cdev; 26 struct led_classdev cdev;
@@ -36,7 +36,7 @@ struct gpio_led_data {
36 36
37static void gpio_led_work(struct work_struct *work) 37static void gpio_led_work(struct work_struct *work)
38{ 38{
39 struct gpio_led_data *led_dat = 39 struct gpio_led_data *led_dat =
40 container_of(work, struct gpio_led_data, work); 40 container_of(work, struct gpio_led_data, work);
41 41
42 if (led_dat->blinking) { 42 if (led_dat->blinking) {
@@ -235,14 +235,12 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
235} 235}
236#endif /* CONFIG_OF_GPIO */ 236#endif /* CONFIG_OF_GPIO */
237 237
238
239static int gpio_led_probe(struct platform_device *pdev) 238static int gpio_led_probe(struct platform_device *pdev)
240{ 239{
241 struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev); 240 struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
242 struct gpio_leds_priv *priv; 241 struct gpio_leds_priv *priv;
243 int i, ret = 0; 242 int i, ret = 0;
244 243
245
246 if (pdata && pdata->num_leds) { 244 if (pdata && pdata->num_leds) {
247 priv = devm_kzalloc(&pdev->dev, 245 priv = devm_kzalloc(&pdev->dev,
248 sizeof_gpio_leds_priv(pdata->num_leds), 246 sizeof_gpio_leds_priv(pdata->num_leds),
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 8e1abdcd4c9d..53144fb96167 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -335,7 +335,8 @@ static int lp3944_configure(struct i2c_client *client,
335 } 335 }
336 336
337 /* to expose the default value to userspace */ 337 /* to expose the default value to userspace */
338 led->ldev.brightness = led->status; 338 led->ldev.brightness =
339 (enum led_brightness) led->status;
339 340
340 /* Set the default led status */ 341 /* Set the default led status */
341 err = lp3944_led_set(led, led->status); 342 err = lp3944_led_set(led, led->status);
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 35812e3a37f2..c86c41826476 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -48,7 +48,7 @@ static void gpio_trig_work(struct work_struct *work)
48 if (!gpio_data->gpio) 48 if (!gpio_data->gpio)
49 return; 49 return;
50 50
51 tmp = gpio_get_value(gpio_data->gpio); 51 tmp = gpio_get_value_cansleep(gpio_data->gpio);
52 if (gpio_data->inverted) 52 if (gpio_data->inverted)
53 tmp = !tmp; 53 tmp = !tmp;
54 54
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 6d184dbcaca8..94ed7cefb14d 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -1,3 +1,7 @@
1# Generic MAILBOX API
2
3obj-$(CONFIG_MAILBOX) += mailbox.o
4
1obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o 5obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
2 6
3obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o 7obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
new file mode 100644
index 000000000000..afcb430508ec
--- /dev/null
+++ b/drivers/mailbox/mailbox.c
@@ -0,0 +1,465 @@
1/*
2 * Mailbox: Common code for Mailbox controllers and users
3 *
4 * Copyright (C) 2013-2014 Linaro Ltd.
5 * Author: Jassi Brar <jassisinghbrar@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/spinlock.h>
14#include <linux/mutex.h>
15#include <linux/delay.h>
16#include <linux/slab.h>
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/device.h>
20#include <linux/bitops.h>
21#include <linux/mailbox_client.h>
22#include <linux/mailbox_controller.h>
23
24#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
25#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
 26#define TXDONE_BY_ACK	BIT(2) /* S/W ACK received by Client ticks the TX */
27
28static LIST_HEAD(mbox_cons);
29static DEFINE_MUTEX(con_mutex);
30
31static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
32{
33 int idx;
34 unsigned long flags;
35
36 spin_lock_irqsave(&chan->lock, flags);
37
38 /* See if there is any space left */
39 if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
40 spin_unlock_irqrestore(&chan->lock, flags);
41 return -ENOBUFS;
42 }
43
44 idx = chan->msg_free;
45 chan->msg_data[idx] = mssg;
46 chan->msg_count++;
47
48 if (idx == MBOX_TX_QUEUE_LEN - 1)
49 chan->msg_free = 0;
50 else
51 chan->msg_free++;
52
53 spin_unlock_irqrestore(&chan->lock, flags);
54
55 return idx;
56}
57
58static void msg_submit(struct mbox_chan *chan)
59{
60 unsigned count, idx;
61 unsigned long flags;
62 void *data;
63 int err;
64
65 spin_lock_irqsave(&chan->lock, flags);
66
67 if (!chan->msg_count || chan->active_req)
68 goto exit;
69
70 count = chan->msg_count;
71 idx = chan->msg_free;
72 if (idx >= count)
73 idx -= count;
74 else
75 idx += MBOX_TX_QUEUE_LEN - count;
76
77 data = chan->msg_data[idx];
78
79 /* Try to submit a message to the MBOX controller */
80 err = chan->mbox->ops->send_data(chan, data);
81 if (!err) {
82 chan->active_req = data;
83 chan->msg_count--;
84 }
85exit:
86 spin_unlock_irqrestore(&chan->lock, flags);
87}
88
89static void tx_tick(struct mbox_chan *chan, int r)
90{
91 unsigned long flags;
92 void *mssg;
93
94 spin_lock_irqsave(&chan->lock, flags);
95 mssg = chan->active_req;
96 chan->active_req = NULL;
97 spin_unlock_irqrestore(&chan->lock, flags);
98
99 /* Submit next message */
100 msg_submit(chan);
101
102 /* Notify the client */
103 if (mssg && chan->cl->tx_done)
104 chan->cl->tx_done(chan->cl, mssg, r);
105
106 if (chan->cl->tx_block)
107 complete(&chan->tx_complete);
108}
109
110static void poll_txdone(unsigned long data)
111{
112 struct mbox_controller *mbox = (struct mbox_controller *)data;
113 bool txdone, resched = false;
114 int i;
115
116 for (i = 0; i < mbox->num_chans; i++) {
117 struct mbox_chan *chan = &mbox->chans[i];
118
119 if (chan->active_req && chan->cl) {
120 resched = true;
121 txdone = chan->mbox->ops->last_tx_done(chan);
122 if (txdone)
123 tx_tick(chan, 0);
124 }
125 }
126
127 if (resched)
128 mod_timer(&mbox->poll, jiffies +
129 msecs_to_jiffies(mbox->txpoll_period));
130}
131
132/**
133 * mbox_chan_received_data - A way for controller driver to push data
134 * received from remote to the upper layer.
135 * @chan: Pointer to the mailbox channel on which RX happened.
136 * @mssg: Client specific message typecasted as void *
137 *
138 * After startup and before shutdown any data received on the chan
139 * is passed on to the API via atomic mbox_chan_received_data().
140 * The controller should ACK the RX only after this call returns.
141 */
142void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
143{
144	/* No buffering of the received data */
145 if (chan->cl->rx_callback)
146 chan->cl->rx_callback(chan->cl, mssg);
147}
148EXPORT_SYMBOL_GPL(mbox_chan_received_data);
149
150/**
151 * mbox_chan_txdone - A way for controller driver to notify the
152 * framework that the last TX has completed.
153 * @chan: Pointer to the mailbox chan on which TX happened.
154 * @r: Status of last TX - OK or ERROR
155 *
156 * The controller that has IRQ for TX ACK calls this atomic API
157 * to tick the TX state machine. It works only if txdone_irq
158 * is set by the controller.
159 */
160void mbox_chan_txdone(struct mbox_chan *chan, int r)
161{
162 if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
163 dev_err(chan->mbox->dev,
164 "Controller can't run the TX ticker\n");
165 return;
166 }
167
168 tx_tick(chan, r);
169}
170EXPORT_SYMBOL_GPL(mbox_chan_txdone);
171
172/**
173 * mbox_client_txdone - The way for a client to run the TX state machine.
174 * @chan: Mailbox channel assigned to this client.
175 * @r: Success status of last transmission.
176 *
177 * The client/protocol has received some 'ACK' packet and notifies
178 * the API that the last packet was sent successfully. This only works
179 * if the controller can't sense TX-Done.
180 */
181void mbox_client_txdone(struct mbox_chan *chan, int r)
182{
183 if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
184 dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
185 return;
186 }
187
188 tx_tick(chan, r);
189}
190EXPORT_SYMBOL_GPL(mbox_client_txdone);
191
192/**
193 * mbox_client_peek_data - A way for client driver to pull data
194 * received from remote by the controller.
195 * @chan: Mailbox channel assigned to this client.
196 *
197 * A poke to the controller driver for any received data.
198 * The data is actually passed on to the client via
199 * mbox_chan_received_data().
200 * The call can be made from atomic context, so the controller's
201 * implementation of peek_data() must not sleep.
202 *
203 * Return: True, if the controller has some data and is going to push
204 * it after this call.
205 * False, if the controller has no data to be read.
206 */
207bool mbox_client_peek_data(struct mbox_chan *chan)
208{
209 if (chan->mbox->ops->peek_data)
210 return chan->mbox->ops->peek_data(chan);
211
212 return false;
213}
214EXPORT_SYMBOL_GPL(mbox_client_peek_data);
215
216/**
217 * mbox_send_message - For client to submit a message to be
218 * sent to the remote.
219 * @chan: Mailbox channel assigned to this client.
220 * @mssg: Client specific message typecasted as void *.
221 *
222 * For client to submit data to the controller destined for a remote
223 * processor. If the client had set 'tx_block', the call will return
224 * either when the remote receives the data or when 'tx_tout' millisecs
225 * run out.
226 * In non-blocking mode, the requests are buffered by the API and a
227 * non-negative token is returned for each queued request. If the request
228 * is not queued, a negative token is returned. Upon failure or successful
229 * TX, the API calls 'tx_done' from atomic context, from which the client
230 * could submit yet another request.
231 * The pointer to the message should be preserved until it is sent
232 * over the chan, i.e., tx_done() is made.
233 * This function could be called from atomic context as it simply
234 * queues the data and returns a token against the request.
235 *
236 * Return: Non-negative integer for successful submission (non-blocking mode)
237 * or transmission over chan (blocking mode).
238 * Negative value denotes failure.
239 */
240int mbox_send_message(struct mbox_chan *chan, void *mssg)
241{
242 int t;
243
244 if (!chan || !chan->cl)
245 return -EINVAL;
246
247 t = add_to_rbuf(chan, mssg);
248 if (t < 0) {
249 dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
250 return t;
251 }
252
253 msg_submit(chan);
254
255 if (chan->txdone_method == TXDONE_BY_POLL)
256 poll_txdone((unsigned long)chan->mbox);
257
258 if (chan->cl->tx_block && chan->active_req) {
259 unsigned long wait;
260 int ret;
261
262 if (!chan->cl->tx_tout) /* wait forever */
263 wait = msecs_to_jiffies(3600000);
264 else
265 wait = msecs_to_jiffies(chan->cl->tx_tout);
266
267 ret = wait_for_completion_timeout(&chan->tx_complete, wait);
268 if (ret == 0) {
269 t = -EIO;
270 tx_tick(chan, -EIO);
271 }
272 }
273
274 return t;
275}
276EXPORT_SYMBOL_GPL(mbox_send_message);
277
278/**
279 * mbox_request_channel - Request a mailbox channel.
280 * @cl: Identity of the client requesting the channel.
281 * @index: Index of mailbox specifier in 'mboxes' property.
282 *
283 * The Client specifies its requirements and capabilities while asking for
284 * a mailbox channel. It can't be called from atomic context.
285 * The channel is exclusively allocated and can't be used by another
286 * client before the owner calls mbox_free_channel.
287 * After assignment, any packet received on this channel will be
288 * handed over to the client via the 'rx_callback'.
289 * The framework holds a reference to the client, so the mbox_client
290 * structure shouldn't be modified until mbox_free_channel() returns.
291 *
292 * Return: Pointer to the channel assigned to the client if successful.
293 * ERR_PTR for request failure.
294 */
295struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
296{
297 struct device *dev = cl->dev;
298 struct mbox_controller *mbox;
299 struct of_phandle_args spec;
300 struct mbox_chan *chan;
301 unsigned long flags;
302 int ret;
303
304 if (!dev || !dev->of_node) {
305 pr_debug("%s: No owner device node\n", __func__);
306 return ERR_PTR(-ENODEV);
307 }
308
309 mutex_lock(&con_mutex);
310
311 if (of_parse_phandle_with_args(dev->of_node, "mboxes",
312 "#mbox-cells", index, &spec)) {
313 dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
314 mutex_unlock(&con_mutex);
315 return ERR_PTR(-ENODEV);
316 }
317
318 chan = NULL;
319 list_for_each_entry(mbox, &mbox_cons, node)
320 if (mbox->dev->of_node == spec.np) {
321 chan = mbox->of_xlate(mbox, &spec);
322 break;
323 }
324
325 of_node_put(spec.np);
326
327 if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) {
328 dev_dbg(dev, "%s: mailbox not free\n", __func__);
329 mutex_unlock(&con_mutex);
330 return ERR_PTR(-EBUSY);
331 }
332
333 spin_lock_irqsave(&chan->lock, flags);
334 chan->msg_free = 0;
335 chan->msg_count = 0;
336 chan->active_req = NULL;
337 chan->cl = cl;
338 init_completion(&chan->tx_complete);
339
340 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
341 chan->txdone_method |= TXDONE_BY_ACK;
342
343 spin_unlock_irqrestore(&chan->lock, flags);
344
345 ret = chan->mbox->ops->startup(chan);
346 if (ret) {
347 dev_err(dev, "Unable to startup the chan (%d)\n", ret);
348 mbox_free_channel(chan);
349 chan = ERR_PTR(ret);
350 }
351
352 mutex_unlock(&con_mutex);
353 return chan;
354}
355EXPORT_SYMBOL_GPL(mbox_request_channel);
356
357/**
358 * mbox_free_channel - The client relinquishes control of a mailbox
359 * channel by this call.
360 * @chan: The mailbox channel to be freed.
361 */
362void mbox_free_channel(struct mbox_chan *chan)
363{
364 unsigned long flags;
365
366 if (!chan || !chan->cl)
367 return;
368
369 chan->mbox->ops->shutdown(chan);
370
371 /* The queued TX requests are simply aborted, no callbacks are made */
372 spin_lock_irqsave(&chan->lock, flags);
373 chan->cl = NULL;
374 chan->active_req = NULL;
375 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
376 chan->txdone_method = TXDONE_BY_POLL;
377
378 module_put(chan->mbox->dev->driver->owner);
379 spin_unlock_irqrestore(&chan->lock, flags);
380}
381EXPORT_SYMBOL_GPL(mbox_free_channel);
382
383static struct mbox_chan *
384of_mbox_index_xlate(struct mbox_controller *mbox,
385 const struct of_phandle_args *sp)
386{
387 int ind = sp->args[0];
388
389 if (ind >= mbox->num_chans)
390 return NULL;
391
392 return &mbox->chans[ind];
393}
394
395/**
396 * mbox_controller_register - Register the mailbox controller
397 * @mbox: Pointer to the mailbox controller.
398 *
399 * The controller driver registers its communication channels
400 */
401int mbox_controller_register(struct mbox_controller *mbox)
402{
403 int i, txdone;
404
405 /* Sanity check */
406 if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
407 return -EINVAL;
408
409 if (mbox->txdone_irq)
410 txdone = TXDONE_BY_IRQ;
411 else if (mbox->txdone_poll)
412 txdone = TXDONE_BY_POLL;
413 else /* It has to be ACK then */
414 txdone = TXDONE_BY_ACK;
415
416 if (txdone == TXDONE_BY_POLL) {
417 mbox->poll.function = &poll_txdone;
418 mbox->poll.data = (unsigned long)mbox;
419 init_timer(&mbox->poll);
420 }
421
422 for (i = 0; i < mbox->num_chans; i++) {
423 struct mbox_chan *chan = &mbox->chans[i];
424
425 chan->cl = NULL;
426 chan->mbox = mbox;
427 chan->txdone_method = txdone;
428 spin_lock_init(&chan->lock);
429 }
430
431 if (!mbox->of_xlate)
432 mbox->of_xlate = of_mbox_index_xlate;
433
434 mutex_lock(&con_mutex);
435 list_add_tail(&mbox->node, &mbox_cons);
436 mutex_unlock(&con_mutex);
437
438 return 0;
439}
440EXPORT_SYMBOL_GPL(mbox_controller_register);
441
442/**
443 * mbox_controller_unregister - Unregister the mailbox controller
444 * @mbox: Pointer to the mailbox controller.
445 */
446void mbox_controller_unregister(struct mbox_controller *mbox)
447{
448 int i;
449
450 if (!mbox)
451 return;
452
453 mutex_lock(&con_mutex);
454
455 list_del(&mbox->node);
456
457 for (i = 0; i < mbox->num_chans; i++)
458 mbox_free_channel(&mbox->chans[i]);
459
460 if (mbox->txdone_poll)
461 del_timer_sync(&mbox->poll);
462
463 mutex_unlock(&con_mutex);
464}
465EXPORT_SYMBOL_GPL(mbox_controller_unregister);
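
For orientation, here is a minimal, hypothetical client sketch built only from what the new file exports above (mbox_request_channel(), mbox_send_message(), mbox_free_channel()) and the mbox_client fields the framework dereferences (dev, rx_callback, tx_done, tx_block, tx_tout, knows_txdone). The "demo" names and the platform-driver scaffolding are illustrative assumptions, not part of this patch.

/* Hypothetical client sketch; field layout assumed from <linux/mailbox_client.h> */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_client.h>

struct demo_client {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

/* Atomic context: controller pushed RX data up via mbox_chan_received_data() */
static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	dev_info(cl->dev, "received reply %p\n", mssg);
}

/* Atomic context: the last TX completed (r == 0) or failed */
static void demo_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	if (r)
		dev_warn(cl->dev, "message %p failed: %d\n", mssg, r);
}

static int demo_probe(struct platform_device *pdev)
{
	static u32 cmd = 0x1;	/* must stay valid until tx_done() runs */
	struct demo_client *dc;
	int ret;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->cl.dev = &pdev->dev;
	dc->cl.rx_callback = demo_rx_callback;
	dc->cl.tx_done = demo_tx_done;
	dc->cl.tx_block = true;		/* mbox_send_message() waits on tx_complete */
	dc->cl.tx_tout = 500;		/* ...for at most 500 ms */
	dc->cl.knows_txdone = false;	/* controller (IRQ or poll) ticks TX done */

	/* index 0: first phandle in this device's "mboxes" property */
	dc->chan = mbox_request_channel(&dc->cl, 0);
	if (IS_ERR(dc->chan))
		return PTR_ERR(dc->chan);

	ret = mbox_send_message(dc->chan, &cmd);
	if (ret < 0) {
		mbox_free_channel(dc->chan);
		return ret;
	}

	platform_set_drvdata(pdev, dc);
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	struct demo_client *dc = platform_get_drvdata(pdev);

	mbox_free_channel(dc->chan);
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= { .name = "mbox-demo-client" },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");

Note that in blocking mode a timeout simply ticks the state machine with -EIO (see mbox_send_message() above), so the queued request is reported to tx_done() as failed rather than retried.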
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index d873cbae2fbb..f3755e0aa935 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -26,7 +26,7 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/amba/bus.h> 27#include <linux/amba/bus.h>
28 28
29#include <linux/mailbox.h> 29#include <linux/pl320-ipc.h>
30 30
31#define IPCMxSOURCE(m) ((m) * 0x40) 31#define IPCMxSOURCE(m) ((m) * 0x40)
32#define IPCMxDSET(m) (((m) * 0x40) + 0x004) 32#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index 97afee672d07..4418119cf707 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -364,6 +364,9 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
364 goto out; 364 goto out;
365 } 365 }
366 366
367 /* create a nice device name */
368 sprintf(dev->name, "saa7146 (%d)", saa7146_num);
369
367 DEB_EE("pci:%p\n", pci); 370 DEB_EE("pci:%p\n", pci);
368 371
369 err = pci_enable_device(pci); 372 err = pci_enable_device(pci);
@@ -438,9 +441,6 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
438 441
439 /* the rest + print status message */ 442 /* the rest + print status message */
440 443
441 /* create a nice device name */
442 sprintf(dev->name, "saa7146 (%d)", saa7146_num);
443
444 pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n", 444 pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n",
445 dev->mem, dev->revision, pci->irq, 445 dev->mem, dev->revision, pci->irq,
446 pci->subsystem_vendor, pci->subsystem_device); 446 pci->subsystem_vendor, pci->subsystem_device);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 13734b8c7917..4cb90317ff45 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1600,6 +1600,7 @@ static int dvb_register(struct cx23885_tsport *port)
1600 break; 1600 break;
1601 1601
1602 /* attach tuner */ 1602 /* attach tuner */
1603 memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
1603 m88ts2022_config.fe = fe0->dvb.frontend; 1604 m88ts2022_config.fe = fe0->dvb.frontend;
1604 m88ts2022_config.clock = 27000000; 1605 m88ts2022_config.clock = 27000000;
1605 memset(&info, 0, sizeof(struct i2c_board_info)); 1606 memset(&info, 0, sizeof(struct i2c_board_info));
@@ -1635,6 +1636,7 @@ static int dvb_register(struct cx23885_tsport *port)
1635 /* port c - terrestrial/cable */ 1636 /* port c - terrestrial/cable */
1636 case 2: 1637 case 2:
1637 /* attach frontend */ 1638 /* attach frontend */
1639 memset(&si2168_config, 0, sizeof(si2168_config));
1638 si2168_config.i2c_adapter = &adapter; 1640 si2168_config.i2c_adapter = &adapter;
1639 si2168_config.fe = &fe0->dvb.frontend; 1641 si2168_config.fe = &fe0->dvb.frontend;
1640 si2168_config.ts_mode = SI2168_TS_SERIAL; 1642 si2168_config.ts_mode = SI2168_TS_SERIAL;
@@ -1654,6 +1656,7 @@ static int dvb_register(struct cx23885_tsport *port)
1654 port->i2c_client_demod = client_demod; 1656 port->i2c_client_demod = client_demod;
1655 1657
1656 /* attach tuner */ 1658 /* attach tuner */
1659 memset(&si2157_config, 0, sizeof(si2157_config));
1657 si2157_config.fe = fe0->dvb.frontend; 1660 si2157_config.fe = fe0->dvb.frontend;
1658 memset(&info, 0, sizeof(struct i2c_board_info)); 1661 memset(&info, 0, sizeof(struct i2c_board_info));
1659 strlcpy(info.type, "si2157", I2C_NAME_SIZE); 1662 strlcpy(info.type, "si2157", I2C_NAME_SIZE);
diff --git a/drivers/media/pci/tw68/Kconfig b/drivers/media/pci/tw68/Kconfig
index 5425ba1e320d..95d5d5202048 100644
--- a/drivers/media/pci/tw68/Kconfig
+++ b/drivers/media/pci/tw68/Kconfig
@@ -1,7 +1,6 @@
1config VIDEO_TW68 1config VIDEO_TW68
2 tristate "Techwell tw68x Video For Linux" 2 tristate "Techwell tw68x Video For Linux"
3 depends on VIDEO_DEV && PCI && VIDEO_V4L2 3 depends on VIDEO_DEV && PCI && VIDEO_V4L2
4 select I2C_ALGOBIT
5 select VIDEOBUF2_DMA_SG 4 select VIDEOBUF2_DMA_SG
6 ---help--- 5 ---help---
7 Support for Techwell tw68xx based frame grabber boards. 6 Support for Techwell tw68xx based frame grabber boards.
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index a6fb48cf7aae..63f0b64057cb 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -306,7 +306,7 @@ static int tw68_initdev(struct pci_dev *pci_dev,
306 306
307 /* get irq */ 307 /* get irq */
308 err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq, 308 err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq,
309 IRQF_SHARED | IRQF_DISABLED, dev->name, dev); 309 IRQF_SHARED, dev->name, dev);
310 if (err < 0) { 310 if (err < 0) {
311 pr_err("%s: can't get IRQ %d\n", 311 pr_err("%s: can't get IRQ %d\n",
312 dev->name, pci_dev->irq); 312 dev->name, pci_dev->irq);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index bee9074ebc13..3aac88f1d54a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -166,7 +166,7 @@ config VIDEO_MEM2MEM_DEINTERLACE
166config VIDEO_SAMSUNG_S5P_G2D 166config VIDEO_SAMSUNG_S5P_G2D
167 tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver" 167 tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
168 depends on VIDEO_DEV && VIDEO_V4L2 168 depends on VIDEO_DEV && VIDEO_V4L2
169 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 169 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
170 depends on HAS_DMA 170 depends on HAS_DMA
171 select VIDEOBUF2_DMA_CONTIG 171 select VIDEOBUF2_DMA_CONTIG
172 select V4L2_MEM2MEM_DEV 172 select V4L2_MEM2MEM_DEV
@@ -178,7 +178,7 @@ config VIDEO_SAMSUNG_S5P_G2D
178config VIDEO_SAMSUNG_S5P_JPEG 178config VIDEO_SAMSUNG_S5P_JPEG
179 tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver" 179 tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
180 depends on VIDEO_DEV && VIDEO_V4L2 180 depends on VIDEO_DEV && VIDEO_V4L2
181 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 181 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
182 depends on HAS_DMA 182 depends on HAS_DMA
183 select VIDEOBUF2_DMA_CONTIG 183 select VIDEOBUF2_DMA_CONTIG
184 select V4L2_MEM2MEM_DEV 184 select V4L2_MEM2MEM_DEV
@@ -189,7 +189,7 @@ config VIDEO_SAMSUNG_S5P_JPEG
189config VIDEO_SAMSUNG_S5P_MFC 189config VIDEO_SAMSUNG_S5P_MFC
190 tristate "Samsung S5P MFC Video Codec" 190 tristate "Samsung S5P MFC Video Codec"
191 depends on VIDEO_DEV && VIDEO_V4L2 191 depends on VIDEO_DEV && VIDEO_V4L2
192 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 192 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
193 depends on HAS_DMA 193 depends on HAS_DMA
194 select VIDEOBUF2_DMA_CONTIG 194 select VIDEOBUF2_DMA_CONTIG
195 default n 195 default n
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 77c951237744..b7b2e472240a 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -2,7 +2,7 @@
2config VIDEO_SAMSUNG_EXYNOS4_IS 2config VIDEO_SAMSUNG_EXYNOS4_IS
3 bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver" 3 bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
4 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 4 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
5 depends on (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) 5 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
6 depends on OF && COMMON_CLK 6 depends on OF && COMMON_CLK
7 help 7 help
8 Say Y here to enable camera host interface devices for 8 Say Y here to enable camera host interface devices for
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index b70fd996d794..aee92d908e49 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -832,6 +832,7 @@ err:
832 return -ENXIO; 832 return -ENXIO;
833} 833}
834 834
835#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
835static int fimc_m2m_suspend(struct fimc_dev *fimc) 836static int fimc_m2m_suspend(struct fimc_dev *fimc)
836{ 837{
837 unsigned long flags; 838 unsigned long flags;
@@ -870,6 +871,7 @@ static int fimc_m2m_resume(struct fimc_dev *fimc)
870 871
871 return 0; 872 return 0;
872} 873}
874#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */
873 875
874static const struct of_device_id fimc_of_match[]; 876static const struct of_device_id fimc_of_match[];
875 877
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index e525a7c8d885..6fcc7f072ace 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -893,7 +893,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
893 unsigned long buffer, unsigned long size, 893 unsigned long buffer, unsigned long size,
894 struct s5p_jpeg_ctx *ctx) 894 struct s5p_jpeg_ctx *ctx)
895{ 895{
896 int c, components, notfound; 896 int c, components = 0, notfound;
897 unsigned int height, width, word, subsampling = 0; 897 unsigned int height, width, word, subsampling = 0;
898 long length; 898 long length;
899 struct s5p_jpeg_buffer jpeg_buffer; 899 struct s5p_jpeg_buffer jpeg_buffer;
@@ -2632,6 +2632,7 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
2632 return 0; 2632 return 0;
2633} 2633}
2634 2634
2635#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
2635static int s5p_jpeg_runtime_suspend(struct device *dev) 2636static int s5p_jpeg_runtime_suspend(struct device *dev)
2636{ 2637{
2637 struct s5p_jpeg *jpeg = dev_get_drvdata(dev); 2638 struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
@@ -2681,7 +2682,9 @@ static int s5p_jpeg_runtime_resume(struct device *dev)
2681 2682
2682 return 0; 2683 return 0;
2683} 2684}
2685#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */
2684 2686
2687#ifdef CONFIG_PM_SLEEP
2685static int s5p_jpeg_suspend(struct device *dev) 2688static int s5p_jpeg_suspend(struct device *dev)
2686{ 2689{
2687 if (pm_runtime_suspended(dev)) 2690 if (pm_runtime_suspended(dev))
@@ -2697,6 +2700,7 @@ static int s5p_jpeg_resume(struct device *dev)
2697 2700
2698 return s5p_jpeg_runtime_resume(dev); 2701 return s5p_jpeg_runtime_resume(dev);
2699} 2702}
2703#endif
2700 2704
2701static const struct dev_pm_ops s5p_jpeg_pm_ops = { 2705static const struct dev_pm_ops s5p_jpeg_pm_ops = {
2702 SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume) 2706 SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume)
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
index a9d56f8936b4..beb180e71ba0 100644
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ b/drivers/media/platform/s5p-tv/Kconfig
@@ -9,7 +9,7 @@
9config VIDEO_SAMSUNG_S5P_TV 9config VIDEO_SAMSUNG_S5P_TV
10 bool "Samsung TV driver for S5P platform" 10 bool "Samsung TV driver for S5P platform"
11 depends on PM_RUNTIME 11 depends on PM_RUNTIME
12 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 12 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
13 default n 13 default n
14 ---help--- 14 ---help---
15 Say Y here to enable selecting the TV output devices for 15 Say Y here to enable selecting the TV output devices for
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index d71139a2ae00..c3090932f06d 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -1,8 +1,11 @@
1config VIDEO_VIVID 1config VIDEO_VIVID
2 tristate "Virtual Video Test Driver" 2 tristate "Virtual Video Test Driver"
3 depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 3 depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FB
4 select FONT_SUPPORT 4 select FONT_SUPPORT
5 select FONT_8x16 5 select FONT_8x16
6 select FB_CFB_FILLRECT
7 select FB_CFB_COPYAREA
8 select FB_CFB_IMAGEBLIT
6 select VIDEOBUF2_VMALLOC 9 select VIDEOBUF2_VMALLOC
7 default n 10 default n
8 ---help--- 11 ---help---
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index 0c6fa53fa646..cbcd6250e7b2 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -136,7 +136,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
136 tpg->black_line[plane] = vzalloc(max_w * pixelsz); 136 tpg->black_line[plane] = vzalloc(max_w * pixelsz);
137 if (!tpg->black_line[plane]) 137 if (!tpg->black_line[plane])
138 return -ENOMEM; 138 return -ENOMEM;
139 tpg->random_line[plane] = vzalloc(max_w * pixelsz); 139 tpg->random_line[plane] = vzalloc(max_w * 2 * pixelsz);
140 if (!tpg->random_line[plane]) 140 if (!tpg->random_line[plane])
141 return -ENOMEM; 141 return -ENOMEM;
142 } 142 }
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 6f28f6e02ea5..704397f3c106 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -1256,7 +1256,7 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
1256 fmerr("Unable to read firmware(%s) content\n", fw_name); 1256 fmerr("Unable to read firmware(%s) content\n", fw_name);
1257 return ret; 1257 return ret;
1258 } 1258 }
1259 fmdbg("Firmware(%s) length : %d bytes\n", fw_name, fw_entry->size); 1259 fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);
1260 1260
1261 fw_data = (void *)fw_entry->data; 1261 fw_data = (void *)fw_entry->data;
1262 fw_len = fw_entry->size; 1262 fw_len = fw_entry->size;
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e44c8aba6074..803a0e63d47e 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1333,9 +1333,9 @@ static int xc5000_release(struct dvb_frontend *fe)
1333 1333
1334 if (priv) { 1334 if (priv) {
1335 cancel_delayed_work(&priv->timer_sleep); 1335 cancel_delayed_work(&priv->timer_sleep);
1336 hybrid_tuner_release_state(priv);
1337 if (priv->firmware) 1336 if (priv->firmware)
1338 release_firmware(priv->firmware); 1337 release_firmware(priv->firmware);
1338 hybrid_tuner_release_state(priv);
1339 } 1339 }
1340 1340
1341 mutex_unlock(&xc5000_list_mutex); 1341 mutex_unlock(&xc5000_list_mutex);
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 00758c83eec7..1896ab218b11 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -193,8 +193,8 @@ static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val,
193 return af9035_wr_regs(d, reg, &val, 1); 193 return af9035_wr_regs(d, reg, &val, 1);
194} 194}
195 195
196static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr, 196static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
197 void *platform_data, struct i2c_adapter *adapter) 197 u8 addr, void *platform_data, struct i2c_adapter *adapter)
198{ 198{
199 int ret, num; 199 int ret, num;
200 struct state *state = d_to_priv(d); 200 struct state *state = d_to_priv(d);
@@ -221,7 +221,7 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
221 goto err; 221 goto err;
222 } 222 }
223 223
224 request_module(board_info.type); 224 request_module("%s", board_info.type);
225 225
226 /* register I2C device */ 226 /* register I2C device */
227 client = i2c_new_device(adapter, &board_info); 227 client = i2c_new_device(adapter, &board_info);
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index d3c5f230e97a..ae917c042a52 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -630,8 +630,8 @@ error:
630 return ret; 630 return ret;
631} 631}
632 632
633static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr, 633static int anysee_add_i2c_dev(struct dvb_usb_device *d, const char *type,
634 void *platform_data) 634 u8 addr, void *platform_data)
635{ 635{
636 int ret, num; 636 int ret, num;
637 struct anysee_state *state = d_to_priv(d); 637 struct anysee_state *state = d_to_priv(d);
@@ -659,7 +659,7 @@ static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
659 goto err; 659 goto err;
660 } 660 }
661 661
662 request_module(board_info.type); 662 request_module("%s", board_info.type);
663 663
664 /* register I2C device */ 664 /* register I2C device */
665 client = i2c_new_device(adapter, &board_info); 665 client = i2c_new_device(adapter, &board_info);
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index b5e52fe7957a..901cf2b952d7 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(em28xx_audio_analog_set);
504int em28xx_audio_setup(struct em28xx *dev) 504int em28xx_audio_setup(struct em28xx *dev)
505{ 505{
506 int vid1, vid2, feat, cfg; 506 int vid1, vid2, feat, cfg;
507 u32 vid; 507 u32 vid = 0;
508 u8 i2s_samplerates; 508 u8 i2s_samplerates;
509 509
510 if (dev->chip_id == CHIP_ID_EM2870 || 510 if (dev->chip_id == CHIP_ID_EM2870 ||
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 581f6dad4ca9..23f8f6afa2e0 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -712,8 +712,10 @@ static int em28xx_ir_init(struct em28xx *dev)
712 em28xx_info("Registering input extension\n"); 712 em28xx_info("Registering input extension\n");
713 713
714 ir = kzalloc(sizeof(*ir), GFP_KERNEL); 714 ir = kzalloc(sizeof(*ir), GFP_KERNEL);
715 if (!ir)
716 return -ENOMEM;
715 rc = rc_allocate_device(); 717 rc = rc_allocate_device();
716 if (!ir || !rc) 718 if (!rc)
717 goto error; 719 goto error;
718 720
719 /* record handles to ourself */ 721 /* record handles to ourself */
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index 328b5ba47a0a..fd1fa412e094 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -932,7 +932,7 @@ static int hackrf_set_bandwidth(struct hackrf_dev *dev)
932 dev->bandwidth->val = bandwidth; 932 dev->bandwidth->val = bandwidth;
933 dev->bandwidth->cur.val = bandwidth; 933 dev->bandwidth->cur.val = bandwidth;
934 934
935 dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth_lut[i].freq); 935 dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth);
936 936
937 u16tmp = 0; 937 u16tmp = 0;
938 u16tmp |= ((bandwidth >> 0) & 0xff) << 0; 938 u16tmp |= ((bandwidth >> 0) & 0xff) << 0;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 68bc9615660e..9bfa041e3316 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -446,6 +446,7 @@ static int usbvision_v4l2_close(struct file *file)
446 if (usbvision->remove_pending) { 446 if (usbvision->remove_pending) {
447 printk(KERN_INFO "%s: Final disconnect\n", __func__); 447 printk(KERN_INFO "%s: Final disconnect\n", __func__);
448 usbvision_release(usbvision); 448 usbvision_release(usbvision);
449 return 0;
449 } 450 }
450 mutex_unlock(&usbvision->v4l2_lock); 451 mutex_unlock(&usbvision->v4l2_lock);
451 452
@@ -1221,6 +1222,7 @@ static int usbvision_radio_close(struct file *file)
1221 if (usbvision->remove_pending) { 1222 if (usbvision->remove_pending) {
1222 printk(KERN_INFO "%s: Final disconnect\n", __func__); 1223 printk(KERN_INFO "%s: Final disconnect\n", __func__);
1223 usbvision_release(usbvision); 1224 usbvision_release(usbvision);
1225 return err_code;
1224 } 1226 }
1225 1227
1226 mutex_unlock(&usbvision->v4l2_lock); 1228 mutex_unlock(&usbvision->v4l2_lock);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 60a8e2c3631e..378ae02e593b 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -318,7 +318,6 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream,
318 stream->ctrl = probe; 318 stream->ctrl = probe;
319 stream->cur_format = format; 319 stream->cur_format = format;
320 stream->cur_frame = frame; 320 stream->cur_frame = frame;
321 stream->frame_size = fmt->fmt.pix.sizeimage;
322 321
323done: 322done:
324 mutex_unlock(&stream->mutex); 323 mutex_unlock(&stream->mutex);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 9ace520bb079..df81b9c4faf1 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1143,7 +1143,7 @@ static int uvc_video_encode_data(struct uvc_streaming *stream,
1143static void uvc_video_validate_buffer(const struct uvc_streaming *stream, 1143static void uvc_video_validate_buffer(const struct uvc_streaming *stream,
1144 struct uvc_buffer *buf) 1144 struct uvc_buffer *buf)
1145{ 1145{
1146 if (stream->frame_size != buf->bytesused && 1146 if (stream->ctrl.dwMaxVideoFrameSize != buf->bytesused &&
1147 !(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED)) 1147 !(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED))
1148 buf->error = 1; 1148 buf->error = 1;
1149} 1149}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 6f676c29ec09..864ada740360 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -457,7 +457,6 @@ struct uvc_streaming {
457 struct uvc_format *def_format; 457 struct uvc_format *def_format;
458 struct uvc_format *cur_format; 458 struct uvc_format *cur_format;
459 struct uvc_frame *cur_frame; 459 struct uvc_frame *cur_frame;
460 size_t frame_size;
461 460
462 /* Protect access to ctrl, cur_format, cur_frame and hardware video 461 /* Protect access to ctrl, cur_format, cur_frame and hardware video
463 * probe control. 462 * probe control.
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index bf80f0f7dfb8..e02353e340dd 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -305,6 +305,15 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
305 /* Try to remap memory */ 305 /* Try to remap memory */
306 size = vma->vm_end - vma->vm_start; 306 size = vma->vm_end - vma->vm_start;
307 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 307 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
308
309 /* the "vm_pgoff" is just used in v4l2 to find the
310 * corresponding buffer data structure which is allocated
311 * earlier and it does not mean the offset from the physical
312 * buffer start address as usual. So set it to 0 to pass
313 * the sanity check in vm_iomap_memory().
314 */
315 vma->vm_pgoff = 0;
316
308 retval = vm_iomap_memory(vma, mem->dma_handle, size); 317 retval = vm_iomap_memory(vma, mem->dma_handle, size);
309 if (retval) { 318 if (retval) {
310 dev_err(q->dev, "mmap: remap failed with error %d. ", 319 dev_err(q->dev, "mmap: remap failed with error %d. ",
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 69506ebd4d07..c99e896604ee 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -21,60 +21,64 @@
21 21
22#include "cxl.h" 22#include "cxl.h"
23 23
24static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group, 24static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
25 bool sec_hash,
26 struct cxl_sste *secondary_group,
27 unsigned int *lru)
28{ 25{
29 unsigned int i, entry; 26 return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
30 struct cxl_sste *sste, *group = primary_group; 27 (sste->esid_data == cpu_to_be64(slb->esid)));
31 28}
32 for (i = 0; i < 2; i++) { 29
33 for (entry = 0; entry < 8; entry++) { 30/*
34 sste = group + entry; 31 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
35 if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) 32 * the segment table.
36 return sste; 33 */
37 } 34static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
38 if (!sec_hash) 35 struct copro_slb *slb)
39 break; 36{
40 group = secondary_group; 37 struct cxl_sste *primary, *sste, *ret = NULL;
38 unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
39 unsigned int entry;
40 unsigned int hash;
41
42 if (slb->vsid & SLB_VSID_B_1T)
43 hash = (slb->esid >> SID_SHIFT_1T) & mask;
44 else /* 256M */
45 hash = (slb->esid >> SID_SHIFT) & mask;
46
47 primary = ctx->sstp + (hash << 3);
48
49 for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
50 if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
51 ret = sste;
52 if (sste_matches(sste, slb))
53 return NULL;
41 } 54 }
55 if (ret)
56 return ret;
57
42 /* Nothing free, select an entry to cast out */ 58 /* Nothing free, select an entry to cast out */
43 if (sec_hash && (*lru & 0x8)) 59 ret = primary + ctx->sst_lru;
44 sste = secondary_group + (*lru & 0x7); 60 ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
45 else
46 sste = primary_group + (*lru & 0x7);
47 *lru = (*lru + 1) & 0xf;
48 61
49 return sste; 62 return ret;
50} 63}
51 64
52static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) 65static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
53{ 66{
54 /* mask is the group index, we search primary and secondary here. */ 67 /* mask is the group index, we search primary and secondary here. */
55 unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */
56 bool sec_hash = 1;
57 struct cxl_sste *sste; 68 struct cxl_sste *sste;
58 unsigned int hash;
59 unsigned long flags; 69 unsigned long flags;
60 70
61
62 sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC);
63
64 if (slb->vsid & SLB_VSID_B_1T)
65 hash = (slb->esid >> SID_SHIFT_1T) & mask;
66 else /* 256M */
67 hash = (slb->esid >> SID_SHIFT) & mask;
68
69 spin_lock_irqsave(&ctx->sste_lock, flags); 71 spin_lock_irqsave(&ctx->sste_lock, flags);
70 sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash, 72 sste = find_free_sste(ctx, slb);
71 ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru); 73 if (!sste)
74 goto out_unlock;
72 75
73 pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", 76 pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
74 sste - ctx->sstp, slb->vsid, slb->esid); 77 sste - ctx->sstp, slb->vsid, slb->esid);
75 78
76 sste->vsid_data = cpu_to_be64(slb->vsid); 79 sste->vsid_data = cpu_to_be64(slb->vsid);
77 sste->esid_data = cpu_to_be64(slb->esid); 80 sste->esid_data = cpu_to_be64(slb->esid);
81out_unlock:
78 spin_unlock_irqrestore(&ctx->sste_lock, flags); 82 spin_unlock_irqrestore(&ctx->sste_lock, flags);
79} 83}
80 84
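
As a worked sketch of the new lookup (illustrative only, not part of the patch): the segment table is treated as groups of 8 SSTEs, the group is picked by hashing the ESID with the mask derived from SSTP0[SegTableSize], and the 1T/256M distinction only changes the shift. The helper name below is hypothetical; the macros and fields are the ones used in find_free_sste() above.

/* Sketch: how find_free_sste() selects the primary group for an SLB entry */
static struct cxl_sste *sste_primary_group(struct cxl_context *ctx,
					   const struct copro_slb *slb)
{
	unsigned int mask = (ctx->sst_size >> 7) - 1;	/* SSTP0[SegTableSize] */
	unsigned int hash;

	if (slb->vsid & SLB_VSID_B_1T)
		hash = (slb->esid >> SID_SHIFT_1T) & mask;	/* 1T segment */
	else
		hash = (slb->esid >> SID_SHIFT) & mask;		/* 256M segment */

	return ctx->sstp + (hash << 3);				/* 8 SSTEs per group */
}

Within that group the first invalid entry is reused, an exact vsid/esid match bails out with NULL (the mapping is already present), and a full group falls back to the per-context sst_lru counter, which cycles through the 8 slots as the cast-out victim.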
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 623286a77114..d47532e8f4f1 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -417,7 +417,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
417 ctx->elem->haurp = 0; /* disable */ 417 ctx->elem->haurp = 0; /* disable */
418 ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1)); 418 ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
419 419
420 sr = CXL_PSL_SR_An_SC; 420 sr = 0;
421 if (ctx->master) 421 if (ctx->master)
422 sr |= CXL_PSL_SR_An_MP; 422 sr |= CXL_PSL_SR_An_MP;
423 if (mfspr(SPRN_LPCR) & LPCR_TC) 423 if (mfspr(SPRN_LPCR) & LPCR_TC)
@@ -508,7 +508,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
508 u64 sr; 508 u64 sr;
509 int rc; 509 int rc;
510 510
511 sr = CXL_PSL_SR_An_SC; 511 sr = 0;
512 set_endian(sr); 512 set_endian(sr);
513 if (ctx->master) 513 if (ctx->master)
514 sr |= CXL_PSL_SR_An_MP; 514 sr |= CXL_PSL_SR_An_MP;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4706386b7d34..f9009be3f307 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -135,6 +135,7 @@ config MACVLAN
135config MACVTAP 135config MACVTAP
136 tristate "MAC-VLAN based tap driver" 136 tristate "MAC-VLAN based tap driver"
137 depends on MACVLAN 137 depends on MACVLAN
138 depends on INET
138 help 139 help
139 This adds a specialized tap character device driver that is based 140 This adds a specialized tap character device driver that is based
140 on the MAC-VLAN network interface, called macvtap. A macvtap device 141 on the MAC-VLAN network interface, called macvtap. A macvtap device
@@ -200,6 +201,7 @@ config RIONET_RX_SIZE
200 201
201config TUN 202config TUN
202 tristate "Universal TUN/TAP device driver support" 203 tristate "Universal TUN/TAP device driver support"
204 depends on INET
203 select CRC32 205 select CRC32
204 ---help--- 206 ---help---
205 TUN/TAP provides packet reception and transmission for user space 207 TUN/TAP provides packet reception and transmission for user space
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 1020a7af67cf..78d8e876f3aa 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -395,7 +395,7 @@ static int mv88e6171_get_sset_count(struct dsa_switch *ds)
395} 395}
396 396
397struct dsa_switch_driver mv88e6171_switch_driver = { 397struct dsa_switch_driver mv88e6171_switch_driver = {
398 .tag_protocol = DSA_TAG_PROTO_DSA, 398 .tag_protocol = DSA_TAG_PROTO_EDSA,
399 .priv_size = sizeof(struct mv88e6xxx_priv_state), 399 .priv_size = sizeof(struct mv88e6xxx_priv_state),
400 .probe = mv88e6171_probe, 400 .probe = mv88e6171_probe,
401 .setup = mv88e6171_setup, 401 .setup = mv88e6171_setup,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index e6d24c210198..c22f32622fa9 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
124{ 124{
125 struct net_device *ndev = p->ndev; 125 struct net_device *ndev = p->ndev;
126 u32 data; 126 u32 data;
127 int i; 127 int i = 0;
128 128
129 xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); 129 xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
130 for (i = 0; i < 10 && data != ~0U ; i++) { 130 do {
131 usleep_range(100, 110); 131 usleep_range(100, 110);
132 data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); 132 data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
133 } 133 if (data == ~0U)
134 return 0;
135 } while (++i < 10);
134 136
135 if (data != ~0U) { 137 netdev_err(ndev, "Failed to release memory from shutdown\n");
136 netdev_err(ndev, "Failed to release memory from shutdown\n"); 138 return -ENODEV;
137 return -ENODEV;
138 }
139
140 return 0;
141} 139}
142 140
143static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) 141static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9ae36979bdee..3a6778a667f4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1397,6 +1397,9 @@ static void bcm_sysport_netif_start(struct net_device *dev)
1397 /* Enable NAPI */ 1397 /* Enable NAPI */
1398 napi_enable(&priv->napi); 1398 napi_enable(&priv->napi);
1399 1399
1400 /* Enable RX interrupt and TX ring full interrupt */
1401 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1402
1400 phy_start(priv->phydev); 1403 phy_start(priv->phydev);
1401 1404
1402 /* Enable TX interrupts for the 32 TXQs */ 1405 /* Enable TX interrupts for the 32 TXQs */
@@ -1499,9 +1502,6 @@ static int bcm_sysport_open(struct net_device *dev)
1499 if (ret) 1502 if (ret)
1500 goto out_free_rx_ring; 1503 goto out_free_rx_ring;
1501 1504
1502 /* Enable RX interrupt and TX ring full interrupt */
1503 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1504
1505 /* Turn on TDMA */ 1505 /* Turn on TDMA */
1506 ret = tdma_enable_set(priv, 1); 1506 ret = tdma_enable_set(priv, 1);
1507 if (ret) 1507 if (ret)
@@ -1858,6 +1858,8 @@ static int bcm_sysport_resume(struct device *d)
1858 if (!netif_running(dev)) 1858 if (!netif_running(dev))
1859 return 0; 1859 return 0;
1860 1860
1861 umac_reset(priv);
1862
1861 /* We may have been suspended and never received a WOL event that 1863 /* We may have been suspended and never received a WOL event that
1862 * would turn off MPD detection, take care of that now 1864 * would turn off MPD detection, take care of that now
1863 */ 1865 */
@@ -1885,9 +1887,6 @@ static int bcm_sysport_resume(struct device *d)
1885 1887
1886 netif_device_attach(dev); 1888 netif_device_attach(dev);
1887 1889
1888 /* Enable RX interrupt and TX ring full interrupt */
1889 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1890
1891 /* RX pipe enable */ 1890 /* RX pipe enable */
1892 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 1891 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1893 1892
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 23f23c97c2ad..f05fab65d78a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
382 if (l5_cid >= MAX_CM_SK_TBL_SZ) 382 if (l5_cid >= MAX_CM_SK_TBL_SZ)
383 break; 383 break;
384 384
385 rcu_read_lock();
386 if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { 385 if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
387 rc = -ENODEV; 386 rc = -ENODEV;
388 rcu_read_unlock();
389 break; 387 break;
390 } 388 }
391 csk = &cp->csk_tbl[l5_cid]; 389 csk = &cp->csk_tbl[l5_cid];
@@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
414 } 412 }
415 } 413 }
416 csk_put(csk); 414 csk_put(csk);
417 rcu_read_unlock();
418 rc = 0; 415 rc = 0;
419 } 416 }
420 } 417 }
@@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
615 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 612 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
616 613
617 mutex_lock(&cnic_lock); 614 mutex_lock(&cnic_lock);
618 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 615 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
619 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); 616 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
620 cnic_put(dev); 617 cnic_put(dev);
621 } else { 618 } else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 8edf0f5bd679..6fe300e316c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -60,6 +60,42 @@ void cxgb4_dcb_version_init(struct net_device *dev)
60 dcb->dcb_version = FW_PORT_DCB_VER_AUTO; 60 dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
61} 61}
62 62
63static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
64{
65 struct port_info *pi = netdev2pinfo(dev);
66 struct adapter *adap = pi->adapter;
67 struct port_dcb_info *dcb = &pi->dcb;
68 struct dcb_app app;
69 int i, err;
70
71 /* zero priority implies remove */
72 app.priority = 0;
73
74 for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
75 /* Check if app list is exhausted */
76 if (!dcb->app_priority[i].protocolid)
77 break;
78
79 app.protocol = dcb->app_priority[i].protocolid;
80
81 if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
82 app.selector = dcb->app_priority[i].sel_field + 1;
83 err = dcb_ieee_setapp(dev, &app);
84 } else {
85 app.selector = !!(dcb->app_priority[i].sel_field);
86 err = dcb_setapp(dev, &app);
87 }
88
89 if (err) {
90 dev_err(adap->pdev_dev,
 91				"Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
92 dcb_ver_array[dcb->dcb_version], app.selector,
93 app.protocol, -err);
94 break;
95 }
96 }
97}
98
63/* Finite State machine for Data Center Bridging. 99/* Finite State machine for Data Center Bridging.
64 */ 100 */
65void cxgb4_dcb_state_fsm(struct net_device *dev, 101void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -80,7 +116,6 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
80 /* we're going to use Host DCB */ 116 /* we're going to use Host DCB */
81 dcb->state = CXGB4_DCB_STATE_HOST; 117 dcb->state = CXGB4_DCB_STATE_HOST;
82 dcb->supported = CXGB4_DCBX_HOST_SUPPORT; 118 dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
83 dcb->enabled = 1;
84 break; 119 break;
85 } 120 }
86 121
@@ -145,6 +180,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
145 * state. We need to reset back to a ground state 180 * state. We need to reset back to a ground state
146 * of incomplete. 181 * of incomplete.
147 */ 182 */
183 cxgb4_dcb_cleanup_apps(dev);
148 cxgb4_dcb_state_init(dev); 184 cxgb4_dcb_state_init(dev);
149 dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; 185 dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
150 dcb->supported = CXGB4_DCBX_FW_SUPPORT; 186 dcb->supported = CXGB4_DCBX_FW_SUPPORT;
@@ -349,6 +385,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
349{ 385{
350 struct port_info *pi = netdev2pinfo(dev); 386 struct port_info *pi = netdev2pinfo(dev);
351 387
388 /* If DCBx is host-managed, dcb is enabled by outside lldp agents */
389 if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
390 pi->dcb.enabled = enabled;
391 return 0;
392 }
393
352 /* Firmware doesn't provide any mechanism to control the DCB state. 394 /* Firmware doesn't provide any mechanism to control the DCB state.
353 */ 395 */
354 if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED)) 396 if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
@@ -833,11 +875,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
833 875
834/* Return whether IEEE Data Center Bridging has been negotiated. 876/* Return whether IEEE Data Center Bridging has been negotiated.
835 */ 877 */
836static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev) 878static inline int
879cxgb4_ieee_negotiation_complete(struct net_device *dev,
880 enum cxgb4_dcb_fw_msgs dcb_subtype)
837{ 881{
838 struct port_info *pi = netdev2pinfo(dev); 882 struct port_info *pi = netdev2pinfo(dev);
839 struct port_dcb_info *dcb = &pi->dcb; 883 struct port_dcb_info *dcb = &pi->dcb;
840 884
885 if (dcb_subtype && !(dcb->msgs & dcb_subtype))
886 return 0;
887
841 return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && 888 return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
842 (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); 889 (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
843} 890}
@@ -850,7 +897,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
850{ 897{
851 int prio; 898 int prio;
852 899
853 if (!cxgb4_ieee_negotiation_complete(dev)) 900 if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
854 return -EINVAL; 901 return -EINVAL;
855 if (!(app->selector && app->protocol)) 902 if (!(app->selector && app->protocol))
856 return -EINVAL; 903 return -EINVAL;
@@ -872,7 +919,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
872{ 919{
873 int ret; 920 int ret;
874 921
875 if (!cxgb4_ieee_negotiation_complete(dev)) 922 if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
876 return -EINVAL; 923 return -EINVAL;
877 if (!(app->selector && app->protocol)) 924 if (!(app->selector && app->protocol))
878 return -EINVAL; 925 return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3f60070f2519..8520d5529df8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
694#ifdef CONFIG_CHELSIO_T4_DCB 694#ifdef CONFIG_CHELSIO_T4_DCB
695 struct port_info *pi = netdev_priv(dev); 695 struct port_info *pi = netdev_priv(dev);
696 696
697 return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED; 697 if (!pi->dcb.enabled)
698 return 0;
699
700 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
701 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
698#else 702#else
699 return 0; 703 return 0;
700#endif 704#endif
@@ -6610,6 +6614,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6610 6614
6611 spin_lock_init(&adapter->stats_lock); 6615 spin_lock_init(&adapter->stats_lock);
6612 spin_lock_init(&adapter->tid_release_lock); 6616 spin_lock_init(&adapter->tid_release_lock);
6617 spin_lock_init(&adapter->win0_lock);
6613 6618
6614 INIT_WORK(&adapter->tid_release_task, process_tid_release_list); 6619 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6615 INIT_WORK(&adapter->db_full_task, process_db_full); 6620 INIT_WORK(&adapter->db_full_task, process_db_full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index bfa398d91826..0b42bddaf284 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
2929 CH_DEVICE(0x480d), /* T480-cr */ 2929 CH_DEVICE(0x480d), /* T480-cr */
2930 CH_DEVICE(0x480e), /* T440-lp-cr */ 2930 CH_DEVICE(0x480e), /* T440-lp-cr */
2931 CH_DEVICE(0x4880), 2931 CH_DEVICE(0x4880),
2932 CH_DEVICE(0x4880), 2932 CH_DEVICE(0x4881),
2933 CH_DEVICE(0x4880), 2933 CH_DEVICE(0x4882),
2934 CH_DEVICE(0x4880), 2934 CH_DEVICE(0x4883),
2935 CH_DEVICE(0x4880), 2935 CH_DEVICE(0x4884),
2936 CH_DEVICE(0x4880), 2936 CH_DEVICE(0x4885),
2937 CH_DEVICE(0x4880), 2937 CH_DEVICE(0x4886),
2938 CH_DEVICE(0x4880), 2938 CH_DEVICE(0x4887),
2939 CH_DEVICE(0x4880), 2939 CH_DEVICE(0x4888),
2940 CH_DEVICE(0x5801), /* T520-cr */ 2940 CH_DEVICE(0x5801), /* T520-cr */
2941 CH_DEVICE(0x5802), /* T522-cr */ 2941 CH_DEVICE(0x5802), /* T522-cr */
2942 CH_DEVICE(0x5803), /* T540-cr */ 2942 CH_DEVICE(0x5803), /* T540-cr */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5f6aded512f5..24f3986cfae2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1075 NETIF_F_HW_CSUM | 1075 NETIF_F_HW_CSUM |
1076 NETIF_F_SG); 1076 NETIF_F_SG);
1077 1077
1078 netdev->priv_flags |= IFF_UNICAST_FLT; 1078 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1079 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1080 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1081 netdev->priv_flags |= IFF_UNICAST_FLT;
1079 1082
1080 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); 1083 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1081 1084
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da3f3a704b08..834c9ffc6267 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6163,7 +6163,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
6163 I40E_GL_MDET_TX_PF_NUM_SHIFT; 6163 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6164 u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 6164 u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6165 I40E_GL_MDET_TX_VF_NUM_SHIFT; 6165 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6166 u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >> 6166 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6167 I40E_GL_MDET_TX_EVENT_SHIFT; 6167 I40E_GL_MDET_TX_EVENT_SHIFT;
6168 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 6168 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6169 I40E_GL_MDET_TX_QUEUE_SHIFT; 6169 I40E_GL_MDET_TX_QUEUE_SHIFT;
@@ -6177,7 +6177,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
6177 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 6177 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6178 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 6178 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6179 I40E_GL_MDET_RX_FUNCTION_SHIFT; 6179 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6180 u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >> 6180 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6181 I40E_GL_MDET_RX_EVENT_SHIFT; 6181 I40E_GL_MDET_RX_EVENT_SHIFT;
6182 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 6182 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6183 I40E_GL_MDET_RX_QUEUE_SHIFT; 6183 I40E_GL_MDET_RX_QUEUE_SHIFT;
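
Note on the i40e fix above: extracting a register field follows the pattern (reg & FIELD_MASK) >> FIELD_SHIFT, and the bug was masking with the *_SHIFT constant instead of the *_MASK constant, which selects the wrong bits before the shift. A minimal userspace illustration with made-up mask/shift values (not the real i40e register layout):

    #include <stdio.h>

    /* hypothetical 8-bit field occupying bits 8..15 of a 32-bit register */
    #define EVENT_MASK  0x0000ff00u
    #define EVENT_SHIFT 8

    int main(void)
    {
            unsigned int reg = 0x00012a00u;         /* field value is 0x2a */

            /* correct: mask first, then shift the field down to bit 0 */
            unsigned int good = (reg & EVENT_MASK) >> EVENT_SHIFT;

            /* bug pattern: masking with the shift count (8 == 0b1000) keeps
             * at most bit 3, so the result is meaningless (0 here)
             */
            unsigned int bad = (reg & EVENT_SHIFT) >> EVENT_SHIFT;

            printf("good=0x%02x bad=0x%02x\n", good, bad);  /* good=0x2a bad=0x00 */
            return 0;
    }
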
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a21b14495ebd..a2d72a87cbde 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6537,6 +6537,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6537 if (unlikely(page_to_nid(page) != numa_node_id())) 6537 if (unlikely(page_to_nid(page) != numa_node_id()))
6538 return false; 6538 return false;
6539 6539
6540 if (unlikely(page->pfmemalloc))
6541 return false;
6542
6540#if (PAGE_SIZE < 8192) 6543#if (PAGE_SIZE < 8192)
6541 /* if we are only owner of page we can reuse it */ 6544 /* if we are only owner of page we can reuse it */
6542 if (unlikely(page_count(page) != 1)) 6545 if (unlikely(page_count(page) != 1))
@@ -6603,7 +6606,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6603 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 6606 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6604 6607
6605 /* we can reuse buffer as-is, just make sure it is local */ 6608 /* we can reuse buffer as-is, just make sure it is local */
6606 if (likely(page_to_nid(page) == numa_node_id())) 6609 if (likely((page_to_nid(page) == numa_node_id()) &&
6610 !page->pfmemalloc))
6607 return true; 6611 return true;
6608 6612
6609 /* this page cannot be reused so discard it */ 6613 /* this page cannot be reused so discard it */
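
Note on the igb hunks above: pages allocated from the emergency reserve are flagged pfmemalloc and exist only to guarantee forward progress under memory pressure, so an rx path must return them to the allocator instead of recycling them into the ring. A condensed sketch of the reuse test (NUMA check omitted; later kernels wrap the flag access as page_is_pfmemalloc(), while the hunk uses the older page->pfmemalloc field directly):

    #include <linux/mm.h>

    /* minimal sketch of an rx-page recycling check */
    static bool rx_page_reusable(struct page *page)
    {
            /* emergency-reserve pages must go back to the page allocator */
            if (page_is_pfmemalloc(page))
                    return false;

            /* only recycle when the driver is the sole owner of the page */
            return page_count(page) == 1;
    }
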
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3ce4a258f945..0ae038b9af90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
342 if (old == advertised) 342 if (old == advertised)
343 return err; 343 return err;
344 /* this sets the link speed and restarts auto-neg */ 344 /* this sets the link speed and restarts auto-neg */
345 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
346 usleep_range(1000, 2000);
347
345 hw->mac.autotry_restart = true; 348 hw->mac.autotry_restart = true;
346 err = hw->mac.ops.setup_link(hw, advertised, true); 349 err = hw->mac.ops.setup_link(hw, advertised, true);
347 if (err) { 350 if (err) {
348 e_info(probe, "setup link failed with code %d\n", err); 351 e_info(probe, "setup link failed with code %d\n", err);
349 hw->mac.ops.setup_link(hw, old, true); 352 hw->mac.ops.setup_link(hw, old, true);
350 } 353 }
354 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
351 } else { 355 } else {
352 /* in this case we currently only support 10Gb/FULL */ 356 /* in this case we currently only support 10Gb/FULL */
353 u32 speed = ethtool_cmd_speed(ecmd); 357 u32 speed = ethtool_cmd_speed(ecmd);
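
Note on the ixgbe_set_settings change above: the added loop is the usual sleeping "bit lock" idiom used to serialize a slow path against another context (here SFP initialization); test_and_set_bit() returns the previous bit value, so the loop sleeps until the bit was observed clear, and the matching clear_bit() releases the section. A generic sketch with hypothetical flag and state names:

    #include <linux/bitops.h>
    #include <linux/delay.h>

    #define FOO_IN_SFP_INIT 3       /* hypothetical bit in a driver state word */

    static void foo_reconfigure_link(unsigned long *state)
    {
            /* wait until no other context holds the section (sleepable only) */
            while (test_and_set_bit(FOO_IN_SFP_INIT, state))
                    usleep_range(1000, 2000);

            /* ... reprogram the link while holding the bit ... */

            clear_bit(FOO_IN_SFP_INIT, state);      /* release */
    }
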
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fec5212d4337..d2df4e3d1032 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4321 IXGBE_CB(skb)->page_released = false; 4321 IXGBE_CB(skb)->page_released = false;
4322 } 4322 }
4323 dev_kfree_skb(skb); 4323 dev_kfree_skb(skb);
4324 rx_buffer->skb = NULL;
4324 } 4325 }
4325 rx_buffer->skb = NULL;
4326 if (rx_buffer->dma) 4326 if (rx_buffer->dma)
4327 dma_unmap_page(dev, rx_buffer->dma, 4327 dma_unmap_page(dev, rx_buffer->dma,
4328 ixgbe_rx_pg_size(rx_ring), 4328 ixgbe_rx_pg_size(rx_ring),
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 5c4062921cdf..d0cecbdd9ba8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
836 * whether LSO is used */ 836 * whether LSO is used */
837 tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; 837 tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
838 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 838 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
839 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | 839 if (!skb->encapsulation)
840 MLX4_WQE_CTRL_TCP_UDP_CSUM); 840 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
841 MLX4_WQE_CTRL_TCP_UDP_CSUM);
842 else
843 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
841 ring->tx_csum++; 844 ring->tx_csum++;
842 } 845 }
843 846
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index a49c9d11d8a5..49290a405903 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1026 pr_cont("\n"); 1026 pr_cont("\n");
1027 } 1027 }
1028 } 1028 }
1029 synchronize_irq(eq->irq);
1029 1030
1030 mlx4_mtt_cleanup(dev, &eq->mtt); 1031 mlx4_mtt_cleanup(dev, &eq->mtt);
1031 for (i = 0; i < npages; ++i) 1032 for (i = 0; i < npages; ++i)
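
Note on the synchronize_irq() calls added here and in the mlx5 hunk below: even after the device has been told to stop generating events, the interrupt handler may still be executing on another CPU, and synchronize_irq() blocks until any in-flight handler for that IRQ has returned, after which it is safe to free memory the handler dereferences. A stripped-down sketch with hypothetical structures:

    #include <linux/interrupt.h>
    #include <linux/slab.h>

    struct foo_eq {
            unsigned int    irq;    /* Linux IRQ number the handler runs on */
            void            *ring;  /* buffer the handler dereferences */
    };

    static void foo_destroy_eq(struct foo_eq *eq)
    {
            /* 1. quiesce the hardware so no new interrupts are raised ... */

            /* 2. wait out a handler instance already running elsewhere */
            synchronize_irq(eq->irq);

            /* 3. only now is it safe to free handler-visible memory */
            kfree(eq->ring);
    }
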
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ca0f98c95105..872843179f44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
955 cur->ib.dst_gid_msk); 955 cur->ib.dst_gid_msk);
956 break; 956 break;
957 957
958 case MLX4_NET_TRANS_RULE_ID_VXLAN:
959 len += snprintf(buf + len, BUF_SIZE - len,
960 "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
961 break;
958 case MLX4_NET_TRANS_RULE_ID_IPV6: 962 case MLX4_NET_TRANS_RULE_ID_IPV6:
959 break; 963 break;
960 964
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ed53291468f3..a278238a2db6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -420,6 +420,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
420 if (err) 420 if (err)
421 mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", 421 mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
422 eq->eqn); 422 eq->eqn);
423 synchronize_irq(table->msix_arr[eq->irqn].vector);
423 mlx5_buf_free(dev, &eq->buf); 424 mlx5_buf_free(dev, &eq->buf);
424 425
425 return err; 426 return err;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 5e94d00b96b3..2c62208077fe 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
81#include <linux/workqueue.h> 81#include <linux/workqueue.h>
82#include <linux/of.h> 82#include <linux/of.h>
83#include <linux/of_device.h> 83#include <linux/of_device.h>
84#include <linux/of_gpio.h>
84 85
85#include <linux/netdevice.h> 86#include <linux/netdevice.h>
86#include <linux/etherdevice.h> 87#include <linux/etherdevice.h>
@@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = {
2188 {}, 2189 {},
2189}; 2190};
2190MODULE_DEVICE_TABLE(of, smc91x_match); 2191MODULE_DEVICE_TABLE(of, smc91x_match);
2192
2193/**
2194 * of_try_set_control_gpio - configure a gpio if it exists
2195 */
2196static int try_toggle_control_gpio(struct device *dev,
2197 struct gpio_desc **desc,
2198 const char *name, int index,
2199 int value, unsigned int nsdelay)
2200{
2201 struct gpio_desc *gpio = *desc;
2202 int res;
2203
2204 gpio = devm_gpiod_get_index(dev, name, index);
2205 if (IS_ERR(gpio)) {
2206 if (PTR_ERR(gpio) == -ENOENT) {
2207 *desc = NULL;
2208 return 0;
2209 }
2210
2211 return PTR_ERR(gpio);
2212 }
2213 res = gpiod_direction_output(gpio, !value);
2214 if (res) {
2215 dev_err(dev, "unable to toggle gpio %s: %i\n", name, res);
2216 devm_gpiod_put(dev, gpio);
2217 gpio = NULL;
2218 return res;
2219 }
2220 if (nsdelay)
2221 usleep_range(nsdelay, 2 * nsdelay);
2222 gpiod_set_value_cansleep(gpio, value);
2223 *desc = gpio;
2224
2225 return 0;
2226}
2191#endif 2227#endif
2192 2228
2193/* 2229/*
@@ -2237,6 +2273,28 @@ static int smc_drv_probe(struct platform_device *pdev)
2237 struct device_node *np = pdev->dev.of_node; 2273 struct device_node *np = pdev->dev.of_node;
2238 u32 val; 2274 u32 val;
2239 2275
2276 /* Optional pwrdwn GPIO configured? */
2277 ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
2278 "power", 0, 0, 100);
2279 if (ret)
2280 return ret;
2281
2282 /*
2283 * Optional reset GPIO configured? Minimum 100 ns reset needed
2284 * according to LAN91C96 datasheet page 14.
2285 */
2286 ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
2287 "reset", 0, 0, 100);
2288 if (ret)
2289 return ret;
2290
2291 /*
2292 * Need to wait for optional EEPROM to load, max 750 us according
2293 * to LAN91C96 datasheet page 55.
2294 */
2295 if (lp->reset_gpio)
2296 usleep_range(750, 1000);
2297
2240 /* Combination of IO widths supported, default to 16-bit */ 2298 /* Combination of IO widths supported, default to 16-bit */
2241 if (!of_property_read_u32(np, "reg-io-width", &val)) { 2299 if (!of_property_read_u32(np, "reg-io-width", &val)) {
2242 if (val & 1) 2300 if (val & 1)
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 47dce918eb0f..2a38dacbbd27 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -298,6 +298,9 @@ struct smc_local {
298 struct sk_buff *pending_tx_skb; 298 struct sk_buff *pending_tx_skb;
299 struct tasklet_struct tx_task; 299 struct tasklet_struct tx_task;
300 300
301 struct gpio_desc *power_gpio;
302 struct gpio_desc *reset_gpio;
303
301 /* version/revision of the SMC91x chip */ 304 /* version/revision of the SMC91x chip */
302 int version; 305 int version;
303 306
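
Note on the smc91x probe additions above: the "power" and "reset" GPIOs are optional, so -ENOENT from devm_gpiod_get_index() (the property is simply absent from the device tree) is accepted silently while any other error aborts the probe. Later kernels express the same policy with the *_optional gpiod getters, which return NULL rather than -ENOENT; a minimal sketch assuming that newer API:

    #include <linux/gpio/consumer.h>

    static int foo_get_reset_gpio(struct device *dev, struct gpio_desc **out)
    {
            struct gpio_desc *gpio;

            /* NULL when the DT has no "reset-gpios" property at all */
            gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(gpio))
                    return PTR_ERR(gpio);   /* a real error, abort probe */

            *out = gpio;                    /* may legitimately stay NULL */
            return 0;
    }
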
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 655a23bbc451..e17a970eaf2b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg;
33static void stmmac_default_data(void) 33static void stmmac_default_data(void)
34{ 34{
35 memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); 35 memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
36
36 plat_dat.bus_id = 1; 37 plat_dat.bus_id = 1;
37 plat_dat.phy_addr = 0; 38 plat_dat.phy_addr = 0;
38 plat_dat.interface = PHY_INTERFACE_MODE_GMII; 39 plat_dat.interface = PHY_INTERFACE_MODE_GMII;
@@ -47,6 +48,12 @@ static void stmmac_default_data(void)
47 dma_cfg.pbl = 32; 48 dma_cfg.pbl = 32;
48 dma_cfg.burst_len = DMA_AXI_BLEN_256; 49 dma_cfg.burst_len = DMA_AXI_BLEN_256;
49 plat_dat.dma_cfg = &dma_cfg; 50 plat_dat.dma_cfg = &dma_cfg;
51
52 /* Set default value for multicast hash bins */
53 plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
54
55 /* Set default value for unicast filter entries */
56 plat_dat.unicast_filter_entries = 1;
50} 57}
51 58
52/** 59/**
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525d5457..9c014803b03b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1262 HMD(("init rxring, ")); 1262 HMD(("init rxring, "));
1263 for (i = 0; i < RX_RING_SIZE; i++) { 1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb; 1264 struct sk_buff *skb;
1265 u32 mapping;
1265 1266
1266 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 1267 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1267 if (!skb) { 1268 if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1272 1273
1273 /* Because we reserve afterwards. */ 1274 /* Because we reserve afterwards. */
1274 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); 1275 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1276 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1277 DMA_FROM_DEVICE);
1278 if (dma_mapping_error(hp->dma_dev, mapping)) {
1279 dev_kfree_skb_any(skb);
1280 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1281 continue;
1282 }
1275 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 1283 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1276 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), 1284 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1277 dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, 1285 mapping);
1278 DMA_FROM_DEVICE));
1279 skb_reserve(skb, RX_OFFSET); 1286 skb_reserve(skb, RX_OFFSET);
1280 } 1287 }
1281 1288
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2020 skb = hp->rx_skbs[elem]; 2027 skb = hp->rx_skbs[elem];
2021 if (len > RX_COPY_THRESHOLD) { 2028 if (len > RX_COPY_THRESHOLD) {
2022 struct sk_buff *new_skb; 2029 struct sk_buff *new_skb;
2030 u32 mapping;
2023 2031
2024 /* Now refill the entry, if we can. */ 2032 /* Now refill the entry, if we can. */
2025 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 2033 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2027 drops++; 2035 drops++;
2028 goto drop_it; 2036 goto drop_it;
2029 } 2037 }
2038 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2039 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2040 RX_BUF_ALLOC_SIZE,
2041 DMA_FROM_DEVICE);
2042 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2043 dev_kfree_skb_any(new_skb);
2044 drops++;
2045 goto drop_it;
2046 }
2047
2030 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); 2048 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2031 hp->rx_skbs[elem] = new_skb; 2049 hp->rx_skbs[elem] = new_skb;
2032 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2033 hme_write_rxd(hp, this, 2050 hme_write_rxd(hp, this,
2034 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), 2051 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2035 dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, 2052 mapping);
2036 DMA_FROM_DEVICE));
2037 skb_reserve(new_skb, RX_OFFSET); 2053 skb_reserve(new_skb, RX_OFFSET);
2038 2054
2039 /* Trim the original skb for the netif. */ 2055 /* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
2248 netif_wake_queue(dev); 2264 netif_wake_queue(dev);
2249} 2265}
2250 2266
2267static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2268 u32 first_len, u32 first_entry, u32 entry)
2269{
2270 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2271
2272 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2273
2274 first_entry = NEXT_TX(first_entry);
2275 while (first_entry != entry) {
2276 struct happy_meal_txd *this = &txbase[first_entry];
2277 u32 addr, len;
2278
2279 addr = hme_read_desc32(hp, &this->tx_addr);
2280 len = hme_read_desc32(hp, &this->tx_flags);
2281 len &= TXFLAG_SIZE;
2282 dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
2283 }
2284}
2285
2251static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, 2286static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2252 struct net_device *dev) 2287 struct net_device *dev)
2253{ 2288{
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2284 2319
2285 len = skb->len; 2320 len = skb->len;
2286 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); 2321 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2322 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2323 goto out_dma_error;
2287 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); 2324 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2288 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], 2325 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2289 (tx_flags | (len & TXFLAG_SIZE)), 2326 (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2299 first_len = skb_headlen(skb); 2336 first_len = skb_headlen(skb);
2300 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, 2337 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2301 DMA_TO_DEVICE); 2338 DMA_TO_DEVICE);
2339 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2340 goto out_dma_error;
2302 entry = NEXT_TX(entry); 2341 entry = NEXT_TX(entry);
2303 2342
2304 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 2343 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2308 len = skb_frag_size(this_frag); 2347 len = skb_frag_size(this_frag);
2309 mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 2348 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2310 0, len, DMA_TO_DEVICE); 2349 0, len, DMA_TO_DEVICE);
2350 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2351 unmap_partial_tx_skb(hp, first_mapping, first_len,
2352 first_entry, entry);
2353 goto out_dma_error;
2354 }
2311 this_txflags = tx_flags; 2355 this_txflags = tx_flags;
2312 if (frag == skb_shinfo(skb)->nr_frags - 1) 2356 if (frag == skb_shinfo(skb)->nr_frags - 1)
2313 this_txflags |= TXFLAG_EOP; 2357 this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2333 2377
2334 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); 2378 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2335 return NETDEV_TX_OK; 2379 return NETDEV_TX_OK;
2380
2381out_dma_error:
2382 hp->tx_skbs[hp->tx_new] = NULL;
2383 spin_unlock_irq(&hp->happy_lock);
2384
2385 dev_kfree_skb_any(skb);
2386 dev->stats.tx_dropped++;
2387 return NETDEV_TX_OK;
2336} 2388}
2337 2389
2338static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) 2390static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
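
Note on the sunhme changes above: the streaming DMA API requires that every handle returned by dma_map_single()/skb_frag_dma_map() be validated with dma_mapping_error() before it reaches the hardware, since mapping can fail (for instance when IOMMU space runs out); on failure the driver must unwind mappings it already created and drop the packet rather than post a bogus address. The core of the pattern, reduced to a hypothetical single-buffer transmit:

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t foo_xmit_one(struct device *dmadev, struct sk_buff *skb,
                                    struct net_device *ndev)
    {
            dma_addr_t mapping;

            mapping = dma_map_single(dmadev, skb->data, skb->len, DMA_TO_DEVICE);
            if (dma_mapping_error(dmadev, mapping)) {
                    /* never hand an invalid address to the descriptor ring */
                    dev_kfree_skb_any(skb);
                    ndev->stats.tx_dropped++;
                    return NETDEV_TX_OK;    /* packet consumed (dropped) */
            }

            /* ... fill a descriptor with 'mapping' and kick the hardware ... */
            return NETDEV_TX_OK;
    }
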
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 96a61d169215..d8794488f80a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2022,7 +2022,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2022 parp = of_get_property(slave_node, "phy_id", &lenp); 2022 parp = of_get_property(slave_node, "phy_id", &lenp);
2023 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 2023 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
2024 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); 2024 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
2025 return -EINVAL; 2025 goto no_phy_slave;
2026 } 2026 }
2027 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 2027 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2028 phyid = be32_to_cpup(parp+1); 2028 phyid = be32_to_cpup(parp+1);
@@ -2035,6 +2035,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2035 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2035 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2036 PHY_ID_FMT, mdio->name, phyid); 2036 PHY_ID_FMT, mdio->name, phyid);
2037 2037
2038 slave_data->phy_if = of_get_phy_mode(slave_node);
2039 if (slave_data->phy_if < 0) {
2040 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2041 i);
2042 return slave_data->phy_if;
2043 }
2044
2045no_phy_slave:
2038 mac_addr = of_get_mac_address(slave_node); 2046 mac_addr = of_get_mac_address(slave_node);
2039 if (mac_addr) { 2047 if (mac_addr) {
2040 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 2048 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
@@ -2046,14 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2046 return ret; 2054 return ret;
2047 } 2055 }
2048 } 2056 }
2049
2050 slave_data->phy_if = of_get_phy_mode(slave_node);
2051 if (slave_data->phy_if < 0) {
2052 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2053 i);
2054 return slave_data->phy_if;
2055 }
2056
2057 if (data->dual_emac) { 2057 if (data->dual_emac) {
2058 if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 2058 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
2059 &prop)) { 2059 &prop)) {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 29b3bb410781..bfb0b6ec8c56 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -272,7 +272,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
272 struct sk_buff *skb; 272 struct sk_buff *skb;
273 struct sk_buff_head list; 273 struct sk_buff_head list;
274 274
275 skb_queue_head_init(&list); 275 __skb_queue_head_init(&list);
276 276
277 spin_lock_bh(&port->bc_queue.lock); 277 spin_lock_bh(&port->bc_queue.lock);
278 skb_queue_splice_tail_init(&port->bc_queue, &list); 278 skb_queue_splice_tail_init(&port->bc_queue, &list);
@@ -1082,9 +1082,15 @@ static void macvlan_port_destroy(struct net_device *dev)
1082{ 1082{
1083 struct macvlan_port *port = macvlan_port_get_rtnl(dev); 1083 struct macvlan_port *port = macvlan_port_get_rtnl(dev);
1084 1084
1085 cancel_work_sync(&port->bc_work);
1086 dev->priv_flags &= ~IFF_MACVLAN_PORT; 1085 dev->priv_flags &= ~IFF_MACVLAN_PORT;
1087 netdev_rx_handler_unregister(dev); 1086 netdev_rx_handler_unregister(dev);
1087
1088 /* After this point, no packet can schedule bc_work anymore,
1089 * but we need to cancel it and purge left skbs if any.
1090 */
1091 cancel_work_sync(&port->bc_work);
1092 __skb_queue_purge(&port->bc_queue);
1093
1088 kfree_rcu(port, rcu); 1094 kfree_rcu(port, rcu);
1089} 1095}
1090 1096
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 65e2892342bd..6f226de655a4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -16,6 +16,7 @@
16#include <linux/idr.h> 16#include <linux/idr.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18 18
19#include <net/ipv6.h>
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
21#include <net/sock.h> 22#include <net/sock.h>
@@ -65,7 +66,7 @@ static struct cdev macvtap_cdev;
65static const struct proto_ops macvtap_socket_ops; 66static const struct proto_ops macvtap_socket_ops;
66 67
67#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 68#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
68 NETIF_F_TSO6 | NETIF_F_UFO) 69 NETIF_F_TSO6)
69#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 70#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
70#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 71#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
71 72
@@ -569,7 +570,11 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
569 gso_type = SKB_GSO_TCPV6; 570 gso_type = SKB_GSO_TCPV6;
570 break; 571 break;
571 case VIRTIO_NET_HDR_GSO_UDP: 572 case VIRTIO_NET_HDR_GSO_UDP:
573 pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
574 current->comm);
572 gso_type = SKB_GSO_UDP; 575 gso_type = SKB_GSO_UDP;
576 if (skb->protocol == htons(ETH_P_IPV6))
577 ipv6_proxy_select_ident(skb);
573 break; 578 break;
574 default: 579 default:
575 return -EINVAL; 580 return -EINVAL;
@@ -614,8 +619,6 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
614 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 619 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
615 else if (sinfo->gso_type & SKB_GSO_TCPV6) 620 else if (sinfo->gso_type & SKB_GSO_TCPV6)
616 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 621 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
617 else if (sinfo->gso_type & SKB_GSO_UDP)
618 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
619 else 622 else
620 BUG(); 623 BUG();
621 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 624 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -950,9 +953,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
950 if (arg & TUN_F_TSO6) 953 if (arg & TUN_F_TSO6)
951 feature_mask |= NETIF_F_TSO6; 954 feature_mask |= NETIF_F_TSO6;
952 } 955 }
953
954 if (arg & TUN_F_UFO)
955 feature_mask |= NETIF_F_UFO;
956 } 956 }
957 957
958 /* tun/tap driver inverts the usage for TSO offloads, where 958 /* tun/tap driver inverts the usage for TSO offloads, where
@@ -963,7 +963,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
963 * When user space turns off TSO, we turn off GSO/LRO so that 963 * When user space turns off TSO, we turn off GSO/LRO so that
964 * user-space will not receive TSO frames. 964 * user-space will not receive TSO frames.
965 */ 965 */
966 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) 966 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
967 features |= RX_OFFLOADS; 967 features |= RX_OFFLOADS;
968 else 968 else
969 features &= ~RX_OFFLOADS; 969 features &= ~RX_OFFLOADS;
@@ -1064,7 +1064,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1064 case TUNSETOFFLOAD: 1064 case TUNSETOFFLOAD:
1065 /* let the user check for future flags */ 1065 /* let the user check for future flags */
1066 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 1066 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
1067 TUN_F_TSO_ECN | TUN_F_UFO)) 1067 TUN_F_TSO_ECN))
1068 return -EINVAL; 1068 return -EINVAL;
1069 1069
1070 rtnl_lock(); 1070 rtnl_lock();
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 3e3d7cadeb27..bb4d780c0838 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -58,6 +58,10 @@
58#define MII_M1145_HWCFG_MODE_MASK 0xf 58#define MII_M1145_HWCFG_MODE_MASK 0xf
59#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 59#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000
60 60
61#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4
62#define MII_M1145_HWCFG_MODE_MASK 0xf
63#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000
64
61#define MII_M1111_PHY_LED_CONTROL 0x18 65#define MII_M1111_PHY_LED_CONTROL 0x18
62#define MII_M1111_PHY_LED_DIRECT 0x4100 66#define MII_M1111_PHY_LED_DIRECT 0x4100
63#define MII_M1111_PHY_LED_COMBINE 0x411c 67#define MII_M1111_PHY_LED_COMBINE 0x411c
@@ -708,7 +712,7 @@ static int m88e1145_config_init(struct phy_device *phydev)
708 if (temp < 0) 712 if (temp < 0)
709 return temp; 713 return temp;
710 714
711 temp &= ~(MII_M1145_HWCFG_MODE_MASK); 715 temp &= ~MII_M1145_HWCFG_MODE_MASK;
712 temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK; 716 temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
713 temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO; 717 temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
714 718
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 186ce541c657..7302398f0b1f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -65,6 +65,7 @@
65#include <linux/nsproxy.h> 65#include <linux/nsproxy.h>
66#include <linux/virtio_net.h> 66#include <linux/virtio_net.h>
67#include <linux/rcupdate.h> 67#include <linux/rcupdate.h>
68#include <net/ipv6.h>
68#include <net/net_namespace.h> 69#include <net/net_namespace.h>
69#include <net/netns/generic.h> 70#include <net/netns/generic.h>
70#include <net/rtnetlink.h> 71#include <net/rtnetlink.h>
@@ -174,7 +175,7 @@ struct tun_struct {
174 struct net_device *dev; 175 struct net_device *dev;
175 netdev_features_t set_features; 176 netdev_features_t set_features;
176#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 177#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
177 NETIF_F_TSO6|NETIF_F_UFO) 178 NETIF_F_TSO6)
178 179
179 int vnet_hdr_sz; 180 int vnet_hdr_sz;
180 int sndbuf; 181 int sndbuf;
@@ -1139,6 +1140,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1139 break; 1140 break;
1140 } 1141 }
1141 1142
1143 skb_reset_network_header(skb);
1144
1142 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 1145 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1143 pr_debug("GSO!\n"); 1146 pr_debug("GSO!\n");
1144 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 1147 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1149,8 +1152,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1149 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1152 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1150 break; 1153 break;
1151 case VIRTIO_NET_HDR_GSO_UDP: 1154 case VIRTIO_NET_HDR_GSO_UDP:
1155 {
1156 static bool warned;
1157
1158 if (!warned) {
1159 warned = true;
1160 netdev_warn(tun->dev,
1161 "%s: using disabled UFO feature; please fix this program\n",
1162 current->comm);
1163 }
1152 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1164 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1165 if (skb->protocol == htons(ETH_P_IPV6))
1166 ipv6_proxy_select_ident(skb);
1153 break; 1167 break;
1168 }
1154 default: 1169 default:
1155 tun->dev->stats.rx_frame_errors++; 1170 tun->dev->stats.rx_frame_errors++;
1156 kfree_skb(skb); 1171 kfree_skb(skb);
@@ -1179,7 +1194,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1179 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1194 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1180 } 1195 }
1181 1196
1182 skb_reset_network_header(skb);
1183 skb_probe_transport_header(skb, 0); 1197 skb_probe_transport_header(skb, 0);
1184 1198
1185 rxhash = skb_get_hash(skb); 1199 rxhash = skb_get_hash(skb);
@@ -1251,8 +1265,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1251 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1265 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1252 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1266 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1253 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1267 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1254 else if (sinfo->gso_type & SKB_GSO_UDP)
1255 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1256 else { 1268 else {
1257 pr_err("unexpected GSO type: " 1269 pr_err("unexpected GSO type: "
1258 "0x%x, gso_size %d, hdr_len %d\n", 1270 "0x%x, gso_size %d, hdr_len %d\n",
@@ -1762,11 +1774,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
1762 features |= NETIF_F_TSO6; 1774 features |= NETIF_F_TSO6;
1763 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 1775 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1764 } 1776 }
1765
1766 if (arg & TUN_F_UFO) {
1767 features |= NETIF_F_UFO;
1768 arg &= ~TUN_F_UFO;
1769 }
1770 } 1777 }
1771 1778
1772 /* This gives the user a way to test for new features in future by 1779 /* This gives the user a way to test for new features in future by
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2a32d9167d3b..d3920b54a92c 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -67,6 +67,35 @@ static const u8 mbm_guid[16] = {
67 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, 67 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
68}; 68};
69 69
70static void usbnet_cdc_update_filter(struct usbnet *dev)
71{
72 struct cdc_state *info = (void *) &dev->data;
73 struct usb_interface *intf = info->control;
74
75 u16 cdc_filter =
76 USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED |
77 USB_CDC_PACKET_TYPE_BROADCAST;
78
79 if (dev->net->flags & IFF_PROMISC)
80 cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
81
82 /* FIXME cdc-ether has some multicast code too, though it complains
83 * in routine cases. info->ether describes the multicast support.
84 * Implement that here, manipulating the cdc filter as needed.
85 */
86
87 usb_control_msg(dev->udev,
88 usb_sndctrlpipe(dev->udev, 0),
89 USB_CDC_SET_ETHERNET_PACKET_FILTER,
90 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
91 cdc_filter,
92 intf->cur_altsetting->desc.bInterfaceNumber,
93 NULL,
94 0,
95 USB_CTRL_SET_TIMEOUT
96 );
97}
98
70/* probes control interface, claims data interface, collects the bulk 99/* probes control interface, claims data interface, collects the bulk
71 * endpoints, activates data interface (if needed), maybe sets MTU. 100 * endpoints, activates data interface (if needed), maybe sets MTU.
72 * all pure cdc, except for certain firmware workarounds, and knowing 101 * all pure cdc, except for certain firmware workarounds, and knowing
@@ -347,16 +376,8 @@ next_desc:
347 * don't do reset all the way. So the packet filter should 376 * don't do reset all the way. So the packet filter should
348 * be set to a sane initial value. 377 * be set to a sane initial value.
349 */ 378 */
350 usb_control_msg(dev->udev, 379 usbnet_cdc_update_filter(dev);
351 usb_sndctrlpipe(dev->udev, 0), 380
352 USB_CDC_SET_ETHERNET_PACKET_FILTER,
353 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
354 USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
355 intf->cur_altsetting->desc.bInterfaceNumber,
356 NULL,
357 0,
358 USB_CTRL_SET_TIMEOUT
359 );
360 return 0; 381 return 0;
361 382
362bad_desc: 383bad_desc:
@@ -468,10 +489,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
468 return status; 489 return status;
469 } 490 }
470 491
471 /* FIXME cdc-ether has some multicast code too, though it complains
472 * in routine cases. info->ether describes the multicast support.
473 * Implement that here, manipulating the cdc filter as needed.
474 */
475 return 0; 492 return 0;
476} 493}
477EXPORT_SYMBOL_GPL(usbnet_cdc_bind); 494EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
@@ -482,6 +499,7 @@ static const struct driver_info cdc_info = {
482 .bind = usbnet_cdc_bind, 499 .bind = usbnet_cdc_bind,
483 .unbind = usbnet_cdc_unbind, 500 .unbind = usbnet_cdc_unbind,
484 .status = usbnet_cdc_status, 501 .status = usbnet_cdc_status,
502 .set_rx_mode = usbnet_cdc_update_filter,
485 .manage_power = usbnet_manage_power, 503 .manage_power = usbnet_manage_power,
486}; 504};
487 505
@@ -491,6 +509,7 @@ static const struct driver_info wwan_info = {
491 .bind = usbnet_cdc_bind, 509 .bind = usbnet_cdc_bind,
492 .unbind = usbnet_cdc_unbind, 510 .unbind = usbnet_cdc_unbind,
493 .status = usbnet_cdc_status, 511 .status = usbnet_cdc_status,
512 .set_rx_mode = usbnet_cdc_update_filter,
494 .manage_power = usbnet_manage_power, 513 .manage_power = usbnet_manage_power,
495}; 514};
496 515
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f11633588a0a..8ded08e027fb 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1169,6 +1169,9 @@ static void intr_callback(struct urb *urb)
1169 case -ESHUTDOWN: 1169 case -ESHUTDOWN:
1170 netif_device_detach(tp->netdev); 1170 netif_device_detach(tp->netdev);
1171 case -ENOENT: 1171 case -ENOENT:
1172 case -EPROTO:
1173 netif_info(tp, intr, tp->netdev,
1174 "Stop submitting intr, status %d\n", status);
1172 return; 1175 return;
1173 case -EOVERFLOW: 1176 case -EOVERFLOW:
1174 netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); 1177 netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
@@ -2901,6 +2904,9 @@ static int rtl8152_open(struct net_device *netdev)
2901 if (res) 2904 if (res)
2902 goto out; 2905 goto out;
2903 2906
2907 /* set speed to 0 to avoid autoresume try to submit rx */
2908 tp->speed = 0;
2909
2904 res = usb_autopm_get_interface(tp->intf); 2910 res = usb_autopm_get_interface(tp->intf);
2905 if (res < 0) { 2911 if (res < 0) {
2906 free_all_mem(tp); 2912 free_all_mem(tp);
@@ -2914,6 +2920,8 @@ static int rtl8152_open(struct net_device *netdev)
2914 clear_bit(WORK_ENABLE, &tp->flags); 2920 clear_bit(WORK_ENABLE, &tp->flags);
2915 usb_kill_urb(tp->intr_urb); 2921 usb_kill_urb(tp->intr_urb);
2916 cancel_delayed_work_sync(&tp->schedule); 2922 cancel_delayed_work_sync(&tp->schedule);
2923
2924 /* disable the tx/rx, if the workqueue has enabled them. */
2917 if (tp->speed & LINK_STATUS) 2925 if (tp->speed & LINK_STATUS)
2918 tp->rtl_ops.disable(tp); 2926 tp->rtl_ops.disable(tp);
2919 } 2927 }
@@ -2965,10 +2973,7 @@ static int rtl8152_close(struct net_device *netdev)
2965 * be disable when autoresume occurs, because the 2973 * be disable when autoresume occurs, because the
2966 * netif_running() would be false. 2974 * netif_running() would be false.
2967 */ 2975 */
2968 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 2976 rtl_runtime_suspend_enable(tp, false);
2969 rtl_runtime_suspend_enable(tp, false);
2970 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
2971 }
2972 2977
2973 tasklet_disable(&tp->tl); 2978 tasklet_disable(&tp->tl);
2974 tp->rtl_ops.down(tp); 2979 tp->rtl_ops.down(tp);
@@ -3215,7 +3220,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3215 netif_device_detach(netdev); 3220 netif_device_detach(netdev);
3216 } 3221 }
3217 3222
3218 if (netif_running(netdev)) { 3223 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
3219 clear_bit(WORK_ENABLE, &tp->flags); 3224 clear_bit(WORK_ENABLE, &tp->flags);
3220 usb_kill_urb(tp->intr_urb); 3225 usb_kill_urb(tp->intr_urb);
3221 tasklet_disable(&tp->tl); 3226 tasklet_disable(&tp->tl);
@@ -3263,6 +3268,8 @@ static int rtl8152_resume(struct usb_interface *intf)
3263 set_bit(WORK_ENABLE, &tp->flags); 3268 set_bit(WORK_ENABLE, &tp->flags);
3264 } 3269 }
3265 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3270 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3271 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3272 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3266 } 3273 }
3267 3274
3268 mutex_unlock(&tp->control); 3275 mutex_unlock(&tp->control);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 20615bbd693b..3a6770a65d78 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1052,6 +1052,21 @@ static void __handle_link_change(struct usbnet *dev)
1052 clear_bit(EVENT_LINK_CHANGE, &dev->flags); 1052 clear_bit(EVENT_LINK_CHANGE, &dev->flags);
1053} 1053}
1054 1054
1055static void usbnet_set_rx_mode(struct net_device *net)
1056{
1057 struct usbnet *dev = netdev_priv(net);
1058
1059 usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
1060}
1061
1062static void __handle_set_rx_mode(struct usbnet *dev)
1063{
1064 if (dev->driver_info->set_rx_mode)
1065 (dev->driver_info->set_rx_mode)(dev);
1066
1067 clear_bit(EVENT_SET_RX_MODE, &dev->flags);
1068}
1069
1055/* work that cannot be done in interrupt context uses keventd. 1070/* work that cannot be done in interrupt context uses keventd.
1056 * 1071 *
1057 * NOTE: with 2.5 we could do more of this using completion callbacks, 1072 * NOTE: with 2.5 we could do more of this using completion callbacks,
@@ -1157,6 +1172,10 @@ skip_reset:
1157 if (test_bit (EVENT_LINK_CHANGE, &dev->flags)) 1172 if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
1158 __handle_link_change(dev); 1173 __handle_link_change(dev);
1159 1174
1175 if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
1176 __handle_set_rx_mode(dev);
1177
1178
1160 if (dev->flags) 1179 if (dev->flags)
1161 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); 1180 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
1162} 1181}
@@ -1525,6 +1544,7 @@ static const struct net_device_ops usbnet_netdev_ops = {
1525 .ndo_stop = usbnet_stop, 1544 .ndo_stop = usbnet_stop,
1526 .ndo_start_xmit = usbnet_start_xmit, 1545 .ndo_start_xmit = usbnet_start_xmit,
1527 .ndo_tx_timeout = usbnet_tx_timeout, 1546 .ndo_tx_timeout = usbnet_tx_timeout,
1547 .ndo_set_rx_mode = usbnet_set_rx_mode,
1528 .ndo_change_mtu = usbnet_change_mtu, 1548 .ndo_change_mtu = usbnet_change_mtu,
1529 .ndo_set_mac_address = eth_mac_addr, 1549 .ndo_set_mac_address = eth_mac_addr,
1530 .ndo_validate_addr = eth_validate_addr, 1550 .ndo_validate_addr = eth_validate_addr,
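
Note on the usbnet additions above: ndo_set_rx_mode runs in atomic context (the core holds the address-list lock with BHs disabled), while reprogramming a USB packet filter needs a blocking control transfer, so the request is only recorded and the existing kevent work performs the I/O in process context. The generic shape of that pattern, with hypothetical names:

    #include <linux/bitops.h>
    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    #define FOO_EVENT_RX_MODE       0       /* hypothetical flag bit */

    struct foo_priv {
            unsigned long           flags;
            struct work_struct      kevent;
    };

    static void foo_set_rx_mode(struct net_device *net)     /* atomic context */
    {
            struct foo_priv *priv = netdev_priv(net);

            set_bit(FOO_EVENT_RX_MODE, &priv->flags);
            schedule_work(&priv->kevent);   /* blocking USB I/O happens there */
    }

    static void foo_kevent(struct work_struct *work)        /* process context */
    {
            struct foo_priv *priv = container_of(work, struct foo_priv, kevent);

            if (test_and_clear_bit(FOO_EVENT_RX_MODE, &priv->flags)) {
                    /* ... usb_control_msg(...) to update the packet filter ... */
            }
    }
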
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d75256bd1a6a..ec2a8b41ed41 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -491,8 +491,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
491 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 491 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
492 break; 492 break;
493 case VIRTIO_NET_HDR_GSO_UDP: 493 case VIRTIO_NET_HDR_GSO_UDP:
494 {
495 static bool warned;
496
497 if (!warned) {
498 warned = true;
499 netdev_warn(dev,
500 "host using disabled UFO feature; please fix it\n");
501 }
494 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 502 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
495 break; 503 break;
504 }
496 case VIRTIO_NET_HDR_GSO_TCPV6: 505 case VIRTIO_NET_HDR_GSO_TCPV6:
497 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 506 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
498 break; 507 break;
@@ -881,8 +890,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
881 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
882 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 891 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
883 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 892 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
884 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
885 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
886 else 893 else
887 BUG(); 894 BUG();
888 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 895 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1705,7 +1712,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1705 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1712 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1706 1713
1707 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1714 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1708 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 1715 dev->hw_features |= NETIF_F_TSO
1709 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 1716 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
1710 } 1717 }
1711 /* Individual feature bits: what can host handle? */ 1718 /* Individual feature bits: what can host handle? */
@@ -1715,11 +1722,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1715 dev->hw_features |= NETIF_F_TSO6; 1722 dev->hw_features |= NETIF_F_TSO6;
1716 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 1723 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1717 dev->hw_features |= NETIF_F_TSO_ECN; 1724 dev->hw_features |= NETIF_F_TSO_ECN;
1718 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1719 dev->hw_features |= NETIF_F_UFO;
1720 1725
1721 if (gso) 1726 if (gso)
1722 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); 1727 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
1723 /* (!csum && gso) case will be fixed by register_netdev() */ 1728 /* (!csum && gso) case will be fixed by register_netdev() */
1724 } 1729 }
1725 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 1730 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1757,8 +1762,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1757 /* If we can receive ANY GSO packets, we must allocate large ones. */ 1762 /* If we can receive ANY GSO packets, we must allocate large ones. */
1758 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1759 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 1764 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1760 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || 1765 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
1761 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1762 vi->big_packets = true; 1766 vi->big_packets = true;
1763 1767
1764 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1768 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1952,9 +1956,9 @@ static struct virtio_device_id id_table[] = {
1952static unsigned int features[] = { 1956static unsigned int features[] = {
1953 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 1957 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1954 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 1958 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1955 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 1959 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
1956 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 1960 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1957 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 1961 VIRTIO_NET_F_GUEST_ECN,
1958 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1962 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1959 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1963 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1960 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 1964 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e5ba6faf3281..86907e5ba6ca 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -80,6 +80,7 @@ struct reg_dmn_pair_mapping {
80 80
81struct ath_regulatory { 81struct ath_regulatory {
82 char alpha2[2]; 82 char alpha2[2];
83 enum nl80211_dfs_regions region;
83 u16 country_code; 84 u16 country_code;
84 u16 max_power_level; 85 u16 max_power_level;
85 u16 current_rd; 86 u16 current_rd;
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index c6dd7f1fed65..33b0c7aef2ea 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -368,11 +368,11 @@ void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
368{ 368{
369 struct ath_regulatory *reg = ath9k_hw_regulatory(ah); 369 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
370 370
371 if (reg->power_limit != new_txpow) { 371 if (reg->power_limit != new_txpow)
372 ath9k_hw_set_txpowerlimit(ah, new_txpow, false); 372 ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
373 /* read back in case value is clamped */ 373
374 *txpower = reg->max_power_level; 374 /* read back in case value is clamped */
375 } 375 *txpower = reg->max_power_level;
376} 376}
377EXPORT_SYMBOL(ath9k_cmn_update_txpow); 377EXPORT_SYMBOL(ath9k_cmn_update_txpow);
378 378
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 46f20a309b5f..5c45e787814e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -455,7 +455,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
455 "%2d %2x %1x %2x %2x\n", 455 "%2d %2x %1x %2x %2x\n",
456 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, 456 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
457 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3), 457 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
458 val[2] & (0x7 << (i * 3)) >> (i * 3), 458 (val[2] & (0x7 << (i * 3))) >> (i * 3),
459 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); 459 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
460 } 460 }
461 461
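
Note on the one-character debug.c fix above: in C the shift operators bind tighter than bitwise AND, so without the added parentheses the expression reduces to val[2] & 0x7 and the per-queue field is never shifted into place. A tiny userspace check with a made-up register value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int val = 0x150;       /* 3-bit fields: f0=0, f1=2, f2=5 */
            int i = 2;

            /* '>>' binds tighter than '&': this is just val & 0x7 (field 0) */
            unsigned int wrong = val & (0x7 << (i * 3)) >> (i * 3);

            /* parenthesized as in the fix: mask first, then shift down */
            unsigned int right = (val & (0x7 << (i * 3))) >> (i * 3);

            printf("wrong=%u right=%u\n", wrong, right);    /* wrong=0 right=5 */
            return 0;
    }
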
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 156a944134dc..3bd030494986 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -734,6 +734,32 @@ static const struct ieee80211_iface_combination if_comb[] = {
734#endif 734#endif
735}; 735};
736 736
737#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
738static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
739{
740 struct ath_hw *ah = sc->sc_ah;
741 struct ath_common *common = ath9k_hw_common(ah);
742
743 if (!ath9k_is_chanctx_enabled())
744 return;
745
746 hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
747 hw->queues = ATH9K_NUM_TX_QUEUES;
748 hw->offchannel_tx_hw_queue = hw->queues - 1;
749 hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
750 hw->wiphy->iface_combinations = if_comb_multi;
751 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
752 hw->wiphy->max_scan_ssids = 255;
753 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
754 hw->wiphy->max_remain_on_channel_duration = 10000;
755 hw->chanctx_data_size = sizeof(void *);
756 hw->extra_beacon_tailroom =
757 sizeof(struct ieee80211_p2p_noa_attr) + 9;
758
759 ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
760}
761#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
762
737static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 763static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
738{ 764{
739 struct ath_hw *ah = sc->sc_ah; 765 struct ath_hw *ah = sc->sc_ah;
@@ -746,7 +772,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
746 IEEE80211_HW_SPECTRUM_MGMT | 772 IEEE80211_HW_SPECTRUM_MGMT |
747 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 773 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
748 IEEE80211_HW_SUPPORTS_RC_TABLE | 774 IEEE80211_HW_SUPPORTS_RC_TABLE |
749 IEEE80211_HW_QUEUE_CONTROL |
750 IEEE80211_HW_SUPPORTS_HT_CCK_RATES; 775 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
751 776
752 if (ath9k_ps_enable) 777 if (ath9k_ps_enable)
@@ -781,24 +806,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
781 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 806 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
782 } 807 }
783 808
784#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
785
786 if (ath9k_is_chanctx_enabled()) {
787 hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
788 hw->wiphy->iface_combinations = if_comb_multi;
789 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
790 hw->wiphy->max_scan_ssids = 255;
791 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
792 hw->wiphy->max_remain_on_channel_duration = 10000;
793 hw->chanctx_data_size = sizeof(void *);
794 hw->extra_beacon_tailroom =
795 sizeof(struct ieee80211_p2p_noa_attr) + 9;
796
797 ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
798 }
799
800#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
801
802 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 809 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
803 810
804 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 811 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -808,12 +815,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
808 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 815 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
809 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 816 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
810 817
811 /* allow 4 queues per channel context + 818 hw->queues = 4;
812 * 1 cab queue + 1 offchannel tx queue
813 */
814 hw->queues = ATH9K_NUM_TX_QUEUES;
815 /* last queue for offchannel */
816 hw->offchannel_tx_hw_queue = hw->queues - 1;
817 hw->max_rates = 4; 819 hw->max_rates = 4;
818 hw->max_listen_interval = 10; 820 hw->max_listen_interval = 10;
819 hw->max_rate_tries = 10; 821 hw->max_rate_tries = 10;
@@ -837,6 +839,9 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
837 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 839 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
838 &common->sbands[IEEE80211_BAND_5GHZ]; 840 &common->sbands[IEEE80211_BAND_5GHZ];
839 841
842#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
843 ath9k_set_mcc_capab(sc, hw);
844#endif
840 ath9k_init_wow(hw); 845 ath9k_init_wow(hw);
841 ath9k_cmn_reload_chainmask(ah); 846 ath9k_cmn_reload_chainmask(ah);
842 847
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6f6a974f7fdb..30c66dfcd7a0 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1162,6 +1162,9 @@ static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
1162{ 1162{
1163 int i; 1163 int i;
1164 1164
1165 if (!ath9k_is_chanctx_enabled())
1166 return;
1167
1165 for (i = 0; i < IEEE80211_NUM_ACS; i++) 1168 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1166 vif->hw_queue[i] = i; 1169 vif->hw_queue[i] = i;
1167 1170
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 493a183d0aaf..d6e54a3c88f6 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -169,7 +169,10 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
169 169
170 if (txq->stopped && 170 if (txq->stopped &&
171 txq->pending_frames < sc->tx.txq_max_pending[q]) { 171 txq->pending_frames < sc->tx.txq_max_pending[q]) {
172 ieee80211_wake_queue(sc->hw, info->hw_queue); 172 if (ath9k_is_chanctx_enabled())
173 ieee80211_wake_queue(sc->hw, info->hw_queue);
174 else
175 ieee80211_wake_queue(sc->hw, q);
173 txq->stopped = false; 176 txq->stopped = false;
174 } 177 }
175} 178}
@@ -2247,7 +2250,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2247 fi->txq = q; 2250 fi->txq = q;
2248 if (++txq->pending_frames > sc->tx.txq_max_pending[q] && 2251 if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
2249 !txq->stopped) { 2252 !txq->stopped) {
2250 ieee80211_stop_queue(sc->hw, info->hw_queue); 2253 if (ath9k_is_chanctx_enabled())
2254 ieee80211_stop_queue(sc->hw, info->hw_queue);
2255 else
2256 ieee80211_stop_queue(sc->hw, q);
2251 txq->stopped = true; 2257 txq->stopped = true;
2252 } 2258 }
2253 } 2259 }
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 415393dfb6fc..06ea6cc9e30a 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -515,6 +515,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
515 if (!request) 515 if (!request)
516 return; 516 return;
517 517
518 reg->region = request->dfs_region;
518 switch (request->initiator) { 519 switch (request->initiator) {
519 case NL80211_REGDOM_SET_BY_CORE: 520 case NL80211_REGDOM_SET_BY_CORE:
520 /* 521 /*
@@ -779,6 +780,19 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
779 return SD_NO_CTL; 780 return SD_NO_CTL;
780 } 781 }
781 782
783 if (ath_regd_get_eepromRD(reg) == CTRY_DEFAULT) {
784 switch (reg->region) {
785 case NL80211_DFS_FCC:
786 return CTL_FCC;
787 case NL80211_DFS_ETSI:
788 return CTL_ETSI;
789 case NL80211_DFS_JP:
790 return CTL_MKK;
791 default:
792 break;
793 }
794 }
795
782 switch (band) { 796 switch (band) {
783 case IEEE80211_BAND_2GHZ: 797 case IEEE80211_BAND_2GHZ:
784 return reg->regpair->reg_2ghz_ctl; 798 return reg->regpair->reg_2ghz_ctl;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index f55f625fd06b..d20d4e6f391a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -670,7 +670,6 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
670 struct brcmf_sdio_dev *sdiodev) 670 struct brcmf_sdio_dev *sdiodev)
671{ 671{
672 int i; 672 int i;
673 uint fw_len, nv_len;
674 char end; 673 char end;
675 674
676 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) { 675 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
@@ -684,25 +683,25 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
684 return -ENODEV; 683 return -ENODEV;
685 } 684 }
686 685
687 fw_len = sizeof(sdiodev->fw_name) - 1;
688 nv_len = sizeof(sdiodev->nvram_name) - 1;
689 /* check if firmware path is provided by module parameter */ 686 /* check if firmware path is provided by module parameter */
690 if (brcmf_firmware_path[0] != '\0') { 687 if (brcmf_firmware_path[0] != '\0') {
691 strncpy(sdiodev->fw_name, brcmf_firmware_path, fw_len); 688 strlcpy(sdiodev->fw_name, brcmf_firmware_path,
692 strncpy(sdiodev->nvram_name, brcmf_firmware_path, nv_len); 689 sizeof(sdiodev->fw_name));
693 fw_len -= strlen(sdiodev->fw_name); 690 strlcpy(sdiodev->nvram_name, brcmf_firmware_path,
694 nv_len -= strlen(sdiodev->nvram_name); 691 sizeof(sdiodev->nvram_name));
695 692
696 end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1]; 693 end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
697 if (end != '/') { 694 if (end != '/') {
698 strncat(sdiodev->fw_name, "/", fw_len); 695 strlcat(sdiodev->fw_name, "/",
699 strncat(sdiodev->nvram_name, "/", nv_len); 696 sizeof(sdiodev->fw_name));
700 fw_len--; 697 strlcat(sdiodev->nvram_name, "/",
701 nv_len--; 698 sizeof(sdiodev->nvram_name));
702 } 699 }
703 } 700 }
704 strncat(sdiodev->fw_name, brcmf_fwname_data[i].bin, fw_len); 701 strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin,
705 strncat(sdiodev->nvram_name, brcmf_fwname_data[i].nv, nv_len); 702 sizeof(sdiodev->fw_name));
703 strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv,
704 sizeof(sdiodev->nvram_name));
706 705
707 return 0; 706 return 0;
708} 707}
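The dhd_sdio.c hunk above drops the manually tracked fw_len/nv_len remainders and converts the strncpy()/strncat() pairs to strlcpy()/strlcat(), which always take the full destination size and keep the result NUL-terminated. A minimal sketch of the same idea, assuming a hypothetical fixed-size path buffer:

    #include <linux/string.h>

    /* Hypothetical helper: build "<dir>/<file>" into a fixed buffer.
     * strlcpy()/strlcat() take sizeof(dst) every time and truncate
     * safely, so no running "bytes left" counter is needed as with
     * strncpy()/strncat(). */
    static void build_fw_path(char *dst, size_t dst_len,
                              const char *dir, const char *file)
    {
        strlcpy(dst, dir, dst_len);
        if (dst[0] != '\0' && dst[strlen(dst) - 1] != '/')
            strlcat(dst, "/", dst_len);
        strlcat(dst, file, dst_len);
    }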
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 2364a3c09b9e..cae692ff1013 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1095,6 +1095,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1095 u32 queues, bool drop) 1095 u32 queues, bool drop)
1096{ 1096{
1097 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1097 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1098 u32 scd_queues;
1098 1099
1099 mutex_lock(&priv->mutex); 1100 mutex_lock(&priv->mutex);
1100 IWL_DEBUG_MAC80211(priv, "enter\n"); 1101 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1108,18 +1109,19 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1108 goto done; 1109 goto done;
1109 } 1110 }
1110 1111
1111 /* 1112 scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1;
1112 * mac80211 will not push any more frames for transmit 1113 scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
1113 * until the flush is completed 1114 BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
1114 */ 1115
1115 if (drop) { 1116 if (vif)
1116 IWL_DEBUG_MAC80211(priv, "send flush command\n"); 1117 scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
1117 if (iwlagn_txfifo_flush(priv, 0)) { 1118
1118 IWL_ERR(priv, "flush request fail\n"); 1119 IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
1119 goto done; 1120 if (iwlagn_txfifo_flush(priv, scd_queues)) {
1120 } 1121 IWL_ERR(priv, "flush request fail\n");
1122 goto done;
1121 } 1123 }
1122 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); 1124 IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
1123 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); 1125 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
1124done: 1126done:
1125 mutex_unlock(&priv->mutex); 1127 mutex_unlock(&priv->mutex);
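In the dvm mac80211.c hunk, iwlagn_mac_flush() stops flushing with a catch-all mask and instead builds an explicit SCD queue bitmap: every queue below num_of_queues, minus the two command queues, minus the VO queue of the vif being flushed. The bitmap arithmetic in isolation, with made-up queue numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        unsigned int num_of_queues = 20;                 /* illustrative */
        unsigned int cmd_q = 4, pan_cmd_q = 9, vif_vo_q = 3;

        uint32_t scd_queues = BIT(num_of_queues) - 1;    /* queues 0..19 */
        scd_queues &= ~(BIT(cmd_q) | BIT(pan_cmd_q) | BIT(vif_vo_q));

        printf("flushing SCD queues: 0x%x\n", scd_queues);
        return 0;
    }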
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index e4351487ca72..d2b7234b1c73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -82,7 +82,8 @@
82#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */ 82#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
83 83
84#define IWL8000_FW_PRE "iwlwifi-8000" 84#define IWL8000_FW_PRE "iwlwifi-8000"
85#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode" 85#define IWL8000_MODULE_FIRMWARE(api) \
86 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
86 87
87#define NVM_HW_SECTION_NUM_FAMILY_8000 10 88#define NVM_HW_SECTION_NUM_FAMILY_8000 10
88#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin" 89#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin"
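The iwl-8000.c change inserts a "-" between the firmware prefix and the stringified API version, so the requested file name becomes "iwlwifi-8000-<api>.ucode" instead of "iwlwifi-8000<api>.ucode". How the adjacent string literals and __stringify() concatenate, shown with a plain-C stand-in for linux/stringify.h and an arbitrary API number:

    #include <stdio.h>

    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)  /* same idea as linux/stringify.h */

    #define IWL8000_FW_PRE "iwlwifi-8000"
    #define IWL8000_MODULE_FIRMWARE(api) \
        IWL8000_FW_PRE "-" __stringify(api) ".ucode"

    int main(void)
    {
        puts(IWL8000_MODULE_FIRMWARE(10));  /* prints iwlwifi-8000-10.ucode */
        return 0;
    }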
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 9eb85249e89c..d8fc548c0d6c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -563,6 +563,7 @@ enum iwl_trans_state {
563 * Set during transport allocation. 563 * Set during transport allocation.
564 * @hw_id_str: a string with info about HW ID. Set during transport allocation. 564 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
565 * @pm_support: set to true in start_hw if link pm is supported 565 * @pm_support: set to true in start_hw if link pm is supported
566 * @ltr_enabled: set to true if the LTR is enabled
566 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. 567 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
567 * The user should use iwl_trans_{alloc,free}_tx_cmd. 568 * The user should use iwl_trans_{alloc,free}_tx_cmd.
568 * @dev_cmd_headroom: room needed for the transport's private use before the 569 * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -589,6 +590,7 @@ struct iwl_trans {
589 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; 590 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
590 591
591 bool pm_support; 592 bool pm_support;
593 bool ltr_enabled;
592 594
593 /* The following fields are internal only */ 595 /* The following fields are internal only */
594 struct kmem_cache *dev_cmd_pool; 596 struct kmem_cache *dev_cmd_pool;
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 8df2021f9856..da2ffb785194 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -303,8 +303,8 @@ static const __le64 iwl_ci_mask[][3] = {
303}; 303};
304 304
305static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { 305static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
306 cpu_to_le32(0x28412201), 306 cpu_to_le32(0x2e402280),
307 cpu_to_le32(0x11118451), 307 cpu_to_le32(0x7711a751),
308}; 308};
309 309
310struct corunning_block_luts { 310struct corunning_block_luts {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 585c0ab4a3ec..8a1d2f33d5b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -291,8 +291,8 @@ static const __le64 iwl_ci_mask[][3] = {
291}; 291};
292 292
293static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { 293static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
294 cpu_to_le32(0x28412201), 294 cpu_to_le32(0x2e402280),
295 cpu_to_le32(0x11118451), 295 cpu_to_le32(0x7711a751),
296}; 296};
297 297
298struct corunning_block_luts { 298struct corunning_block_luts {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 27dd86395b39..2fd8ad4633e0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -68,13 +68,46 @@
68 68
69/* Power Management Commands, Responses, Notifications */ 69/* Power Management Commands, Responses, Notifications */
70 70
71/**
72 * enum iwl_ltr_config_flags - masks for LTR config command flags
73 * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
74 * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
75 * memory access
76 * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
77 * reg change
78 * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
79 * D0 to D3
80 * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
81 * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register
82 * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
83 */
84enum iwl_ltr_config_flags {
85 LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
86 LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
87 LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
88 LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
89 LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
90 LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
91 LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
92};
93
94/**
95 * struct iwl_ltr_config_cmd - configures the LTR
96 * @flags: See %enum iwl_ltr_config_flags
97 */
98struct iwl_ltr_config_cmd {
99 __le32 flags;
100 __le32 static_long;
101 __le32 static_short;
102} __packed;
103
71/* Radio LP RX Energy Threshold measured in dBm */ 104/* Radio LP RX Energy Threshold measured in dBm */
72#define POWER_LPRX_RSSI_THRESHOLD 75 105#define POWER_LPRX_RSSI_THRESHOLD 75
73#define POWER_LPRX_RSSI_THRESHOLD_MAX 94 106#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
74#define POWER_LPRX_RSSI_THRESHOLD_MIN 30 107#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
75 108
76/** 109/**
77 * enum iwl_scan_flags - masks for power table command flags 110 * enum iwl_power_flags - masks for power table command flags
78 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off 111 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
79 * receiver and transmitter. '0' - does not allow. 112 * receiver and transmitter. '0' - does not allow.
80 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, 113 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 667a92274c87..c62575d86bcd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -157,6 +157,7 @@ enum {
157 /* Power - legacy power table command */ 157 /* Power - legacy power table command */
158 POWER_TABLE_CMD = 0x77, 158 POWER_TABLE_CMD = 0x77,
159 PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, 159 PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
160 LTR_CONFIG = 0xee,
160 161
161 /* Thermal Throttling*/ 162 /* Thermal Throttling*/
162 REPLY_THERMAL_MNG_BACKOFF = 0x7e, 163 REPLY_THERMAL_MNG_BACKOFF = 0x7e,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 23fd711a67e4..e0d9f19650b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -480,6 +480,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
480 /* Initialize tx backoffs to the minimal possible */ 480 /* Initialize tx backoffs to the minimal possible */
481 iwl_mvm_tt_tx_backoff(mvm, 0); 481 iwl_mvm_tt_tx_backoff(mvm, 0);
482 482
483 if (mvm->trans->ltr_enabled) {
484 struct iwl_ltr_config_cmd cmd = {
485 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
486 };
487
488 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
489 sizeof(cmd), &cmd));
490 }
491
483 ret = iwl_mvm_power_update_device(mvm); 492 ret = iwl_mvm_power_update_device(mvm);
484 if (ret) 493 if (ret)
485 goto error; 494 goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c7a73c68bdab..585fe5b7100f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -526,7 +526,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
526 } 526 }
527 527
528 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && 528 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
529 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) 529 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
530 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
530 goto drop; 531 goto drop;
531 532
532 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ 533 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
@@ -1734,6 +1735,13 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
1734 if (changes & BSS_CHANGED_BEACON && 1735 if (changes & BSS_CHANGED_BEACON &&
1735 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 1736 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
1736 IWL_WARN(mvm, "Failed updating beacon data\n"); 1737 IWL_WARN(mvm, "Failed updating beacon data\n");
1738
1739 if (changes & BSS_CHANGED_TXPOWER) {
1740 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
1741 bss_conf->txpower);
1742 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
1743 }
1744
1737} 1745}
1738 1746
1739static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 1747static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -2367,14 +2375,19 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
2367 /* Set the node address */ 2375 /* Set the node address */
2368 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); 2376 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
2369 2377
2378 lockdep_assert_held(&mvm->mutex);
2379
2380 spin_lock_bh(&mvm->time_event_lock);
2381
2382 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
2383 spin_unlock_bh(&mvm->time_event_lock);
2384 return -EIO;
2385 }
2386
2370 te_data->vif = vif; 2387 te_data->vif = vif;
2371 te_data->duration = duration; 2388 te_data->duration = duration;
2372 te_data->id = HOT_SPOT_CMD; 2389 te_data->id = HOT_SPOT_CMD;
2373 2390
2374 lockdep_assert_held(&mvm->mutex);
2375
2376 spin_lock_bh(&mvm->time_event_lock);
2377 list_add_tail(&te_data->list, &mvm->time_event_list);
2378 spin_unlock_bh(&mvm->time_event_lock); 2391 spin_unlock_bh(&mvm->time_event_lock);
2379 2392
2380 /* 2393 /*
@@ -2430,22 +2443,23 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
2430 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 2443 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
2431 duration, type); 2444 duration, type);
2432 2445
2446 mutex_lock(&mvm->mutex);
2447
2433 switch (vif->type) { 2448 switch (vif->type) {
2434 case NL80211_IFTYPE_STATION: 2449 case NL80211_IFTYPE_STATION:
2435 /* Use aux roc framework (HS20) */ 2450 /* Use aux roc framework (HS20) */
2436 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 2451 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
2437 vif, duration); 2452 vif, duration);
2438 return ret; 2453 goto out_unlock;
2439 case NL80211_IFTYPE_P2P_DEVICE: 2454 case NL80211_IFTYPE_P2P_DEVICE:
2440 /* handle below */ 2455 /* handle below */
2441 break; 2456 break;
2442 default: 2457 default:
2443 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); 2458 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
2444 return -EINVAL; 2459 ret = -EINVAL;
2460 goto out_unlock;
2445 } 2461 }
2446 2462
2447 mutex_lock(&mvm->mutex);
2448
2449 for (i = 0; i < NUM_PHY_CTX; i++) { 2463 for (i = 0; i < NUM_PHY_CTX; i++) {
2450 phy_ctxt = &mvm->phy_ctxts[i]; 2464 phy_ctxt = &mvm->phy_ctxts[i];
2451 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 2465 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
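The two mac80211.c hunks above also reorder the ROC locking: mvm->mutex is taken before the vif-type switch (with goto out_unlock on every early exit), and iwl_mvm_send_aux_roc_cmd() now checks for an already-claimed HOT_SPOT_CMD slot and fills in vif/duration/id inside a single time_event_lock section, returning -EIO on the race. The general check-and-claim-under-one-lock shape, with invented structure and field names:

    /* Sketch only: claim a single shared slot atomically. The struct,
     * fields and error code are illustrative, not the mvm ones. */
    struct roc_slot {
        spinlock_t lock;
        bool in_use;
        int id;
    };

    static int roc_slot_claim(struct roc_slot *slot, int id)
    {
        int ret = 0;

        spin_lock_bh(&slot->lock);
        if (slot->in_use) {
            ret = -EBUSY;          /* someone raced us to the slot */
        } else {
            slot->in_use = true;   /* checked and claimed under one lock */
            slot->id = id;
        }
        spin_unlock_bh(&slot->lock);

        return ret;
    }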
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 15aa298ee79c..48cb25a93591 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -336,6 +336,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
336 CMD(DTS_MEASUREMENT_NOTIFICATION), 336 CMD(DTS_MEASUREMENT_NOTIFICATION),
337 CMD(REPLY_THERMAL_MNG_BACKOFF), 337 CMD(REPLY_THERMAL_MNG_BACKOFF),
338 CMD(MAC_PM_POWER_TABLE), 338 CMD(MAC_PM_POWER_TABLE),
339 CMD(LTR_CONFIG),
339 CMD(BT_COEX_CI), 340 CMD(BT_COEX_CI),
340 CMD(BT_COEX_UPDATE_SW_BOOST), 341 CMD(BT_COEX_UPDATE_SW_BOOST),
341 CMD(BT_COEX_UPDATE_CORUN_LUT), 342 CMD(BT_COEX_UPDATE_CORUN_LUT),
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index cb85e63c20aa..b280d5d87127 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -459,7 +459,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
459 basic_ssid ? 1 : 0); 459 basic_ssid ? 1 : 0);
460 460
461 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | 461 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
462 TX_CMD_FLG_BT_DIS); 462 3 << TX_CMD_FLG_BT_PRIO_POS);
463
463 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; 464 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
464 cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 465 cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
465 cmd->tx_cmd.rate_n_flags = 466 cmd->tx_cmd.rate_n_flags =
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index b7f9e61d14e2..6dfad230be5e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -305,8 +305,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
305 te_data->running = false; 305 te_data->running = false;
306 te_data->vif = NULL; 306 te_data->vif = NULL;
307 te_data->uid = 0; 307 te_data->uid = 0;
308 te_data->id = TE_MAX;
308 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { 309 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
309 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
310 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); 310 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
311 te_data->running = true; 311 te_data->running = true;
312 ieee80211_ready_on_channel(mvm->hw); /* Start TE */ 312 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 1cb793a498ac..c6a517c771df 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -175,14 +175,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
175 175
176 /* 176 /*
177 * for data packets, rate info comes from the table inside the fw. This 177 * for data packets, rate info comes from the table inside the fw. This
178 * table is controlled by LINK_QUALITY commands. Exclude ctrl port 178 * table is controlled by LINK_QUALITY commands
179 * frames like EAPOLs which should be treated as mgmt frames. This
180 * avoids them being sent initially in high rates which increases the
181 * chances for completion of the 4-Way handshake.
182 */ 179 */
183 180
184 if (ieee80211_is_data(fc) && sta && 181 if (ieee80211_is_data(fc) && sta) {
185 !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
186 tx_cmd->initial_rate_index = 0; 182 tx_cmd->initial_rate_index = 0;
187 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 183 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
188 return; 184 return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 1393bac0025c..3781b029e54a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -174,6 +174,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
174{ 174{
175 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 175 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
176 u16 lctl; 176 u16 lctl;
177 u16 cap;
177 178
178 /* 179 /*
179 * HW bug W/A for instability in PCIe bus L0S->L1 transition. 180 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
@@ -184,16 +185,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
184 * power savings, even without L1. 185 * power savings, even without L1.
185 */ 186 */
186 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); 187 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
187 if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { 188 if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
188 /* L1-ASPM enabled; disable(!) L0S */
189 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 189 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
190 dev_info(trans->dev, "L1 Enabled; Disabling L0S\n"); 190 else
191 } else {
192 /* L1-ASPM disabled; enable(!) L0S */
193 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 191 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
194 dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
195 }
196 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); 192 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
193
194 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
195 trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
196 dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
197 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
198 trans->ltr_enabled ? "En" : "Dis");
197} 199}
198 200
199/* 201/*
@@ -428,7 +430,7 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
428 ret = iwl_poll_bit(trans, CSR_RESET, 430 ret = iwl_poll_bit(trans, CSR_RESET,
429 CSR_RESET_REG_FLAG_MASTER_DISABLED, 431 CSR_RESET_REG_FLAG_MASTER_DISABLED,
430 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 432 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
431 if (ret) 433 if (ret < 0)
432 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); 434 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
433 435
434 IWL_DEBUG_INFO(trans, "stop master\n"); 436 IWL_DEBUG_INFO(trans, "stop master\n");
@@ -544,7 +546,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
544 msleep(25); 546 msleep(25);
545 } 547 }
546 548
547 IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter); 549 IWL_ERR(trans, "Couldn't prepare the card\n");
548 550
549 return ret; 551 return ret;
550} 552}
@@ -1043,7 +1045,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1043 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1045 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1044 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1046 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1045 25000); 1047 25000);
1046 if (ret) { 1048 if (ret < 0) {
1047 IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); 1049 IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
1048 return ret; 1050 return ret;
1049 } 1051 }
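In the trans.c hunk, iwl_pcie_apm_config() additionally reads PCI Express Device Control 2 and records whether the platform left LTR (Latency Tolerance Reporting) enabled in trans->ltr_enabled; the fw.c hunk earlier in this diff only sends the LTR_CONFIG command to the firmware when that flag is set. The capability read on its own, as a small sketch around the kernel helpers used in the hunk:

    #include <linux/pci.h>

    /* Sketch: report whether the platform/BIOS enabled LTR for this
     * device; the caller decides what to do with the answer. */
    static bool pcie_ltr_is_enabled(struct pci_dev *pdev)
    {
        u16 devctl2 = 0;

        pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &devctl2);
        return devctl2 & PCI_EXP_DEVCTL2_LTR_EN;
    }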
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 40057079ffb9..5ef5a0eeba50 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -196,6 +196,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
196 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win); 196 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
197 197
198 del_timer_sync(&tbl->timer_context.timer); 198 del_timer_sync(&tbl->timer_context.timer);
199 tbl->timer_context.timer_is_set = false;
199 200
200 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); 201 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
201 list_del(&tbl->list); 202 list_del(&tbl->list);
@@ -297,6 +298,7 @@ mwifiex_flush_data(unsigned long context)
297 (struct reorder_tmr_cnxt *) context; 298 (struct reorder_tmr_cnxt *) context;
298 int start_win, seq_num; 299 int start_win, seq_num;
299 300
301 ctx->timer_is_set = false;
300 seq_num = mwifiex_11n_find_last_seq_num(ctx); 302 seq_num = mwifiex_11n_find_last_seq_num(ctx);
301 303
302 if (seq_num < 0) 304 if (seq_num < 0)
@@ -385,6 +387,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
385 387
386 new_node->timer_context.ptr = new_node; 388 new_node->timer_context.ptr = new_node;
387 new_node->timer_context.priv = priv; 389 new_node->timer_context.priv = priv;
390 new_node->timer_context.timer_is_set = false;
388 391
389 init_timer(&new_node->timer_context.timer); 392 init_timer(&new_node->timer_context.timer);
390 new_node->timer_context.timer.function = mwifiex_flush_data; 393 new_node->timer_context.timer.function = mwifiex_flush_data;
@@ -399,6 +402,22 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
399 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); 402 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
400} 403}
401 404
405static void
406mwifiex_11n_rxreorder_timer_restart(struct mwifiex_rx_reorder_tbl *tbl)
407{
408 u32 min_flush_time;
409
410 if (tbl->win_size >= MWIFIEX_BA_WIN_SIZE_32)
411 min_flush_time = MIN_FLUSH_TIMER_15_MS;
412 else
413 min_flush_time = MIN_FLUSH_TIMER_MS;
414
415 mod_timer(&tbl->timer_context.timer,
416 jiffies + msecs_to_jiffies(min_flush_time * tbl->win_size));
417
418 tbl->timer_context.timer_is_set = true;
419}
420
402/* 421/*
403 * This function prepares command for adding a BA request. 422 * This function prepares command for adding a BA request.
404 * 423 *
@@ -523,31 +542,31 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
523 u8 *ta, u8 pkt_type, void *payload) 542 u8 *ta, u8 pkt_type, void *payload)
524{ 543{
525 struct mwifiex_rx_reorder_tbl *tbl; 544 struct mwifiex_rx_reorder_tbl *tbl;
526 int start_win, end_win, win_size; 545 int prev_start_win, start_win, end_win, win_size;
527 u16 pkt_index; 546 u16 pkt_index;
528 bool init_window_shift = false; 547 bool init_window_shift = false;
548 int ret = 0;
529 549
530 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); 550 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
531 if (!tbl) { 551 if (!tbl) {
532 if (pkt_type != PKT_TYPE_BAR) 552 if (pkt_type != PKT_TYPE_BAR)
533 mwifiex_11n_dispatch_pkt(priv, payload); 553 mwifiex_11n_dispatch_pkt(priv, payload);
534 return 0; 554 return ret;
535 } 555 }
536 556
537 if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) { 557 if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
538 mwifiex_11n_dispatch_pkt(priv, payload); 558 mwifiex_11n_dispatch_pkt(priv, payload);
539 return 0; 559 return ret;
540 } 560 }
541 561
542 start_win = tbl->start_win; 562 start_win = tbl->start_win;
563 prev_start_win = start_win;
543 win_size = tbl->win_size; 564 win_size = tbl->win_size;
544 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); 565 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
545 if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) { 566 if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
546 init_window_shift = true; 567 init_window_shift = true;
547 tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT; 568 tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
548 } 569 }
549 mod_timer(&tbl->timer_context.timer,
550 jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
551 570
552 if (tbl->flags & RXREOR_FORCE_NO_DROP) { 571 if (tbl->flags & RXREOR_FORCE_NO_DROP) {
553 dev_dbg(priv->adapter->dev, 572 dev_dbg(priv->adapter->dev,
@@ -568,11 +587,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
568 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) { 587 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
569 if (seq_num >= ((start_win + TWOPOW11) & 588 if (seq_num >= ((start_win + TWOPOW11) &
570 (MAX_TID_VALUE - 1)) && 589 (MAX_TID_VALUE - 1)) &&
571 seq_num < start_win) 590 seq_num < start_win) {
572 return -1; 591 ret = -1;
592 goto done;
593 }
573 } else if ((seq_num < start_win) || 594 } else if ((seq_num < start_win) ||
574 (seq_num > (start_win + TWOPOW11))) { 595 (seq_num >= (start_win + TWOPOW11))) {
575 return -1; 596 ret = -1;
597 goto done;
576 } 598 }
577 } 599 }
578 600
@@ -601,8 +623,10 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
601 else 623 else
602 pkt_index = (seq_num+MAX_TID_VALUE) - start_win; 624 pkt_index = (seq_num+MAX_TID_VALUE) - start_win;
603 625
604 if (tbl->rx_reorder_ptr[pkt_index]) 626 if (tbl->rx_reorder_ptr[pkt_index]) {
605 return -1; 627 ret = -1;
628 goto done;
629 }
606 630
607 tbl->rx_reorder_ptr[pkt_index] = payload; 631 tbl->rx_reorder_ptr[pkt_index] = payload;
608 } 632 }
@@ -613,7 +637,11 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
613 */ 637 */
614 mwifiex_11n_scan_and_dispatch(priv, tbl); 638 mwifiex_11n_scan_and_dispatch(priv, tbl);
615 639
616 return 0; 640done:
641 if (!tbl->timer_context.timer_is_set ||
642 prev_start_win != tbl->start_win)
643 mwifiex_11n_rxreorder_timer_restart(tbl);
644 return ret;
617} 645}
618 646
619/* 647/*
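The rx-reorder hunks replace the unconditional mod_timer() at the top of mwifiex_11n_rx_reorder_pkt() with a timer_is_set flag plus a restart helper that only re-arms the flush timer when it is not already pending or the window actually moved, and that scales the timeout by 15 ms per slot instead of 50 ms once the BA window reaches 32. The effect on the timeout as plain arithmetic (the 64-slot window is just an example):

    #include <stdio.h>

    #define MIN_FLUSH_TIMER_MS     50
    #define MIN_FLUSH_TIMER_15_MS  15
    #define MWIFIEX_BA_WIN_SIZE_32 32

    int main(void)
    {
        unsigned int win_size = 64;   /* e.g. a large 11n BA window */
        unsigned int per_slot = (win_size >= MWIFIEX_BA_WIN_SIZE_32) ?
                                MIN_FLUSH_TIMER_15_MS : MIN_FLUSH_TIMER_MS;

        /* 64 slots: 960 ms with the 15 ms step vs 3200 ms before */
        printf("flush timeout: %u ms\n", per_slot * win_size);
        return 0;
    }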
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 3a87bb0e3a62..63ecea89b4ab 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -21,6 +21,8 @@
21#define _MWIFIEX_11N_RXREORDER_H_ 21#define _MWIFIEX_11N_RXREORDER_H_
22 22
23#define MIN_FLUSH_TIMER_MS 50 23#define MIN_FLUSH_TIMER_MS 50
24#define MIN_FLUSH_TIMER_15_MS 15
25#define MWIFIEX_BA_WIN_SIZE_32 32
24 26
25#define PKT_TYPE_BAR 0xE7 27#define PKT_TYPE_BAR 0xE7
26#define MAX_TID_VALUE (2 << 11) 28#define MAX_TID_VALUE (2 << 11)
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index e2635747d966..f55658d15c60 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -592,6 +592,7 @@ struct reorder_tmr_cnxt {
592 struct timer_list timer; 592 struct timer_list timer;
593 struct mwifiex_rx_reorder_tbl *ptr; 593 struct mwifiex_rx_reorder_tbl *ptr;
594 struct mwifiex_private *priv; 594 struct mwifiex_private *priv;
595 u8 timer_is_set;
595}; 596};
596 597
597struct mwifiex_rx_reorder_tbl { 598struct mwifiex_rx_reorder_tbl {
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 573897b8e878..8444313eabe2 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1111,6 +1111,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1111 /* Ovislink */ 1111 /* Ovislink */
1112 { USB_DEVICE(0x1b75, 0x3071) }, 1112 { USB_DEVICE(0x1b75, 0x3071) },
1113 { USB_DEVICE(0x1b75, 0x3072) }, 1113 { USB_DEVICE(0x1b75, 0x3072) },
1114 { USB_DEVICE(0x1b75, 0xa200) },
1114 /* Para */ 1115 /* Para */
1115 { USB_DEVICE(0x20b8, 0x8888) }, 1116 { USB_DEVICE(0x20b8, 0x8888) },
1116 /* Pegatron */ 1117 /* Pegatron */
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 58ba71830886..40b6d1d006d7 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -467,7 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
467 rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw); 467 rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
468 /* <2> work queue */ 468 /* <2> work queue */
469 rtlpriv->works.hw = hw; 469 rtlpriv->works.hw = hw;
470 rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0); 470 rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
471 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, 471 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
472 (void *)rtl_watchdog_wq_callback); 472 (void *)rtl_watchdog_wq_callback);
473 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, 473 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
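The base.c one-liner matters because alloc_workqueue() treats its first argument as a printf-style format string; passing rtlpriv->cfg->name directly would misparse any '%' the name happened to contain. Passing a constant "%s" and the name as a vararg is the safe form, e.g.:

    #include <linux/workqueue.h>

    /* Sketch: never let an externally supplied string be the format.
     * The first parameter of alloc_workqueue() is interpreted as a
     * printf-style format, so the name goes in as an argument. */
    static struct workqueue_struct *make_wq(const char *devname)
    {
        return alloc_workqueue("%s", 0, 0, devname);
    }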
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index f6179bc06086..07dae0d44abc 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1828,3 +1828,9 @@ const struct ieee80211_ops rtl_ops = {
1828 .flush = rtl_op_flush, 1828 .flush = rtl_op_flush,
1829}; 1829};
1830EXPORT_SYMBOL_GPL(rtl_ops); 1830EXPORT_SYMBOL_GPL(rtl_ops);
1831
1832bool rtl_btc_status_false(void)
1833{
1834 return false;
1835}
1836EXPORT_SYMBOL_GPL(rtl_btc_status_false);
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 59cd3b9dca25..624e1dc16d31 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -42,5 +42,6 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
42 u32 mask, u32 data); 42 u32 mask, u32 data);
43void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data); 43void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
44bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); 44bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
45bool rtl_btc_status_false(void);
45 46
46#endif 47#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 667aba81246c..25daa8715219 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1796,7 +1796,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
1796 rtl_pci_reset_trx_ring(hw); 1796 rtl_pci_reset_trx_ring(hw);
1797 1797
1798 rtlpci->driver_is_goingto_unload = false; 1798 rtlpci->driver_is_goingto_unload = false;
1799 if (rtlpriv->cfg->ops->get_btc_status()) { 1799 if (rtlpriv->cfg->ops->get_btc_status &&
1800 rtlpriv->cfg->ops->get_btc_status()) {
1800 rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv); 1801 rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
1801 rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv); 1802 rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
1802 } 1803 }
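core.c above adds rtl_btc_status_false(), a shared stub that the non-BT-coexist chips later in this diff (rtl8192ce/cu/de/se) install as their .get_btc_status op, while pci.c also checks the pointer before calling it; either change on its own avoids the NULL dereference in rtl_pci_start(), and together they also cover drivers not yet converted. The defensive dispatch in isolation, with illustrative names:

    /* Sketch: an optional ops hook with a safe default. The struct and
     * helper names are invented, not the rtlwifi ones. */
    struct chip_ops {
        bool (*get_btc_status)(void);  /* may be NULL on older drivers */
    };

    static bool chip_btc_active(const struct chip_ops *ops)
    {
        return ops->get_btc_status && ops->get_btc_status();
    }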
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index a00861b26ece..29983bc96a89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -656,7 +656,8 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
656 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 656 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
657}; 657};
658 658
659void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) 659void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
660 bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *))
660{ 661{
661 struct rtl_priv *rtlpriv = rtl_priv(hw); 662 struct rtl_priv *rtlpriv = rtl_priv(hw);
662 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 663 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -722,7 +723,10 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
722 memcpy((u8 *)skb_put(skb, totalpacketlen), 723 memcpy((u8 *)skb_put(skb, totalpacketlen),
723 &reserved_page_packet, totalpacketlen); 724 &reserved_page_packet, totalpacketlen);
724 725
725 rtstatus = rtl_cmd_send_packet(hw, skb); 726 if (cmd_send_packet)
727 rtstatus = cmd_send_packet(hw, skb);
728 else
729 rtstatus = rtl_cmd_send_packet(hw, skb);
726 730
727 if (rtstatus) 731 if (rtstatus)
728 b_dlok = true; 732 b_dlok = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index a815bd6273da..b64ae45dc674 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -109,7 +109,9 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
109 u32 cmd_len, u8 *p_cmdbuffer); 109 u32 cmd_len, u8 *p_cmdbuffer);
110void rtl92c_firmware_selfreset(struct ieee80211_hw *hw); 110void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
111void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 111void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
112void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 112void rtl92c_set_fw_rsvdpagepkt
113 (struct ieee80211_hw *hw,
114 bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *));
113void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 115void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
114void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len); 116void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
115void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); 117void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
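rtl92c_set_fw_rsvdpagepkt() drops its unused bool and instead accepts an optional "send packet" callback: the PCIe caller in rtl8192ce/hw.c passes NULL and keeps the rtl_cmd_send_packet() default, while the rtl8192cu hw.c hunk further below passes its new usb_cmd_send_packet(). A sketch of that callback-injection shape, with invented helper names:

    /* Sketch of injecting a transport-specific sender with a default
     * fallback; only the callback type mirrors the prototype above. */
    typedef bool (*send_pkt_fn)(struct ieee80211_hw *, struct sk_buff *);

    static bool deliver_rsvd_page(struct ieee80211_hw *hw, struct sk_buff *skb,
                                  send_pkt_fn send)
    {
        if (send)
            return send(hw, skb);               /* e.g. the USB path */
        return rtl_cmd_send_packet(hw, skb);    /* default PCIe path */
    }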
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 831df101d7b7..9b660df6fd71 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -114,6 +114,8 @@
114 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) 114 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
115#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ 115#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
116 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) 116 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
117#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \
118 SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
117 119
118#define CHIP_VER_B BIT(4) 120#define CHIP_VER_B BIT(4)
119#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3) 121#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 8ec0f031f48a..55357d69397a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -459,7 +459,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
459 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, 459 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
460 tmp_reg422 & (~BIT(6))); 460 tmp_reg422 & (~BIT(6)));
461 461
462 rtl92c_set_fw_rsvdpagepkt(hw, 0); 462 rtl92c_set_fw_rsvdpagepkt(hw, NULL);
463 463
464 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); 464 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
465 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); 465 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index d86b5b566444..46ea07605eb4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -244,6 +244,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
244 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate, 244 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
245 .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback, 245 .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
246 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower, 246 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
247 .get_btc_status = rtl_btc_status_false,
247}; 248};
248 249
249static struct rtl_mod_params rtl92ce_mod_params = { 250static struct rtl_mod_params rtl92ce_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 2fb9c7acb76a..dc3d20b17a26 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -728,6 +728,9 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
728 case HW_DESC_RXPKT_LEN: 728 case HW_DESC_RXPKT_LEN:
729 ret = GET_RX_DESC_PKT_LEN(pdesc); 729 ret = GET_RX_DESC_PKT_LEN(pdesc);
730 break; 730 break;
731 case HW_DESC_RXBUFF_ADDR:
732 ret = GET_RX_STATUS_DESC_BUFF_ADDR(pdesc);
733 break;
731 default: 734 default:
732 RT_ASSERT(false, "ERR rxdesc :%d not process\n", 735 RT_ASSERT(false, "ERR rxdesc :%d not process\n",
733 desc_name); 736 desc_name);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 04aa0b5f5b3d..873363acbacf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1592,6 +1592,20 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1592 } 1592 }
1593} 1593}
1594 1594
1595bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
1596{
1597 /* Currently nothing happens here.
1598 * Traffic stops after some seconds in WPA2 802.11n mode.
1599 * Maybe because rtl8192cu chip should be set from here?
1600 * If I understand correctly, the realtek vendor driver sends some urbs
1601 * if its "here".
1602 *
1603 * This is maybe necessary:
1604 * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
1605 */
1606 return true;
1607}
1608
1595void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) 1609void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1596{ 1610{
1597 struct rtl_priv *rtlpriv = rtl_priv(hw); 1611 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1939,7 +1953,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1939 recover = true; 1953 recover = true;
1940 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, 1954 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
1941 tmp_reg422 & (~BIT(6))); 1955 tmp_reg422 & (~BIT(6)));
1942 rtl92c_set_fw_rsvdpagepkt(hw, 0); 1956 rtl92c_set_fw_rsvdpagepkt(hw,
1957 &usb_cmd_send_packet);
1943 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0); 1958 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
1944 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); 1959 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1945 if (recover) 1960 if (recover)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index 0f7812e0c8aa..c1e33b0228c0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -104,7 +104,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
104void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); 104void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
105int rtl92c_download_fw(struct ieee80211_hw *hw); 105int rtl92c_download_fw(struct ieee80211_hw *hw);
106void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 106void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
107void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
108void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 107void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
109void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, 108void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
110 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); 109 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 7c5fbaf5fee0..e06bafee37f9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -101,6 +101,12 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
101 } 101 }
102} 102}
103 103
104/* get bt coexist status */
105static bool rtl92cu_get_btc_status(void)
106{
107 return false;
108}
109
104static struct rtl_hal_ops rtl8192cu_hal_ops = { 110static struct rtl_hal_ops rtl8192cu_hal_ops = {
105 .init_sw_vars = rtl92cu_init_sw_vars, 111 .init_sw_vars = rtl92cu_init_sw_vars,
106 .deinit_sw_vars = rtl92cu_deinit_sw_vars, 112 .deinit_sw_vars = rtl92cu_deinit_sw_vars,
@@ -148,6 +154,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
148 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, 154 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
149 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, 155 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
150 .fill_h2c_cmd = rtl92c_fill_h2c_cmd, 156 .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
157 .get_btc_status = rtl92cu_get_btc_status,
151}; 158};
152 159
153static struct rtl_mod_params rtl92cu_mod_params = { 160static struct rtl_mod_params rtl92cu_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index edab5a5351b5..a0aba088259a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -251,6 +251,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
251 .get_rfreg = rtl92d_phy_query_rf_reg, 251 .get_rfreg = rtl92d_phy_query_rf_reg,
252 .set_rfreg = rtl92d_phy_set_rf_reg, 252 .set_rfreg = rtl92d_phy_set_rf_reg,
253 .linked_set_reg = rtl92d_linked_set_reg, 253 .linked_set_reg = rtl92d_linked_set_reg,
254 .get_btc_status = rtl_btc_status_false,
254}; 255};
255 256
256static struct rtl_mod_params rtl92de_mod_params = { 257static struct rtl_mod_params rtl92de_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index dfdc9b20e4ad..1a87edca2c3f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -362,7 +362,7 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
362 } 362 }
363 break; 363 break;
364 default: 364 default:
365 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 365 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
366 "switch case not process %x\n", variable); 366 "switch case not process %x\n", variable);
367 break; 367 break;
368 } 368 }
@@ -591,7 +591,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
591 acm_ctrl &= (~ACMHW_BEQEN); 591 acm_ctrl &= (~ACMHW_BEQEN);
592 break; 592 break;
593 default: 593 default:
594 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 594 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
595 "switch case not process\n"); 595 "switch case not process\n");
596 break; 596 break;
597 } 597 }
@@ -710,7 +710,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
710 } 710 }
711 break; 711 break;
712 default: 712 default:
713 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 713 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
714 "switch case not process %x\n", variable); 714 "switch case not process %x\n", variable);
715 break; 715 break;
716 } 716 }
@@ -2424,7 +2424,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
2424 enc_algo = CAM_AES; 2424 enc_algo = CAM_AES;
2425 break; 2425 break;
2426 default: 2426 default:
2427 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 2427 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
2428 "switch case not process\n"); 2428 "switch case not process\n");
2429 enc_algo = CAM_TKIP; 2429 enc_algo = CAM_TKIP;
2430 break; 2430 break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index 83c98674bfd3..6e7a70b43949 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -446,6 +446,8 @@
446/* DWORD 6 */ 446/* DWORD 6 */
447#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \ 447#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \
448 SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val) 448 SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val)
449#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \
450 SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
449 451
450#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\ 452#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
451 (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \ 453 (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 1bff2a0f7600..aadba29c167a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -87,11 +87,8 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
87static void rtl92se_fw_cb(const struct firmware *firmware, void *context) 87static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
88{ 88{
89 struct ieee80211_hw *hw = context; 89 struct ieee80211_hw *hw = context;
90 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
91 struct rtl_priv *rtlpriv = rtl_priv(hw); 90 struct rtl_priv *rtlpriv = rtl_priv(hw);
92 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
93 struct rt_firmware *pfirmware = NULL; 91 struct rt_firmware *pfirmware = NULL;
94 int err;
95 92
96 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, 93 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
97 "Firmware callback routine entered!\n"); 94 "Firmware callback routine entered!\n");
@@ -112,20 +109,6 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
112 memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size); 109 memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
113 pfirmware->sz_fw_tmpbufferlen = firmware->size; 110 pfirmware->sz_fw_tmpbufferlen = firmware->size;
114 release_firmware(firmware); 111 release_firmware(firmware);
115
116 err = ieee80211_register_hw(hw);
117 if (err) {
118 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
119 "Can't register mac80211 hw\n");
120 return;
121 } else {
122 rtlpriv->mac80211.mac80211_registered = 1;
123 }
124 rtlpci->irq_alloc = 1;
125 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
126
127 /*init rfkill */
128 rtl_init_rfkill(hw);
129} 112}
130 113
131static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) 114static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
@@ -226,8 +209,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
226 if (!rtlpriv->rtlhal.pfirmware) 209 if (!rtlpriv->rtlhal.pfirmware)
227 return 1; 210 return 1;
228 211
229 rtlpriv->max_fw_size = RTL8190_MAX_RAW_FIRMWARE_CODE_SIZE; 212 rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
230 213 sizeof(struct fw_hdr);
231 pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" 214 pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
232 "Loading firmware %s\n", rtlpriv->cfg->fw_name); 215 "Loading firmware %s\n", rtlpriv->cfg->fw_name);
233 /* request fw */ 216 /* request fw */
@@ -294,6 +277,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
294 .set_bbreg = rtl92s_phy_set_bb_reg, 277 .set_bbreg = rtl92s_phy_set_bb_reg,
295 .get_rfreg = rtl92s_phy_query_rf_reg, 278 .get_rfreg = rtl92s_phy_query_rf_reg,
296 .set_rfreg = rtl92s_phy_set_rf_reg, 279 .set_rfreg = rtl92s_phy_set_rf_reg,
280 .get_btc_status = rtl_btc_status_false,
297}; 281};
298 282
299static struct rtl_mod_params rtl92se_mod_params = { 283static struct rtl_mod_params rtl92se_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index b358ebce8942..672fd3b02835 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -640,6 +640,9 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
640 case HW_DESC_RXPKT_LEN: 640 case HW_DESC_RXPKT_LEN:
641 ret = GET_RX_STATUS_DESC_PKT_LEN(desc); 641 ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
642 break; 642 break;
643 case HW_DESC_RXBUFF_ADDR:
644 ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc);
645 break;
643 default: 646 default:
644 RT_ASSERT(false, "ERR rxdesc :%d not process\n", 647 RT_ASSERT(false, "ERR rxdesc :%d not process\n",
645 desc_name); 648 desc_name);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
index 9786313dc62f..1e9570fa874f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
@@ -1889,15 +1889,18 @@ static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
1889 struct rtl_phy *rtlphy = &rtlpriv->phy; 1889 struct rtl_phy *rtlphy = &rtlpriv->phy;
1890 u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr); 1890 u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr);
1891 1891
1892 if (band != BAND_ON_2_4G && band != BAND_ON_5G) 1892 if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
1893 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band); 1893 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band);
1894 1894 band = BAND_ON_2_4G;
1895 if (rfpath >= MAX_RF_PATH) 1895 }
1896 if (rfpath >= MAX_RF_PATH) {
1896 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath); 1897 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath);
1897 1898 rfpath = MAX_RF_PATH - 1;
1898 if (txnum >= MAX_RF_PATH) 1899 }
1900 if (txnum >= MAX_RF_PATH) {
1899 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum); 1901 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum);
1900 1902 txnum = MAX_RF_PATH - 1;
1903 }
1901 rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data; 1904 rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
1902 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1905 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1903 "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n", 1906 "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 10cf69c4bc42..46ee956d0235 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -1117,7 +1117,18 @@ int rtl_usb_probe(struct usb_interface *intf,
1117 } 1117 }
1118 rtlpriv->cfg->ops->init_sw_leds(hw); 1118 rtlpriv->cfg->ops->init_sw_leds(hw);
1119 1119
1120 err = ieee80211_register_hw(hw);
1121 if (err) {
1122 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1123 "Can't register mac80211 hw.\n");
1124 err = -ENODEV;
1125 goto error_out;
1126 }
1127 rtlpriv->mac80211.mac80211_registered = 1;
1128
1129 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
1120 return 0; 1130 return 0;
1131
1121error_out: 1132error_out:
1122 rtl_deinit_core(hw); 1133 rtl_deinit_core(hw);
1123 _rtl_usb_io_handler_release(hw); 1134 _rtl_usb_io_handler_release(hw);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index d4eb8d2e9cb7..083ecc93fe5e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -176,10 +176,11 @@ struct xenvif_queue { /* Per-queue data for xenvif */
176 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ 176 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
177 struct xen_netif_rx_back_ring rx; 177 struct xen_netif_rx_back_ring rx;
178 struct sk_buff_head rx_queue; 178 struct sk_buff_head rx_queue;
179 RING_IDX rx_last_skb_slots;
180 unsigned long status;
181 179
182 struct timer_list rx_stalled; 180 unsigned int rx_queue_max;
181 unsigned int rx_queue_len;
182 unsigned long last_rx_time;
183 bool stalled;
183 184
184 struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS]; 185 struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
185 186
@@ -199,18 +200,14 @@ struct xenvif_queue { /* Per-queue data for xenvif */
199 struct xenvif_stats stats; 200 struct xenvif_stats stats;
200}; 201};
201 202
203/* Maximum number of Rx slots a to-guest packet may use, including the
204 * slot needed for GSO meta-data.
205 */
206#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
207
202enum state_bit_shift { 208enum state_bit_shift {
203 /* This bit marks that the vif is connected */ 209 /* This bit marks that the vif is connected */
204 VIF_STATUS_CONNECTED, 210 VIF_STATUS_CONNECTED,
205 /* This bit signals the RX thread that queuing was stopped (in
206 * start_xmit), and either the timer fired or an RX interrupt came
207 */
208 QUEUE_STATUS_RX_PURGE_EVENT,
209 /* This bit tells the interrupt handler that this queue was the reason
210 * for the carrier off, so it should kick the thread. Only queues which
211 * brought it down can turn on the carrier.
212 */
213 QUEUE_STATUS_RX_STALLED
214}; 211};
215 212
216struct xenvif { 213struct xenvif {
@@ -228,9 +225,6 @@ struct xenvif {
228 u8 ip_csum:1; 225 u8 ip_csum:1;
229 u8 ipv6_csum:1; 226 u8 ipv6_csum:1;
230 227
231 /* Internal feature information. */
232 u8 can_queue:1; /* can queue packets for receiver? */
233
234 /* Is this interface disabled? True when backend discovers 228 /* Is this interface disabled? True when backend discovers
235 * frontend is rogue. 229 * frontend is rogue.
236 */ 230 */
@@ -240,6 +234,9 @@ struct xenvif {
240 /* Queues */ 234 /* Queues */
241 struct xenvif_queue *queues; 235 struct xenvif_queue *queues;
242 unsigned int num_queues; /* active queues, resource allocated */ 236 unsigned int num_queues; /* active queues, resource allocated */
237 unsigned int stalled_queues;
238
239 spinlock_t lock;
243 240
244#ifdef CONFIG_DEBUG_FS 241#ifdef CONFIG_DEBUG_FS
245 struct dentry *xenvif_dbg_root; 242 struct dentry *xenvif_dbg_root;
@@ -249,6 +246,14 @@ struct xenvif {
249 struct net_device *dev; 246 struct net_device *dev;
250}; 247};
251 248
249struct xenvif_rx_cb {
250 unsigned long expires;
251 int meta_slots_used;
252 bool full_coalesce;
253};
254
255#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
256
252static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif) 257static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
253{ 258{
254 return to_xenbus_device(vif->dev->dev.parent); 259 return to_xenbus_device(vif->dev->dev.parent);
@@ -272,8 +277,6 @@ void xenvif_xenbus_fini(void);
272 277
273int xenvif_schedulable(struct xenvif *vif); 278int xenvif_schedulable(struct xenvif *vif);
274 279
275int xenvif_must_stop_queue(struct xenvif_queue *queue);
276
277int xenvif_queue_stopped(struct xenvif_queue *queue); 280int xenvif_queue_stopped(struct xenvif_queue *queue);
278void xenvif_wake_queue(struct xenvif_queue *queue); 281void xenvif_wake_queue(struct xenvif_queue *queue);
279 282
@@ -296,6 +299,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
296 299
297int xenvif_dealloc_kthread(void *data); 300int xenvif_dealloc_kthread(void *data);
298 301
302void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
303
299/* Determine whether the needed number of slots (req) are available, 304/* Determine whether the needed number of slots (req) are available,
300 * and set req_event if not. 305 * and set req_event if not.
301 */ 306 */
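The common.h changes replace the per-queue stall timer machinery with a byte-limited internal guest Rx queue; each queued skb now carries its own expiry in the new struct xenvif_rx_cb, reached through the XENVIF_RX_CB() cast over skb->cb (the interface.c hunk below stamps cb->expires = jiffies + rx_drain_timeout_jiffies before queueing). A minimal sketch of that skb->cb control-block pattern; the struct mirrors the one above but the helper and macro names are invented:

    #include <linux/skbuff.h>

    /* Sketch: per-packet driver state parked in skb->cb, the scratch
     * area owned by whichever layer currently holds the skb. */
    struct example_rx_cb {
        unsigned long expires;
        int meta_slots_used;
        bool full_coalesce;
    };

    #define EXAMPLE_RX_CB(skb) ((struct example_rx_cb *)(skb)->cb)

    static void stamp_expiry(struct sk_buff *skb, unsigned long timeout_jiffies)
    {
        BUILD_BUG_ON(sizeof(struct example_rx_cb) > sizeof(skb->cb));
        EXAMPLE_RX_CB(skb)->expires = jiffies + timeout_jiffies;
    }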
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7342a6bb5557..a6a32d337bbb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,9 @@
43#define XENVIF_QUEUE_LENGTH 32 43#define XENVIF_QUEUE_LENGTH 32
44#define XENVIF_NAPI_WEIGHT 64 44#define XENVIF_NAPI_WEIGHT 64
45 45
46/* Number of bytes allowed on the internal guest Rx queue. */
47#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
48
46/* This function is used to set SKBTX_DEV_ZEROCOPY as well as 49/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
47 * increasing the inflight counter. We need to increase the inflight 50 * increasing the inflight counter. We need to increase the inflight
48 * counter because core driver calls into xenvif_zerocopy_callback 51 * counter because core driver calls into xenvif_zerocopy_callback
@@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
60 atomic_dec(&queue->inflight_packets); 63 atomic_dec(&queue->inflight_packets);
61} 64}
62 65
63static inline void xenvif_stop_queue(struct xenvif_queue *queue)
64{
65 struct net_device *dev = queue->vif->dev;
66
67 if (!queue->vif->can_queue)
68 return;
69
70 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
71}
72
73int xenvif_schedulable(struct xenvif *vif) 66int xenvif_schedulable(struct xenvif *vif)
74{ 67{
75 return netif_running(vif->dev) && 68 return netif_running(vif->dev) &&
76 test_bit(VIF_STATUS_CONNECTED, &vif->status); 69 test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
70 !vif->disabled;
77} 71}
78 72
79static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) 73static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget)
114static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) 108static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
115{ 109{
116 struct xenvif_queue *queue = dev_id; 110 struct xenvif_queue *queue = dev_id;
117 struct netdev_queue *net_queue =
118 netdev_get_tx_queue(queue->vif->dev, queue->id);
119 111
120 /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
121 * the carrier went down and this queue was previously blocked
122 */
123 if (unlikely(netif_tx_queue_stopped(net_queue) ||
124 (!netif_carrier_ok(queue->vif->dev) &&
125 test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
126 set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
127 xenvif_kick_thread(queue); 112 xenvif_kick_thread(queue);
128 113
129 return IRQ_HANDLED; 114 return IRQ_HANDLED;
@@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
151 netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); 136 netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
152} 137}
153 138
154/* Callback to wake the queue's thread and turn the carrier off on timeout */
155static void xenvif_rx_stalled(unsigned long data)
156{
157 struct xenvif_queue *queue = (struct xenvif_queue *)data;
158
159 if (xenvif_queue_stopped(queue)) {
160 set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
161 xenvif_kick_thread(queue);
162 }
163}
164
165static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 139static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
166{ 140{
167 struct xenvif *vif = netdev_priv(dev); 141 struct xenvif *vif = netdev_priv(dev);
168 struct xenvif_queue *queue = NULL; 142 struct xenvif_queue *queue = NULL;
169 unsigned int num_queues = vif->num_queues; 143 unsigned int num_queues = vif->num_queues;
170 u16 index; 144 u16 index;
171 int min_slots_needed; 145 struct xenvif_rx_cb *cb;
172 146
173 BUG_ON(skb->dev != dev); 147 BUG_ON(skb->dev != dev);
174 148
@@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
191 !xenvif_schedulable(vif)) 165 !xenvif_schedulable(vif))
192 goto drop; 166 goto drop;
193 167
194 /* At best we'll need one slot for the header and one for each 168 cb = XENVIF_RX_CB(skb);
195 * frag. 169 cb->expires = jiffies + rx_drain_timeout_jiffies;
196 */
197 min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
198
199 /* If the skb is GSO then we'll also need an extra slot for the
200 * metadata.
201 */
202 if (skb_is_gso(skb))
203 min_slots_needed++;
204 170
205 /* If the skb can't possibly fit in the remaining slots 171 xenvif_rx_queue_tail(queue, skb);
206 * then turn off the queue to give the ring a chance to
207 * drain.
208 */
209 if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
210 queue->rx_stalled.function = xenvif_rx_stalled;
211 queue->rx_stalled.data = (unsigned long)queue;
212 xenvif_stop_queue(queue);
213 mod_timer(&queue->rx_stalled,
214 jiffies + rx_drain_timeout_jiffies);
215 }
216
217 skb_queue_tail(&queue->rx_queue, skb);
218 xenvif_kick_thread(queue); 172 xenvif_kick_thread(queue);
219 173
220 return NETDEV_TX_OK; 174 return NETDEV_TX_OK;
@@ -465,6 +419,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
465 vif->queues = NULL; 419 vif->queues = NULL;
466 vif->num_queues = 0; 420 vif->num_queues = 0;
467 421
422 spin_lock_init(&vif->lock);
423
468 dev->netdev_ops = &xenvif_netdev_ops; 424 dev->netdev_ops = &xenvif_netdev_ops;
469 dev->hw_features = NETIF_F_SG | 425 dev->hw_features = NETIF_F_SG |
470 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 426 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -508,6 +464,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
508 init_timer(&queue->credit_timeout); 464 init_timer(&queue->credit_timeout);
509 queue->credit_window_start = get_jiffies_64(); 465 queue->credit_window_start = get_jiffies_64();
510 466
467 queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
468
511 skb_queue_head_init(&queue->rx_queue); 469 skb_queue_head_init(&queue->rx_queue);
512 skb_queue_head_init(&queue->tx_queue); 470 skb_queue_head_init(&queue->tx_queue);
513 471
@@ -539,8 +497,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
539 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; 497 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
540 } 498 }
541 499
542 init_timer(&queue->rx_stalled);
543
544 return 0; 500 return 0;
545} 501}
546 502
@@ -551,7 +507,6 @@ void xenvif_carrier_on(struct xenvif *vif)
551 dev_set_mtu(vif->dev, ETH_DATA_LEN); 507 dev_set_mtu(vif->dev, ETH_DATA_LEN);
552 netdev_update_features(vif->dev); 508 netdev_update_features(vif->dev);
553 set_bit(VIF_STATUS_CONNECTED, &vif->status); 509 set_bit(VIF_STATUS_CONNECTED, &vif->status);
554 netif_carrier_on(vif->dev);
555 if (netif_running(vif->dev)) 510 if (netif_running(vif->dev))
556 xenvif_up(vif); 511 xenvif_up(vif);
557 rtnl_unlock(); 512 rtnl_unlock();
@@ -611,6 +566,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
611 disable_irq(queue->rx_irq); 566 disable_irq(queue->rx_irq);
612 } 567 }
613 568
569 queue->stalled = true;
570
614 task = kthread_create(xenvif_kthread_guest_rx, 571 task = kthread_create(xenvif_kthread_guest_rx,
615 (void *)queue, "%s-guest-rx", queue->name); 572 (void *)queue, "%s-guest-rx", queue->name);
616 if (IS_ERR(task)) { 573 if (IS_ERR(task)) {
@@ -674,7 +631,6 @@ void xenvif_disconnect(struct xenvif *vif)
674 netif_napi_del(&queue->napi); 631 netif_napi_del(&queue->napi);
675 632
676 if (queue->task) { 633 if (queue->task) {
677 del_timer_sync(&queue->rx_stalled);
678 kthread_stop(queue->task); 634 kthread_stop(queue->task);
679 queue->task = NULL; 635 queue->task = NULL;
680 } 636 }
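/*
 * Editor's sketch, not part of the patch above: the byte-counted internal
 * Rx queue in miniature.  The producer stops the netdev tx queue once the
 * queued bytes exceed a fixed budget (XENVIF_RX_QUEUE_BYTES in the patch);
 * the consumer wakes it again once the backlog drops back under the budget.
 * Standalone model with the locking omitted; names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_queue {
        unsigned int len;       /* bytes currently queued */
        unsigned int max;       /* byte budget */
        bool tx_stopped;
};

static void queue_tail(struct rx_queue *q, unsigned int skb_len)
{
        q->len += skb_len;
        if (q->len > q->max)
                q->tx_stopped = true;           /* netif_tx_stop_queue() */
}

static void dequeue(struct rx_queue *q, unsigned int skb_len)
{
        q->len -= skb_len;
        if (q->len < q->max)
                q->tx_stopped = false;          /* netif_tx_wake_queue() */
}

int main(void)
{
        struct rx_queue q = { .max = 8192 };

        queue_tail(&q, 6000);
        queue_tail(&q, 6000);   /* over budget: tx queue stops */
        printf("stopped=%d\n", q.tx_stopped);
        dequeue(&q, 6000);      /* back under budget: tx queue wakes */
        printf("stopped=%d\n", q.tx_stopped);
        return 0;
}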
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 85fec0fb4ec2..45755f9aa3f9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -55,13 +55,20 @@
55bool separate_tx_rx_irq = 1; 55bool separate_tx_rx_irq = 1;
56module_param(separate_tx_rx_irq, bool, 0644); 56module_param(separate_tx_rx_irq, bool, 0644);
57 57
58/* When guest ring is filled up, qdisc queues the packets for us, but we have 58/* The time that packets can stay on the guest Rx internal queue
59 * to timeout them, otherwise other guests' packets can get stuck there 59 * before they are dropped.
60 */ 60 */
61unsigned int rx_drain_timeout_msecs = 10000; 61unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444); 62module_param(rx_drain_timeout_msecs, uint, 0444);
63unsigned int rx_drain_timeout_jiffies; 63unsigned int rx_drain_timeout_jiffies;
64 64
65/* The length of time before the frontend is considered unresponsive
66 * because it isn't providing Rx slots.
67 */
68static unsigned int rx_stall_timeout_msecs = 60000;
69module_param(rx_stall_timeout_msecs, uint, 0444);
70static unsigned int rx_stall_timeout_jiffies;
71
65unsigned int xenvif_max_queues; 72unsigned int xenvif_max_queues;
66module_param_named(max_queues, xenvif_max_queues, uint, 0644); 73module_param_named(max_queues, xenvif_max_queues, uint, 0644);
67MODULE_PARM_DESC(max_queues, 74MODULE_PARM_DESC(max_queues,
@@ -83,7 +90,6 @@ static void make_tx_response(struct xenvif_queue *queue,
83 s8 st); 90 s8 st);
84 91
85static inline int tx_work_todo(struct xenvif_queue *queue); 92static inline int tx_work_todo(struct xenvif_queue *queue);
86static inline int rx_work_todo(struct xenvif_queue *queue);
87 93
88static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, 94static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
89 u16 id, 95 u16 id,
@@ -163,6 +169,69 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
163 return false; 169 return false;
164} 170}
165 171
172void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
173{
174 unsigned long flags;
175
176 spin_lock_irqsave(&queue->rx_queue.lock, flags);
177
178 __skb_queue_tail(&queue->rx_queue, skb);
179
180 queue->rx_queue_len += skb->len;
181 if (queue->rx_queue_len > queue->rx_queue_max)
182 netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
183
184 spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
185}
186
187static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
188{
189 struct sk_buff *skb;
190
191 spin_lock_irq(&queue->rx_queue.lock);
192
193 skb = __skb_dequeue(&queue->rx_queue);
194 if (skb)
195 queue->rx_queue_len -= skb->len;
196
197 spin_unlock_irq(&queue->rx_queue.lock);
198
199 return skb;
200}
201
202static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
203{
204 spin_lock_irq(&queue->rx_queue.lock);
205
206 if (queue->rx_queue_len < queue->rx_queue_max)
207 netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
208
209 spin_unlock_irq(&queue->rx_queue.lock);
210}
211
212
213static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
214{
215 struct sk_buff *skb;
216 while ((skb = xenvif_rx_dequeue(queue)) != NULL)
217 kfree_skb(skb);
218}
219
220static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
221{
222 struct sk_buff *skb;
223
224 for(;;) {
225 skb = skb_peek(&queue->rx_queue);
226 if (!skb)
227 break;
228 if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
229 break;
230 xenvif_rx_dequeue(queue);
231 kfree_skb(skb);
232 }
233}
234
166/* 235/*
167 * Returns true if we should start a new receive buffer instead of 236 * Returns true if we should start a new receive buffer instead of
168 * adding 'size' bytes to a buffer which currently contains 'offset' 237 * adding 'size' bytes to a buffer which currently contains 'offset'
@@ -237,13 +306,6 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
237 return meta; 306 return meta;
238} 307}
239 308
240struct xenvif_rx_cb {
241 int meta_slots_used;
242 bool full_coalesce;
243};
244
245#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
246
247/* 309/*
248 * Set up the grant operations for this fragment. If it's a flipping 310 * Set up the grant operations for this fragment. If it's a flipping
249 * interface, we also set up the unmap request from here. 311 * interface, we also set up the unmap request from here.
@@ -587,12 +649,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
587 649
588 skb_queue_head_init(&rxq); 650 skb_queue_head_init(&rxq);
589 651
590 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) { 652 while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
653 && (skb = xenvif_rx_dequeue(queue)) != NULL) {
591 RING_IDX max_slots_needed; 654 RING_IDX max_slots_needed;
592 RING_IDX old_req_cons; 655 RING_IDX old_req_cons;
593 RING_IDX ring_slots_used; 656 RING_IDX ring_slots_used;
594 int i; 657 int i;
595 658
659 queue->last_rx_time = jiffies;
660
596 /* We need a cheap worse case estimate for the number of 661 /* We need a cheap worse case estimate for the number of
597 * slots we'll use. 662 * slots we'll use.
598 */ 663 */
@@ -634,15 +699,6 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
634 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) 699 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
635 max_slots_needed++; 700 max_slots_needed++;
636 701
637 /* If the skb may not fit then bail out now */
638 if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
639 skb_queue_head(&queue->rx_queue, skb);
640 need_to_notify = true;
641 queue->rx_last_skb_slots = max_slots_needed;
642 break;
643 } else
644 queue->rx_last_skb_slots = 0;
645
646 old_req_cons = queue->rx.req_cons; 702 old_req_cons = queue->rx.req_cons;
647 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); 703 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
648 ring_slots_used = queue->rx.req_cons - old_req_cons; 704 ring_slots_used = queue->rx.req_cons - old_req_cons;
@@ -1869,12 +1925,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1869 } 1925 }
1870} 1926}
1871 1927
1872static inline int rx_work_todo(struct xenvif_queue *queue)
1873{
1874 return (!skb_queue_empty(&queue->rx_queue) &&
1875 xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
1876}
1877
1878static inline int tx_work_todo(struct xenvif_queue *queue) 1928static inline int tx_work_todo(struct xenvif_queue *queue)
1879{ 1929{
1880 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) 1930 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
@@ -1931,92 +1981,121 @@ err:
1931 return err; 1981 return err;
1932} 1982}
1933 1983
1934static void xenvif_start_queue(struct xenvif_queue *queue) 1984static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
1935{ 1985{
1936 if (xenvif_schedulable(queue->vif)) 1986 struct xenvif *vif = queue->vif;
1937 xenvif_wake_queue(queue); 1987
1988 queue->stalled = true;
1989
1990 /* At least one queue has stalled? Disable the carrier. */
1991 spin_lock(&vif->lock);
1992 if (vif->stalled_queues++ == 0) {
1993 netdev_info(vif->dev, "Guest Rx stalled");
1994 netif_carrier_off(vif->dev);
1995 }
1996 spin_unlock(&vif->lock);
1938} 1997}
1939 1998
1940/* Only called from the queue's thread, it handles the situation when the guest 1999static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
1941 * doesn't post enough requests on the receiving ring.
1942 * First xenvif_start_xmit disables QDisc and start a timer, and then either the
1943 * timer fires, or the guest send an interrupt after posting new request. If it
1944 * is the timer, the carrier is turned off here.
1945 * */
1946static void xenvif_rx_purge_event(struct xenvif_queue *queue)
1947{ 2000{
1948 /* Either the last unsuccesful skb or at least 1 slot should fit */ 2001 struct xenvif *vif = queue->vif;
1949 int needed = queue->rx_last_skb_slots ?
1950 queue->rx_last_skb_slots : 1;
1951 2002
1952 /* It is assumed that if the guest post new slots after this, the RX 2003 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
1953 * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up 2004 queue->stalled = false;
1954 * the thread again
1955 */
1956 set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
1957 if (!xenvif_rx_ring_slots_available(queue, needed)) {
1958 rtnl_lock();
1959 if (netif_carrier_ok(queue->vif->dev)) {
1960 /* Timer fired and there are still no slots. Turn off
1961 * everything except the interrupts
1962 */
1963 netif_carrier_off(queue->vif->dev);
1964 skb_queue_purge(&queue->rx_queue);
1965 queue->rx_last_skb_slots = 0;
1966 if (net_ratelimit())
1967 netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
1968 } else {
1969 /* Probably an another queue already turned the carrier
1970 * off, make sure nothing is stucked in the internal
1971 * queue of this queue
1972 */
1973 skb_queue_purge(&queue->rx_queue);
1974 queue->rx_last_skb_slots = 0;
1975 }
1976 rtnl_unlock();
1977 } else if (!netif_carrier_ok(queue->vif->dev)) {
1978 unsigned int num_queues = queue->vif->num_queues;
1979 unsigned int i;
1980 /* The carrier was down, but an interrupt kicked
1981 * the thread again after new requests were
1982 * posted
1983 */
1984 clear_bit(QUEUE_STATUS_RX_STALLED,
1985 &queue->status);
1986 rtnl_lock();
1987 netif_carrier_on(queue->vif->dev);
1988 netif_tx_wake_all_queues(queue->vif->dev);
1989 rtnl_unlock();
1990 2005
1991 for (i = 0; i < num_queues; i++) { 2006 /* All queues are ready? Enable the carrier. */
1992 struct xenvif_queue *temp = &queue->vif->queues[i]; 2007 spin_lock(&vif->lock);
2008 if (--vif->stalled_queues == 0) {
2009 netdev_info(vif->dev, "Guest Rx ready");
2010 netif_carrier_on(vif->dev);
2011 }
2012 spin_unlock(&vif->lock);
2013}
1993 2014
1994 xenvif_napi_schedule_or_enable_events(temp); 2015static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
1995 } 2016{
1996 if (net_ratelimit()) 2017 RING_IDX prod, cons;
1997 netdev_err(queue->vif->dev, "Carrier on again\n"); 2018
1998 } else { 2019 prod = queue->rx.sring->req_prod;
1999 /* Queuing were stopped, but the guest posted 2020 cons = queue->rx.req_cons;
2000 * new requests and sent an interrupt 2021
2001 */ 2022 return !queue->stalled
2002 clear_bit(QUEUE_STATUS_RX_STALLED, 2023 && prod - cons < XEN_NETBK_RX_SLOTS_MAX
2003 &queue->status); 2024 && time_after(jiffies,
2004 del_timer_sync(&queue->rx_stalled); 2025 queue->last_rx_time + rx_stall_timeout_jiffies);
2005 xenvif_start_queue(queue); 2026}
2027
2028static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2029{
2030 RING_IDX prod, cons;
2031
2032 prod = queue->rx.sring->req_prod;
2033 cons = queue->rx.req_cons;
2034
2035 return queue->stalled
2036 && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
2037}
2038
2039static bool xenvif_have_rx_work(struct xenvif_queue *queue)
2040{
2041 return (!skb_queue_empty(&queue->rx_queue)
2042 && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
2043 || xenvif_rx_queue_stalled(queue)
2044 || xenvif_rx_queue_ready(queue)
2045 || kthread_should_stop()
2046 || queue->vif->disabled;
2047}
2048
2049static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
2050{
2051 struct sk_buff *skb;
2052 long timeout;
2053
2054 skb = skb_peek(&queue->rx_queue);
2055 if (!skb)
2056 return MAX_SCHEDULE_TIMEOUT;
2057
2058 timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2059 return timeout < 0 ? 0 : timeout;
2060}
2061
2062/* Wait until the guest Rx thread has work.
2063 *
2064 * The timeout needs to be adjusted based on the current head of the
2065 * queue (and not just the head at the beginning). In particular, if
2066 * the queue is initially empty an infinite timeout is used and this
2067 * needs to be reduced when a skb is queued.
2068 *
2069 * This cannot be done with wait_event_timeout() because it only
2070 * calculates the timeout once.
2071 */
2072static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2073{
2074 DEFINE_WAIT(wait);
2075
2076 if (xenvif_have_rx_work(queue))
2077 return;
2078
2079 for (;;) {
2080 long ret;
2081
2082 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2083 if (xenvif_have_rx_work(queue))
2084 break;
2085 ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2086 if (!ret)
2087 break;
2006 } 2088 }
2089 finish_wait(&queue->wq, &wait);
2007} 2090}
2008 2091
2009int xenvif_kthread_guest_rx(void *data) 2092int xenvif_kthread_guest_rx(void *data)
2010{ 2093{
2011 struct xenvif_queue *queue = data; 2094 struct xenvif_queue *queue = data;
2012 struct sk_buff *skb; 2095 struct xenvif *vif = queue->vif;
2013 2096
2014 while (!kthread_should_stop()) { 2097 for (;;) {
2015 wait_event_interruptible(queue->wq, 2098 xenvif_wait_for_rx_work(queue);
2016 rx_work_todo(queue) ||
2017 queue->vif->disabled ||
2018 test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
2019 kthread_should_stop());
2020 2099
2021 if (kthread_should_stop()) 2100 if (kthread_should_stop())
2022 break; 2101 break;
@@ -2028,35 +2107,38 @@ int xenvif_kthread_guest_rx(void *data)
2028 * context so we defer it here, if this thread is 2107 * context so we defer it here, if this thread is
2029 * associated with queue 0. 2108 * associated with queue 0.
2030 */ 2109 */
2031 if (unlikely(queue->vif->disabled && queue->id == 0)) { 2110 if (unlikely(vif->disabled && queue->id == 0)) {
2032 xenvif_carrier_off(queue->vif); 2111 xenvif_carrier_off(vif);
2033 } else if (unlikely(queue->vif->disabled)) { 2112 xenvif_rx_queue_purge(queue);
2034 /* kthread_stop() would be called upon this thread soon, 2113 continue;
2035 * be a bit proactive
2036 */
2037 skb_queue_purge(&queue->rx_queue);
2038 queue->rx_last_skb_slots = 0;
2039 } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
2040 &queue->status))) {
2041 xenvif_rx_purge_event(queue);
2042 } else if (!netif_carrier_ok(queue->vif->dev)) {
2043 /* Another queue stalled and turned the carrier off, so
2044 * purge the internal queue of queues which were not
2045 * blocked
2046 */
2047 skb_queue_purge(&queue->rx_queue);
2048 queue->rx_last_skb_slots = 0;
2049 } 2114 }
2050 2115
2051 if (!skb_queue_empty(&queue->rx_queue)) 2116 if (!skb_queue_empty(&queue->rx_queue))
2052 xenvif_rx_action(queue); 2117 xenvif_rx_action(queue);
2053 2118
2119 /* If the guest hasn't provided any Rx slots for a
2120 * while it's probably not responsive, drop the
2121 * carrier so packets are dropped earlier.
2122 */
2123 if (xenvif_rx_queue_stalled(queue))
2124 xenvif_queue_carrier_off(queue);
2125 else if (xenvif_rx_queue_ready(queue))
2126 xenvif_queue_carrier_on(queue);
2127
2128 /* Queued packets may have foreign pages from other
2129 * domains. These cannot be queued indefinitely as
2130 * this would starve guests of grant refs and transmit
2131 * slots.
2132 */
2133 xenvif_rx_queue_drop_expired(queue);
2134
2135 xenvif_rx_queue_maybe_wake(queue);
2136
2054 cond_resched(); 2137 cond_resched();
2055 } 2138 }
2056 2139
2057 /* Bin any remaining skbs */ 2140 /* Bin any remaining skbs */
2058 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) 2141 xenvif_rx_queue_purge(queue);
2059 dev_kfree_skb(skb);
2060 2142
2061 return 0; 2143 return 0;
2062} 2144}
@@ -2113,6 +2195,7 @@ static int __init netback_init(void)
2113 goto failed_init; 2195 goto failed_init;
2114 2196
2115 rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs); 2197 rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
2198 rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
2116 2199
2117#ifdef CONFIG_DEBUG_FS 2200#ifdef CONFIG_DEBUG_FS
2118 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); 2201 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
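/*
 * Editor's sketch, not part of the patch above: the stalled/ready hysteresis
 * the guest Rx thread now applies, reduced to plain integers.  A queue is
 * considered stalled when the frontend has left fewer than SLOTS_MAX free
 * ring slots for longer than the stall timeout, and ready again once
 * SLOTS_MAX slots are available; the carrier follows the per-vif count of
 * stalled queues.  Userspace model, illustrative values, no jiffy wrapping.
 */
#include <stdbool.h>
#include <stdio.h>

#define SLOTS_MAX 18    /* stands in for XEN_NETBK_RX_SLOTS_MAX */

struct q {
        unsigned int prod, cons;        /* shared-ring request prod/cons */
        unsigned long last_rx_time;
        bool stalled;
};

static bool queue_stalled(const struct q *q, unsigned long now,
                          unsigned long stall_timeout)
{
        return !q->stalled &&
               q->prod - q->cons < SLOTS_MAX &&
               now - q->last_rx_time > stall_timeout;
}

static bool queue_ready(const struct q *q)
{
        return q->stalled && q->prod - q->cons >= SLOTS_MAX;
}

int main(void)
{
        struct q q = { .prod = 5, .cons = 0, .last_rx_time = 0 };
        unsigned long stall_timeout = 60;

        printf("stalled? %d\n", queue_stalled(&q, 100, stall_timeout)); /* 1 */
        q.stalled = true;
        q.prod = 30;                                    /* guest posts slots */
        printf("ready?   %d\n", queue_ready(&q));       /* 1 */
        return 0;
}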
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 8079c31ac5e6..4e56a27f9689 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -52,6 +52,7 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
52 struct xenvif_queue *queue = m->private; 52 struct xenvif_queue *queue = m->private;
53 struct xen_netif_tx_back_ring *tx_ring = &queue->tx; 53 struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
54 struct xen_netif_rx_back_ring *rx_ring = &queue->rx; 54 struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
55 struct netdev_queue *dev_queue;
55 56
56 if (tx_ring->sring) { 57 if (tx_ring->sring) {
57 struct xen_netif_tx_sring *sring = tx_ring->sring; 58 struct xen_netif_tx_sring *sring = tx_ring->sring;
@@ -112,6 +113,13 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
112 queue->credit_timeout.expires, 113 queue->credit_timeout.expires,
113 jiffies); 114 jiffies);
114 115
116 dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);
117
118 seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
119 queue->rx_queue_len, queue->rx_queue_max,
120 skb_queue_len(&queue->rx_queue),
121 netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");
122
115 return 0; 123 return 0;
116} 124}
117 125
@@ -703,6 +711,7 @@ static void connect(struct backend_info *be)
703 be->vif->queues = vzalloc(requested_num_queues * 711 be->vif->queues = vzalloc(requested_num_queues *
704 sizeof(struct xenvif_queue)); 712 sizeof(struct xenvif_queue));
705 be->vif->num_queues = requested_num_queues; 713 be->vif->num_queues = requested_num_queues;
714 be->vif->stalled_queues = requested_num_queues;
706 715
707 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { 716 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
708 queue = &be->vif->queues[queue_index]; 717 queue = &be->vif->queues[queue_index];
@@ -873,15 +882,10 @@ static int read_xenbus_vif_flags(struct backend_info *be)
873 if (!rx_copy) 882 if (!rx_copy)
874 return -EOPNOTSUPP; 883 return -EOPNOTSUPP;
875 884
876 if (vif->dev->tx_queue_len != 0) { 885 if (xenbus_scanf(XBT_NIL, dev->otherend,
877 if (xenbus_scanf(XBT_NIL, dev->otherend, 886 "feature-rx-notify", "%d", &val) < 0 || val == 0) {
878 "feature-rx-notify", "%d", &val) < 0) 887 xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
879 val = 0; 888 return -EINVAL;
880 if (val)
881 vif->can_queue = 1;
882 else
883 /* Must be non-zero for pfifo_fast to work. */
884 vif->dev->tx_queue_len = 1;
885 } 889 }
886 890
887 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", 891 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 59fb12e84e6b..dc566b38645f 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -243,23 +243,27 @@ static inline struct reserved_mem *__find_rmem(struct device_node *node)
243 * This function assign memory region pointed by "memory-region" device tree 243 * This function assign memory region pointed by "memory-region" device tree
244 * property to the given device. 244 * property to the given device.
245 */ 245 */
246void of_reserved_mem_device_init(struct device *dev) 246int of_reserved_mem_device_init(struct device *dev)
247{ 247{
248 struct reserved_mem *rmem; 248 struct reserved_mem *rmem;
249 struct device_node *np; 249 struct device_node *np;
250 int ret;
250 251
251 np = of_parse_phandle(dev->of_node, "memory-region", 0); 252 np = of_parse_phandle(dev->of_node, "memory-region", 0);
252 if (!np) 253 if (!np)
253 return; 254 return -ENODEV;
254 255
255 rmem = __find_rmem(np); 256 rmem = __find_rmem(np);
256 of_node_put(np); 257 of_node_put(np);
257 258
258 if (!rmem || !rmem->ops || !rmem->ops->device_init) 259 if (!rmem || !rmem->ops || !rmem->ops->device_init)
259 return; 260 return -EINVAL;
261
262 ret = rmem->ops->device_init(rmem, dev);
263 if (ret == 0)
264 dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
260 265
261 rmem->ops->device_init(rmem, dev); 266 return ret;
262 dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
263} 267}
264 268
265/** 269/**
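/*
 * Editor's sketch, not part of the patch above: with the void -> int
 * conversion, a driver probe can now propagate the failure instead of
 * silently continuing without its reserved region.  Hypothetical caller,
 * trimmed fragment rather than a complete driver; a driver that treats the
 * region as optional could instead ignore -ENODEV.
 */
static int example_probe(struct platform_device *pdev)
{
        int ret;

        ret = of_reserved_mem_device_init(&pdev->dev);
        if (ret) {
                /* e.g. -ENODEV if "memory-region" is absent */
                dev_err(&pdev->dev, "no usable reserved memory: %d\n", ret);
                return ret;
        }

        /* ... rest of probe ... */
        return 0;
}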
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 233fe8a88264..69202d1eb8fb 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -275,15 +275,22 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
275 goto err_pcie; 275 goto err_pcie;
276 } 276 }
277 277
278 /* allow the clocks to stabilize */
279 usleep_range(200, 500);
280
281 /* power up core phy and enable ref clock */ 278 /* power up core phy and enable ref clock */
282 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 279 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
283 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); 280 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
281 /*
282 * the async reset input need ref clock to sync internally,
283 * when the ref clock comes after reset, internal synced
284 * reset time is too short, cannot meet the requirement.
285 * add one ~10us delay here.
286 */
287 udelay(10);
284 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 288 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
285 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); 289 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
286 290
291 /* allow the clocks to stabilize */
292 usleep_range(200, 500);
293
287 /* Some boards don't have PCIe reset GPIO. */ 294 /* Some boards don't have PCIe reset GPIO. */
288 if (gpio_is_valid(imx6_pcie->reset_gpio)) { 295 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
289 gpio_set_value(imx6_pcie->reset_gpio, 0); 296 gpio_set_value(imx6_pcie->reset_gpio, 0);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3a5e7e28b874..07aa722bb12c 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -262,13 +262,6 @@ static int pciehp_probe(struct pcie_device *dev)
262 goto err_out_none; 262 goto err_out_none;
263 } 263 }
264 264
265 if (!dev->port->subordinate) {
266 /* Can happen if we run out of bus numbers during probe */
267 dev_err(&dev->device,
268 "Hotplug bridge without secondary bus, ignoring\n");
269 goto err_out_none;
270 }
271
272 ctrl = pcie_init(dev); 265 ctrl = pcie_init(dev);
273 if (!ctrl) { 266 if (!ctrl) {
274 dev_err(&dev->device, "Controller initialization failed\n"); 267 dev_err(&dev->device, "Controller initialization failed\n");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 92b6d9ab00e4..2c6643fdc0cf 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -185,7 +185,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
185} 185}
186static DEVICE_ATTR_RO(modalias); 186static DEVICE_ATTR_RO(modalias);
187 187
188static ssize_t enabled_store(struct device *dev, struct device_attribute *attr, 188static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
189 const char *buf, size_t count) 189 const char *buf, size_t count)
190{ 190{
191 struct pci_dev *pdev = to_pci_dev(dev); 191 struct pci_dev *pdev = to_pci_dev(dev);
@@ -210,7 +210,7 @@ static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
210 return result < 0 ? result : count; 210 return result < 0 ? result : count;
211} 211}
212 212
213static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, 213static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
214 char *buf) 214 char *buf)
215{ 215{
216 struct pci_dev *pdev; 216 struct pci_dev *pdev;
@@ -218,7 +218,7 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
218 pdev = to_pci_dev(dev); 218 pdev = to_pci_dev(dev);
219 return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt)); 219 return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
220} 220}
221static DEVICE_ATTR_RW(enabled); 221static DEVICE_ATTR_RW(enable);
222 222
223#ifdef CONFIG_NUMA 223#ifdef CONFIG_NUMA
224static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, 224static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
@@ -563,7 +563,7 @@ static struct attribute *pci_dev_attrs[] = {
563#endif 563#endif
564 &dev_attr_dma_mask_bits.attr, 564 &dev_attr_dma_mask_bits.attr,
565 &dev_attr_consistent_dma_mask_bits.attr, 565 &dev_attr_consistent_dma_mask_bits.attr,
566 &dev_attr_enabled.attr, 566 &dev_attr_enable.attr,
567 &dev_attr_broken_parity_status.attr, 567 &dev_attr_broken_parity_status.attr,
568 &dev_attr_msi_bus.attr, 568 &dev_attr_msi_bus.attr,
569#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) 569#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index a9f9c46e5022..63fc63911295 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -397,6 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
397 struct pcie_pme_service_data *data = get_service_data(srv); 397 struct pcie_pme_service_data *data = get_service_data(srv);
398 struct pci_dev *port = srv->port; 398 struct pci_dev *port = srv->port;
399 bool wakeup; 399 bool wakeup;
400 int ret;
400 401
401 if (device_may_wakeup(&port->dev)) { 402 if (device_may_wakeup(&port->dev)) {
402 wakeup = true; 403 wakeup = true;
@@ -407,9 +408,10 @@ static int pcie_pme_suspend(struct pcie_device *srv)
407 } 408 }
408 spin_lock_irq(&data->lock); 409 spin_lock_irq(&data->lock);
409 if (wakeup) { 410 if (wakeup) {
410 enable_irq_wake(srv->irq); 411 ret = enable_irq_wake(srv->irq);
411 data->suspend_level = PME_SUSPEND_WAKEUP; 412 data->suspend_level = PME_SUSPEND_WAKEUP;
412 } else { 413 }
414 if (!wakeup || ret) {
413 struct pci_dev *port = srv->port; 415 struct pci_dev *port = srv->port;
414 416
415 pcie_pme_interrupt_enable(port, false); 417 pcie_pme_interrupt_enable(port, false);
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 3611806c9cfd..3cb36693343a 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -100,11 +100,11 @@ static void at91sam9g45_restart(enum reboot_mode mode, const char *cmd)
100 /* Disable SDRAM0 accesses */ 100 /* Disable SDRAM0 accesses */
101 "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" 101 "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
102 /* Power down SDRAM0 */ 102 /* Power down SDRAM0 */
103 " str %4, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" 103 " str %4, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
104 /* Disable SDRAM1 accesses */ 104 /* Disable SDRAM1 accesses */
105 " strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" 105 " strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
106 /* Power down SDRAM1 */ 106 /* Power down SDRAM1 */
107 " strne %4, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" 107 " strne %4, [%1, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
108 /* Reset CPU */ 108 /* Reset CPU */
109 " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t" 109 " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
110 110
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index b800783800a3..ef2dd2e4754b 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -83,6 +83,7 @@ config PWM_BFIN
83config PWM_CLPS711X 83config PWM_CLPS711X
84 tristate "CLPS711X PWM support" 84 tristate "CLPS711X PWM support"
85 depends on ARCH_CLPS711X || COMPILE_TEST 85 depends on ARCH_CLPS711X || COMPILE_TEST
86 depends on HAS_IOMEM
86 help 87 help
87 Generic PWM framework driver for Cirrus Logic CLPS711X. 88 Generic PWM framework driver for Cirrus Logic CLPS711X.
88 89
@@ -101,6 +102,7 @@ config PWM_EP93XX
101config PWM_FSL_FTM 102config PWM_FSL_FTM
102 tristate "Freescale FlexTimer Module (FTM) PWM support" 103 tristate "Freescale FlexTimer Module (FTM) PWM support"
103 depends on OF 104 depends on OF
105 select REGMAP_MMIO
104 help 106 help
105 Generic FTM PWM framework driver for Freescale VF610 and 107 Generic FTM PWM framework driver for Freescale VF610 and
106 Layerscape LS-1 SoCs. 108 Layerscape LS-1 SoCs.
@@ -149,7 +151,7 @@ config PWM_LPC32XX
149 151
150config PWM_LPSS 152config PWM_LPSS
151 tristate "Intel LPSS PWM support" 153 tristate "Intel LPSS PWM support"
152 depends on ACPI 154 depends on X86
153 help 155 help
154 Generic PWM framework driver for Intel Low Power Subsystem PWM 156 Generic PWM framework driver for Intel Low Power Subsystem PWM
155 controller. 157 controller.
@@ -157,6 +159,24 @@ config PWM_LPSS
157 To compile this driver as a module, choose M here: the module 159 To compile this driver as a module, choose M here: the module
158 will be called pwm-lpss. 160 will be called pwm-lpss.
159 161
162config PWM_LPSS_PCI
163 tristate "Intel LPSS PWM PCI driver"
164 depends on PWM_LPSS && PCI
165 help
166 The PCI driver for Intel Low Power Subsystem PWM controller.
167
168 To compile this driver as a module, choose M here: the module
169 will be called pwm-lpss-pci.
170
171config PWM_LPSS_PLATFORM
172 tristate "Intel LPSS PWM platform driver"
173 depends on PWM_LPSS && ACPI
174 help
175 The platform driver for Intel Low Power Subsystem PWM controller.
176
177 To compile this driver as a module, choose M here: the module
178 will be called pwm-lpss-platform.
179
160config PWM_MXS 180config PWM_MXS
161 tristate "Freescale MXS PWM support" 181 tristate "Freescale MXS PWM support"
162 depends on ARCH_MXS && OF 182 depends on ARCH_MXS && OF
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index f8c577d41091..c458606c3755 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -13,6 +13,8 @@ obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
13obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o 13obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
14obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o 14obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
15obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o 15obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
16obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
17obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o
16obj-$(CONFIG_PWM_MXS) += pwm-mxs.o 18obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
17obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o 19obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
18obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o 20obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index d2c35920ff08..966497d10c6e 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -236,7 +236,7 @@ int pwmchip_add(struct pwm_chip *chip)
236 int ret; 236 int ret;
237 237
238 if (!chip || !chip->dev || !chip->ops || !chip->ops->config || 238 if (!chip || !chip->dev || !chip->ops || !chip->ops->config ||
239 !chip->ops->enable || !chip->ops->disable) 239 !chip->ops->enable || !chip->ops->disable || !chip->npwm)
240 return -EINVAL; 240 return -EINVAL;
241 241
242 mutex_lock(&pwm_lock); 242 mutex_lock(&pwm_lock);
@@ -602,12 +602,9 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
602 struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); 602 struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER);
603 const char *dev_id = dev ? dev_name(dev) : NULL; 603 const char *dev_id = dev ? dev_name(dev) : NULL;
604 struct pwm_chip *chip = NULL; 604 struct pwm_chip *chip = NULL;
605 unsigned int index = 0;
606 unsigned int best = 0; 605 unsigned int best = 0;
607 struct pwm_lookup *p; 606 struct pwm_lookup *p, *chosen = NULL;
608 unsigned int match; 607 unsigned int match;
609 unsigned int period;
610 enum pwm_polarity polarity;
611 608
612 /* look up via DT first */ 609 /* look up via DT first */
613 if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) 610 if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
@@ -653,10 +650,7 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
653 } 650 }
654 651
655 if (match > best) { 652 if (match > best) {
656 chip = pwmchip_find_by_name(p->provider); 653 chosen = p;
657 index = p->index;
658 period = p->period;
659 polarity = p->polarity;
660 654
661 if (match != 3) 655 if (match != 3)
662 best = match; 656 best = match;
@@ -665,17 +659,22 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
665 } 659 }
666 } 660 }
667 661
668 mutex_unlock(&pwm_lookup_lock); 662 if (!chosen)
663 goto out;
669 664
670 if (chip) 665 chip = pwmchip_find_by_name(chosen->provider);
671 pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id); 666 if (!chip)
672 if (IS_ERR(pwm)) 667 goto out;
673 return pwm;
674 668
675 pwm_set_period(pwm, period); 669 pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id);
676 pwm_set_polarity(pwm, polarity); 670 if (IS_ERR(pwm))
671 goto out;
677 672
673 pwm_set_period(pwm, chosen->period);
674 pwm_set_polarity(pwm, chosen->polarity);
678 675
676out:
677 mutex_unlock(&pwm_lookup_lock);
679 return pwm; 678 return pwm;
680} 679}
681EXPORT_SYMBOL_GPL(pwm_get); 680EXPORT_SYMBOL_GPL(pwm_get);
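/*
 * Editor's sketch, not part of the patch above: the shape of the pwm_get()
 * rework.  Instead of copying each candidate's period/polarity while
 * scanning (values that stayed uninitialized when nothing matched), the
 * best-matching table entry is remembered as a pointer and only
 * dereferenced after the scan, with the table lock held until it has been
 * consumed.  Illustrative userspace model using a plain mutex.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct lookup { const char *provider; unsigned int period; };

static struct lookup table[] = {
        { "pwm-a", 1000000 },
        { "pwm-b", 5000000 },
};
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int get_period(const char *provider, unsigned int *period)
{
        struct lookup *chosen = NULL;
        size_t i;
        int ret = -1;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (!strcmp(table[i].provider, provider))
                        chosen = &table[i];     /* remember, don't copy yet */

        if (chosen) {
                *period = chosen->period;       /* safe: lock still held */
                ret = 0;
        }
        pthread_mutex_unlock(&table_lock);
        return ret;
}

int main(void)
{
        unsigned int period;

        if (!get_period("pwm-b", &period))
                printf("period=%u ns\n", period);
        return 0;
}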
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 6e700a541ca3..d3c22de9ee47 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -102,7 +102,7 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
102 int duty_ns, int period_ns) 102 int duty_ns, int period_ns)
103{ 103{
104 struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); 104 struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
105 unsigned long clk_rate, prd, dty; 105 unsigned long prd, dty;
106 unsigned long long div; 106 unsigned long long div;
107 unsigned int pres = 0; 107 unsigned int pres = 0;
108 u32 val; 108 u32 val;
@@ -113,20 +113,18 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
113 return -EBUSY; 113 return -EBUSY;
114 } 114 }
115 115
116 clk_rate = clk_get_rate(atmel_pwm->clk); 116 /* Calculate the period cycles and prescale value */
117 div = clk_rate; 117 div = (unsigned long long)clk_get_rate(atmel_pwm->clk) * period_ns;
118 do_div(div, NSEC_PER_SEC);
118 119
119 /* Calculate the period cycles */
120 while (div > PWM_MAX_PRD) { 120 while (div > PWM_MAX_PRD) {
121 div = clk_rate / (1 << pres); 121 div >>= 1;
122 div = div * period_ns; 122 pres++;
123 /* 1/Hz = 100000000 ns */ 123 }
124 do_div(div, 1000000000); 124
125 125 if (pres > PRD_MAX_PRES) {
126 if (pres++ > PRD_MAX_PRES) { 126 dev_err(chip->dev, "pres exceeds the maximum value\n");
127 dev_err(chip->dev, "pres exceeds the maximum value\n"); 127 return -EINVAL;
128 return -EINVAL;
129 }
130 } 128 }
131 129
132 /* Calculate the duty cycles */ 130 /* Calculate the duty cycles */
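/*
 * Editor's sketch, not part of the patch above: the new prescaler
 * calculation in atmel_pwm_config() as a standalone computation.  The
 * period in clock cycles is clk_rate * period_ns / 1e9; it is then halved
 * (prescaler doubled) until it fits the 16-bit period register.  The clock
 * rate and period below are arbitrary example values.
 */
#include <stdio.h>

#define PWM_MAX_PRD     0xffff
#define PRD_MAX_PRES    10
#define NSEC_PER_SEC    1000000000ULL

int main(void)
{
        unsigned long long clk_rate = 66000000;         /* 66 MHz, example */
        unsigned long long period_ns = 20000000;        /* 20 ms -> 50 Hz */
        unsigned long long div = clk_rate * period_ns / NSEC_PER_SEC;
        unsigned int pres = 0;

        while (div > PWM_MAX_PRD) {
                div >>= 1;
                pres++;
        }

        if (pres > PRD_MAX_PRES)
                printf("period too long for this clock\n");
        else
                printf("prescaler=%u (divide by %u), period reg=%llu\n",
                       pres, 1u << pres, div);
        return 0;
}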
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index a18bc8fea385..0f2cc7ef7784 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -18,14 +18,14 @@
18#include <linux/of_address.h> 18#include <linux/of_address.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/pwm.h> 20#include <linux/pwm.h>
21#include <linux/regmap.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22 23
23#define FTM_SC 0x00 24#define FTM_SC 0x00
24#define FTM_SC_CLK_MASK 0x3 25#define FTM_SC_CLK_MASK_SHIFT 3
25#define FTM_SC_CLK_SHIFT 3 26#define FTM_SC_CLK_MASK (3 << FTM_SC_CLK_MASK_SHIFT)
26#define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_SHIFT) 27#define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_MASK_SHIFT)
27#define FTM_SC_PS_MASK 0x7 28#define FTM_SC_PS_MASK 0x7
28#define FTM_SC_PS_SHIFT 0
29 29
30#define FTM_CNT 0x04 30#define FTM_CNT 0x04
31#define FTM_MOD 0x08 31#define FTM_MOD 0x08
@@ -83,7 +83,7 @@ struct fsl_pwm_chip {
83 unsigned int cnt_select; 83 unsigned int cnt_select;
84 unsigned int clk_ps; 84 unsigned int clk_ps;
85 85
86 void __iomem *base; 86 struct regmap *regmap;
87 87
88 int period_ns; 88 int period_ns;
89 89
@@ -219,10 +219,11 @@ static unsigned long fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc,
219 unsigned long period_ns, 219 unsigned long period_ns,
220 unsigned long duty_ns) 220 unsigned long duty_ns)
221{ 221{
222 unsigned long long val, duty; 222 unsigned long long duty;
223 u32 val;
223 224
224 val = readl(fpc->base + FTM_MOD); 225 regmap_read(fpc->regmap, FTM_MOD, &val);
225 duty = duty_ns * (val + 1); 226 duty = (unsigned long long)duty_ns * (val + 1);
226 do_div(duty, period_ns); 227 do_div(duty, period_ns);
227 228
228 return (unsigned long)duty; 229 return (unsigned long)duty;
@@ -232,7 +233,7 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
232 int duty_ns, int period_ns) 233 int duty_ns, int period_ns)
233{ 234{
234 struct fsl_pwm_chip *fpc = to_fsl_chip(chip); 235 struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
235 u32 val, period, duty; 236 u32 period, duty;
236 237
237 mutex_lock(&fpc->lock); 238 mutex_lock(&fpc->lock);
238 239
@@ -257,11 +258,9 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
257 return -EINVAL; 258 return -EINVAL;
258 } 259 }
259 260
260 val = readl(fpc->base + FTM_SC); 261 regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_PS_MASK,
261 val &= ~(FTM_SC_PS_MASK << FTM_SC_PS_SHIFT); 262 fpc->clk_ps);
262 val |= fpc->clk_ps; 263 regmap_write(fpc->regmap, FTM_MOD, period - 1);
263 writel(val, fpc->base + FTM_SC);
264 writel(period - 1, fpc->base + FTM_MOD);
265 264
266 fpc->period_ns = period_ns; 265 fpc->period_ns = period_ns;
267 } 266 }
@@ -270,8 +269,9 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
270 269
271 duty = fsl_pwm_calculate_duty(fpc, period_ns, duty_ns); 270 duty = fsl_pwm_calculate_duty(fpc, period_ns, duty_ns);
272 271
273 writel(FTM_CSC_MSB | FTM_CSC_ELSB, fpc->base + FTM_CSC(pwm->hwpwm)); 272 regmap_write(fpc->regmap, FTM_CSC(pwm->hwpwm),
274 writel(duty, fpc->base + FTM_CV(pwm->hwpwm)); 273 FTM_CSC_MSB | FTM_CSC_ELSB);
274 regmap_write(fpc->regmap, FTM_CV(pwm->hwpwm), duty);
275 275
276 return 0; 276 return 0;
277} 277}
@@ -283,31 +283,28 @@ static int fsl_pwm_set_polarity(struct pwm_chip *chip,
283 struct fsl_pwm_chip *fpc = to_fsl_chip(chip); 283 struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
284 u32 val; 284 u32 val;
285 285
286 val = readl(fpc->base + FTM_POL); 286 regmap_read(fpc->regmap, FTM_POL, &val);
287 287
288 if (polarity == PWM_POLARITY_INVERSED) 288 if (polarity == PWM_POLARITY_INVERSED)
289 val |= BIT(pwm->hwpwm); 289 val |= BIT(pwm->hwpwm);
290 else 290 else
291 val &= ~BIT(pwm->hwpwm); 291 val &= ~BIT(pwm->hwpwm);
292 292
293 writel(val, fpc->base + FTM_POL); 293 regmap_write(fpc->regmap, FTM_POL, val);
294 294
295 return 0; 295 return 0;
296} 296}
297 297
298static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc) 298static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
299{ 299{
300 u32 val;
301 int ret; 300 int ret;
302 301
303 if (fpc->use_count != 0) 302 if (fpc->use_count != 0)
304 return 0; 303 return 0;
305 304
306 /* select counter clock source */ 305 /* select counter clock source */
307 val = readl(fpc->base + FTM_SC); 306 regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
308 val &= ~(FTM_SC_CLK_MASK << FTM_SC_CLK_SHIFT); 307 FTM_SC_CLK(fpc->cnt_select));
309 val |= FTM_SC_CLK(fpc->cnt_select);
310 writel(val, fpc->base + FTM_SC);
311 308
312 ret = clk_prepare_enable(fpc->clk[fpc->cnt_select]); 309 ret = clk_prepare_enable(fpc->clk[fpc->cnt_select]);
313 if (ret) 310 if (ret)
@@ -327,13 +324,10 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
327static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 324static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
328{ 325{
329 struct fsl_pwm_chip *fpc = to_fsl_chip(chip); 326 struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
330 u32 val;
331 int ret; 327 int ret;
332 328
333 mutex_lock(&fpc->lock); 329 mutex_lock(&fpc->lock);
334 val = readl(fpc->base + FTM_OUTMASK); 330 regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm), 0);
335 val &= ~BIT(pwm->hwpwm);
336 writel(val, fpc->base + FTM_OUTMASK);
337 331
338 ret = fsl_counter_clock_enable(fpc); 332 ret = fsl_counter_clock_enable(fpc);
339 mutex_unlock(&fpc->lock); 333 mutex_unlock(&fpc->lock);
@@ -343,8 +337,6 @@ static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
343 337
344static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc) 338static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
345{ 339{
346 u32 val;
347
348 /* 340 /*
349 * already disabled, do nothing 341 * already disabled, do nothing
350 */ 342 */
@@ -356,9 +348,7 @@ static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
356 return; 348 return;
357 349
358 /* no users left, disable PWM counter clock */ 350 /* no users left, disable PWM counter clock */
359 val = readl(fpc->base + FTM_SC); 351 regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, 0);
360 val &= ~(FTM_SC_CLK_MASK << FTM_SC_CLK_SHIFT);
361 writel(val, fpc->base + FTM_SC);
362 352
363 clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]); 353 clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
364 clk_disable_unprepare(fpc->clk[fpc->cnt_select]); 354 clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
@@ -370,14 +360,12 @@ static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
370 u32 val; 360 u32 val;
371 361
372 mutex_lock(&fpc->lock); 362 mutex_lock(&fpc->lock);
373 val = readl(fpc->base + FTM_OUTMASK); 363 regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
374 val |= BIT(pwm->hwpwm); 364 BIT(pwm->hwpwm));
375 writel(val, fpc->base + FTM_OUTMASK);
376 365
377 fsl_counter_clock_disable(fpc); 366 fsl_counter_clock_disable(fpc);
378 367
379 val = readl(fpc->base + FTM_OUTMASK); 368 regmap_read(fpc->regmap, FTM_OUTMASK, &val);
380
381 if ((val & 0xFF) == 0xFF) 369 if ((val & 0xFF) == 0xFF)
382 fpc->period_ns = 0; 370 fpc->period_ns = 0;
383 371
@@ -402,19 +390,28 @@ static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
402 if (ret) 390 if (ret)
403 return ret; 391 return ret;
404 392
405 writel(0x00, fpc->base + FTM_CNTIN); 393 regmap_write(fpc->regmap, FTM_CNTIN, 0x00);
406 writel(0x00, fpc->base + FTM_OUTINIT); 394 regmap_write(fpc->regmap, FTM_OUTINIT, 0x00);
407 writel(0xFF, fpc->base + FTM_OUTMASK); 395 regmap_write(fpc->regmap, FTM_OUTMASK, 0xFF);
408 396
409 clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]); 397 clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
410 398
411 return 0; 399 return 0;
412} 400}
413 401
402static const struct regmap_config fsl_pwm_regmap_config = {
403 .reg_bits = 32,
404 .reg_stride = 4,
405 .val_bits = 32,
406
407 .max_register = FTM_PWMLOAD,
408};
409
414static int fsl_pwm_probe(struct platform_device *pdev) 410static int fsl_pwm_probe(struct platform_device *pdev)
415{ 411{
416 struct fsl_pwm_chip *fpc; 412 struct fsl_pwm_chip *fpc;
417 struct resource *res; 413 struct resource *res;
414 void __iomem *base;
418 int ret; 415 int ret;
419 416
420 fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL); 417 fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL);
@@ -426,9 +423,16 @@ static int fsl_pwm_probe(struct platform_device *pdev)
426 fpc->chip.dev = &pdev->dev; 423 fpc->chip.dev = &pdev->dev;
427 424
428 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 425 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
429 fpc->base = devm_ioremap_resource(&pdev->dev, res); 426 base = devm_ioremap_resource(&pdev->dev, res);
430 if (IS_ERR(fpc->base)) 427 if (IS_ERR(base))
431 return PTR_ERR(fpc->base); 428 return PTR_ERR(base);
429
430 fpc->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
431 &fsl_pwm_regmap_config);
432 if (IS_ERR(fpc->regmap)) {
433 dev_err(&pdev->dev, "regmap init failed\n");
434 return PTR_ERR(fpc->regmap);
435 }
432 436
433 fpc->clk[FSL_PWM_CLK_SYS] = devm_clk_get(&pdev->dev, "ftm_sys"); 437 fpc->clk[FSL_PWM_CLK_SYS] = devm_clk_get(&pdev->dev, "ftm_sys");
434 if (IS_ERR(fpc->clk[FSL_PWM_CLK_SYS])) { 438 if (IS_ERR(fpc->clk[FSL_PWM_CLK_SYS])) {
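/*
 * Editor's sketch, not part of the patch above: the general shape of the
 * MMIO -> regmap conversion.  A readl()/modify/writel() sequence collapses
 * into one regmap_update_bits() call, and the regmap is created over the
 * same ioremapped region (the driver uses the _mmio_clk variant with a NULL
 * clock, which is equivalent).  Trimmed illustration, not complete probe
 * code; register names are the driver's, the function name is made up.
 */
static const struct regmap_config example_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
};

static int example_select_clock(struct device *dev, void __iomem *base,
                                unsigned int cnt_select)
{
        struct regmap *map;

        map = devm_regmap_init_mmio(dev, base, &example_regmap_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* was: val = readl(base + FTM_SC); clear/set bits; writel(val, ...) */
        return regmap_update_bits(map, FTM_SC, FTM_SC_CLK_MASK,
                                  FTM_SC_CLK(cnt_select));
}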
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 5449d9150d40..f8b5f109c1ab 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/clk.h> 16#include <linux/clk.h>
17#include <linux/delay.h>
17#include <linux/io.h> 18#include <linux/io.h>
18#include <linux/pwm.h> 19#include <linux/pwm.h>
19#include <linux/of.h> 20#include <linux/of.h>
@@ -21,24 +22,30 @@
21 22
22/* i.MX1 and i.MX21 share the same PWM function block: */ 23/* i.MX1 and i.MX21 share the same PWM function block: */
23 24
24#define MX1_PWMC 0x00 /* PWM Control Register */ 25#define MX1_PWMC 0x00 /* PWM Control Register */
25#define MX1_PWMS 0x04 /* PWM Sample Register */ 26#define MX1_PWMS 0x04 /* PWM Sample Register */
26#define MX1_PWMP 0x08 /* PWM Period Register */ 27#define MX1_PWMP 0x08 /* PWM Period Register */
27 28
28#define MX1_PWMC_EN (1 << 4) 29#define MX1_PWMC_EN (1 << 4)
29 30
30/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */ 31/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */
31 32
32#define MX3_PWMCR 0x00 /* PWM Control Register */ 33#define MX3_PWMCR 0x00 /* PWM Control Register */
33#define MX3_PWMSAR 0x0C /* PWM Sample Register */ 34#define MX3_PWMSR 0x04 /* PWM Status Register */
34#define MX3_PWMPR 0x10 /* PWM Period Register */ 35#define MX3_PWMSAR 0x0C /* PWM Sample Register */
35#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4) 36#define MX3_PWMPR 0x10 /* PWM Period Register */
36#define MX3_PWMCR_DOZEEN (1 << 24) 37#define MX3_PWMCR_PRESCALER(x) ((((x) - 1) & 0xFFF) << 4)
37#define MX3_PWMCR_WAITEN (1 << 23) 38#define MX3_PWMCR_DOZEEN (1 << 24)
39#define MX3_PWMCR_WAITEN (1 << 23)
38#define MX3_PWMCR_DBGEN (1 << 22) 40#define MX3_PWMCR_DBGEN (1 << 22)
39#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) 41#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
40#define MX3_PWMCR_CLKSRC_IPG (1 << 16) 42#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
41#define MX3_PWMCR_EN (1 << 0) 43#define MX3_PWMCR_SWR (1 << 3)
44#define MX3_PWMCR_EN (1 << 0)
45#define MX3_PWMSR_FIFOAV_4WORDS 0x4
46#define MX3_PWMSR_FIFOAV_MASK 0x7
47
48#define MX3_PWM_SWR_LOOP 5
42 49
43struct imx_chip { 50struct imx_chip {
44 struct clk *clk_per; 51 struct clk *clk_per;
@@ -103,9 +110,43 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
103 struct pwm_device *pwm, int duty_ns, int period_ns) 110 struct pwm_device *pwm, int duty_ns, int period_ns)
104{ 111{
105 struct imx_chip *imx = to_imx_chip(chip); 112 struct imx_chip *imx = to_imx_chip(chip);
113 struct device *dev = chip->dev;
106 unsigned long long c; 114 unsigned long long c;
107 unsigned long period_cycles, duty_cycles, prescale; 115 unsigned long period_cycles, duty_cycles, prescale;
108 u32 cr; 116 unsigned int period_ms;
117 bool enable = test_bit(PWMF_ENABLED, &pwm->flags);
118 int wait_count = 0, fifoav;
119 u32 cr, sr;
120
121 /*
122 * i.MX PWMv2 has a 4-word sample FIFO.
123 * In order to avoid FIFO overflow issue, we do software reset
124 * to clear all sample FIFO if the controller is disabled or
125 * wait for a full PWM cycle to get a relinquished FIFO slot
126 * when the controller is enabled and the FIFO is fully loaded.
127 */
128 if (enable) {
129 sr = readl(imx->mmio_base + MX3_PWMSR);
130 fifoav = sr & MX3_PWMSR_FIFOAV_MASK;
131 if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
132 period_ms = DIV_ROUND_UP(pwm->period, NSEC_PER_MSEC);
133 msleep(period_ms);
134
135 sr = readl(imx->mmio_base + MX3_PWMSR);
136 if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK))
137 dev_warn(dev, "there is no free FIFO slot\n");
138 }
139 } else {
140 writel(MX3_PWMCR_SWR, imx->mmio_base + MX3_PWMCR);
141 do {
142 usleep_range(200, 1000);
143 cr = readl(imx->mmio_base + MX3_PWMCR);
144 } while ((cr & MX3_PWMCR_SWR) &&
145 (wait_count++ < MX3_PWM_SWR_LOOP));
146
147 if (cr & MX3_PWMCR_SWR)
148 dev_warn(dev, "software reset timeout\n");
149 }
109 150
110 c = clk_get_rate(imx->clk_per); 151 c = clk_get_rate(imx->clk_per);
111 c = c * period_ns; 152 c = c * period_ns;
@@ -135,7 +176,7 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
135 MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | 176 MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
136 MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH; 177 MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH;
137 178
138 if (test_bit(PWMF_ENABLED, &pwm->flags)) 179 if (enable)
139 cr |= MX3_PWMCR_EN; 180 cr |= MX3_PWMCR_EN;
140 181
141 writel(cr, imx->mmio_base + MX3_PWMCR); 182 writel(cr, imx->mmio_base + MX3_PWMCR);
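/*
 * Editor's sketch, not part of the patch above: the bounded software-reset
 * poll added to imx_pwm_config_v2(), as a generic pattern.  The status bit
 * is polled a fixed number of times with a short sleep in between, and a
 * timeout only produces a warning.  Userspace model; reset_still_pending()
 * stands in for reading MX3_PWMCR and testing MX3_PWMCR_SWR.
 */
#include <stdbool.h>
#include <stdio.h>

#define SWR_LOOP 5      /* mirrors MX3_PWM_SWR_LOOP */

static bool reset_still_pending(int attempt)
{
        /* stand-in for: readl(mmio_base + MX3_PWMCR) & MX3_PWMCR_SWR */
        return attempt < 2;     /* pretend the bit clears on the 3rd read */
}

int main(void)
{
        int wait_count = 0;
        bool pending;

        do {
                /* usleep_range(200, 1000) in the driver */
                pending = reset_still_pending(wait_count);
        } while (pending && (wait_count++ < SWR_LOOP));

        if (pending)
                printf("software reset timeout\n");
        else
                printf("reset completed after %d polls\n", wait_count + 1);
        return 0;
}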
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
new file mode 100644
index 000000000000..cf20d2beacdd
--- /dev/null
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -0,0 +1,64 @@
1/*
2 * Intel Low Power Subsystem PWM controller PCI driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 *
6 * Derived from the original pwm-lpss.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/pci.h>
16
17#include "pwm-lpss.h"
18
19static int pwm_lpss_probe_pci(struct pci_dev *pdev,
20 const struct pci_device_id *id)
21{
22 const struct pwm_lpss_boardinfo *info;
23 struct pwm_lpss_chip *lpwm;
24 int err;
25
26 err = pcim_enable_device(pdev);
27 if (err < 0)
28 return err;
29
30 info = (struct pwm_lpss_boardinfo *)id->driver_data;
31 lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
32 if (IS_ERR(lpwm))
33 return PTR_ERR(lpwm);
34
35 pci_set_drvdata(pdev, lpwm);
36 return 0;
37}
38
39static void pwm_lpss_remove_pci(struct pci_dev *pdev)
40{
41 struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev);
42
43 pwm_lpss_remove(lpwm);
44}
45
46static const struct pci_device_id pwm_lpss_pci_ids[] = {
47 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
48 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
49 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
50 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
51 { },
52};
53MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids);
54
55static struct pci_driver pwm_lpss_driver_pci = {
56 .name = "pwm-lpss",
57 .id_table = pwm_lpss_pci_ids,
58 .probe = pwm_lpss_probe_pci,
59 .remove = pwm_lpss_remove_pci,
60};
61module_pci_driver(pwm_lpss_driver_pci);
62
63MODULE_DESCRIPTION("PWM PCI driver for Intel LPSS");
64MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
new file mode 100644
index 000000000000..18a9c880a76d
--- /dev/null
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -0,0 +1,68 @@
1/*
2 * Intel Low Power Subsystem PWM controller driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 *
6 * Derived from the original pwm-lpss.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/acpi.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17
18#include "pwm-lpss.h"
19
20static int pwm_lpss_probe_platform(struct platform_device *pdev)
21{
22 const struct pwm_lpss_boardinfo *info;
23 const struct acpi_device_id *id;
24 struct pwm_lpss_chip *lpwm;
25 struct resource *r;
26
27 id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
28 if (!id)
29 return -ENODEV;
30
31 info = (const struct pwm_lpss_boardinfo *)id->driver_data;
32 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
33
34 lpwm = pwm_lpss_probe(&pdev->dev, r, info);
35 if (IS_ERR(lpwm))
36 return PTR_ERR(lpwm);
37
38 platform_set_drvdata(pdev, lpwm);
39 return 0;
40}
41
42static int pwm_lpss_remove_platform(struct platform_device *pdev)
43{
44 struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev);
45
46 return pwm_lpss_remove(lpwm);
47}
48
49static const struct acpi_device_id pwm_lpss_acpi_match[] = {
50 { "80860F09", (unsigned long)&pwm_lpss_byt_info },
51 { "80862288", (unsigned long)&pwm_lpss_bsw_info },
52 { },
53};
54MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
55
56static struct platform_driver pwm_lpss_driver_platform = {
57 .driver = {
58 .name = "pwm-lpss",
59 .acpi_match_table = pwm_lpss_acpi_match,
60 },
61 .probe = pwm_lpss_probe_platform,
62 .remove = pwm_lpss_remove_platform,
63};
64module_platform_driver(pwm_lpss_driver_platform);
65
66MODULE_DESCRIPTION("PWM platform driver for Intel LPSS");
67MODULE_LICENSE("GPL v2");
68MODULE_ALIAS("platform:pwm-lpss");
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 4df994f72d96..e9798253a16f 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -13,15 +13,11 @@
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16#include <linux/acpi.h> 16#include <linux/io.h>
17#include <linux/device.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/pwm.h>
21#include <linux/platform_device.h>
22#include <linux/pci.h>
23 19
24static int pci_drv, plat_drv; /* So we know which drivers registered */ 20#include "pwm-lpss.h"
25 21
26#define PWM 0x00000000 22#define PWM 0x00000000
27#define PWM_ENABLE BIT(31) 23#define PWM_ENABLE BIT(31)
@@ -39,14 +35,17 @@ struct pwm_lpss_chip {
39 unsigned long clk_rate; 35 unsigned long clk_rate;
40}; 36};
41 37
42struct pwm_lpss_boardinfo { 38/* BayTrail */
43 unsigned long clk_rate; 39const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
40 .clk_rate = 25000000
44}; 41};
42EXPORT_SYMBOL_GPL(pwm_lpss_byt_info);
45 43
46/* BayTrail */ 44/* Braswell */
47static const struct pwm_lpss_boardinfo byt_info = { 45const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
48 25000000 46 .clk_rate = 19200000
49}; 47};
48EXPORT_SYMBOL_GPL(pwm_lpss_bsw_info);
50 49
51static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) 50static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
52{ 51{
@@ -118,9 +117,8 @@ static const struct pwm_ops pwm_lpss_ops = {
118 .owner = THIS_MODULE, 117 .owner = THIS_MODULE,
119}; 118};
120 119
121static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, 120struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
122 struct resource *r, 121 const struct pwm_lpss_boardinfo *info)
123 const struct pwm_lpss_boardinfo *info)
124{ 122{
125 struct pwm_lpss_chip *lpwm; 123 struct pwm_lpss_chip *lpwm;
126 int ret; 124 int ret;
@@ -147,8 +145,9 @@ static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev,
147 145
148 return lpwm; 146 return lpwm;
149} 147}
148EXPORT_SYMBOL_GPL(pwm_lpss_probe);
150 149
151static int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) 150int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
152{ 151{
153 u32 ctrl; 152 u32 ctrl;
154 153
@@ -157,114 +156,8 @@ static int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
157 156
158 return pwmchip_remove(&lpwm->chip); 157 return pwmchip_remove(&lpwm->chip);
159} 158}
160 159EXPORT_SYMBOL_GPL(pwm_lpss_remove);
161static int pwm_lpss_probe_pci(struct pci_dev *pdev,
162 const struct pci_device_id *id)
163{
164 const struct pwm_lpss_boardinfo *info;
165 struct pwm_lpss_chip *lpwm;
166 int err;
167
168 err = pci_enable_device(pdev);
169 if (err < 0)
170 return err;
171
172 info = (struct pwm_lpss_boardinfo *)id->driver_data;
173 lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
174 if (IS_ERR(lpwm))
175 return PTR_ERR(lpwm);
176
177 pci_set_drvdata(pdev, lpwm);
178 return 0;
179}
180
181static void pwm_lpss_remove_pci(struct pci_dev *pdev)
182{
183 struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev);
184
185 pwm_lpss_remove(lpwm);
186 pci_disable_device(pdev);
187}
188
189static struct pci_device_id pwm_lpss_pci_ids[] = {
190 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&byt_info},
191 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&byt_info},
192 { },
193};
194MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids);
195
196static struct pci_driver pwm_lpss_driver_pci = {
197 .name = "pwm-lpss",
198 .id_table = pwm_lpss_pci_ids,
199 .probe = pwm_lpss_probe_pci,
200 .remove = pwm_lpss_remove_pci,
201};
202
203static int pwm_lpss_probe_platform(struct platform_device *pdev)
204{
205 const struct pwm_lpss_boardinfo *info;
206 const struct acpi_device_id *id;
207 struct pwm_lpss_chip *lpwm;
208 struct resource *r;
209
210 id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
211 if (!id)
212 return -ENODEV;
213
214 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
215
216 info = (struct pwm_lpss_boardinfo *)id->driver_data;
217 lpwm = pwm_lpss_probe(&pdev->dev, r, info);
218 if (IS_ERR(lpwm))
219 return PTR_ERR(lpwm);
220
221 platform_set_drvdata(pdev, lpwm);
222 return 0;
223}
224
225static int pwm_lpss_remove_platform(struct platform_device *pdev)
226{
227 struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev);
228
229 return pwm_lpss_remove(lpwm);
230}
231
232static const struct acpi_device_id pwm_lpss_acpi_match[] = {
233 { "80860F09", (unsigned long)&byt_info },
234 { },
235};
236MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
237
238static struct platform_driver pwm_lpss_driver_platform = {
239 .driver = {
240 .name = "pwm-lpss",
241 .acpi_match_table = pwm_lpss_acpi_match,
242 },
243 .probe = pwm_lpss_probe_platform,
244 .remove = pwm_lpss_remove_platform,
245};
246
247static int __init pwm_init(void)
248{
249 pci_drv = pci_register_driver(&pwm_lpss_driver_pci);
250 plat_drv = platform_driver_register(&pwm_lpss_driver_platform);
251 if (pci_drv && plat_drv)
252 return pci_drv;
253
254 return 0;
255}
256module_init(pwm_init);
257
258static void __exit pwm_exit(void)
259{
260 if (!pci_drv)
261 pci_unregister_driver(&pwm_lpss_driver_pci);
262 if (!plat_drv)
263 platform_driver_unregister(&pwm_lpss_driver_platform);
264}
265module_exit(pwm_exit);
266 160
267MODULE_DESCRIPTION("PWM driver for Intel LPSS"); 161MODULE_DESCRIPTION("PWM driver for Intel LPSS");
268MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 162MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
269MODULE_LICENSE("GPL v2"); 163MODULE_LICENSE("GPL v2");
270MODULE_ALIAS("platform:pwm-lpss");
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
new file mode 100644
index 000000000000..aa041bb1b67d
--- /dev/null
+++ b/drivers/pwm/pwm-lpss.h
@@ -0,0 +1,32 @@
1/*
2 * Intel Low Power Subsystem PWM controller driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 *
6 * Derived from the original pwm-lpss.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __PWM_LPSS_H
14#define __PWM_LPSS_H
15
16#include <linux/device.h>
17#include <linux/pwm.h>
18
19struct pwm_lpss_chip;
20
21struct pwm_lpss_boardinfo {
22 unsigned long clk_rate;
23};
24
25extern const struct pwm_lpss_boardinfo pwm_lpss_byt_info;
26extern const struct pwm_lpss_boardinfo pwm_lpss_bsw_info;
27
28struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
29 const struct pwm_lpss_boardinfo *info);
30int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
31
32#endif /* __PWM_LPSS_H */
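With pwm_lpss_probe()/pwm_lpss_remove() and the board-info tables exported through this new header, pwm-lpss-pci.c and pwm-lpss-platform.c above become thin front-ends over a shared core. As a usage illustration only, a further (entirely hypothetical) front-end could reuse the same interface as sketched below; the demo_* names and the 19.2 MHz clock rate are assumptions made for the sketch:

	#include <linux/err.h>
	#include <linux/ioport.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	#include "pwm-lpss.h"

	/* hypothetical board: assume a 19.2 MHz PWM input clock */
	static const struct pwm_lpss_boardinfo demo_lpss_info = {
		.clk_rate = 19200000,
	};

	static int demo_lpss_probe(struct platform_device *pdev)
	{
		struct pwm_lpss_chip *lpwm;
		struct resource *r;

		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		lpwm = pwm_lpss_probe(&pdev->dev, r, &demo_lpss_info);
		if (IS_ERR(lpwm))
			return PTR_ERR(lpwm);

		platform_set_drvdata(pdev, lpwm);
		return 0;
	}

	static int demo_lpss_remove(struct platform_device *pdev)
	{
		return pwm_lpss_remove(platform_get_drvdata(pdev));
	}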
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index bdd8644c01cf..9442df244101 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -24,7 +24,9 @@
24#define PWM_ENABLE (1 << 0) 24#define PWM_ENABLE (1 << 0)
25#define PWM_CONTINUOUS (1 << 1) 25#define PWM_CONTINUOUS (1 << 1)
26#define PWM_DUTY_POSITIVE (1 << 3) 26#define PWM_DUTY_POSITIVE (1 << 3)
27#define PWM_DUTY_NEGATIVE (0 << 3)
27#define PWM_INACTIVE_NEGATIVE (0 << 4) 28#define PWM_INACTIVE_NEGATIVE (0 << 4)
29#define PWM_INACTIVE_POSITIVE (1 << 4)
28#define PWM_OUTPUT_LEFT (0 << 5) 30#define PWM_OUTPUT_LEFT (0 << 5)
29#define PWM_LP_DISABLE (0 << 8) 31#define PWM_LP_DISABLE (0 << 8)
30 32
@@ -45,8 +47,10 @@ struct rockchip_pwm_regs {
45struct rockchip_pwm_data { 47struct rockchip_pwm_data {
46 struct rockchip_pwm_regs regs; 48 struct rockchip_pwm_regs regs;
47 unsigned int prescaler; 49 unsigned int prescaler;
50 const struct pwm_ops *ops;
48 51
49 void (*set_enable)(struct pwm_chip *chip, bool enable); 52 void (*set_enable)(struct pwm_chip *chip,
53 struct pwm_device *pwm, bool enable);
50}; 54};
51 55
52static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c) 56static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
@@ -54,7 +58,8 @@ static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
54 return container_of(c, struct rockchip_pwm_chip, chip); 58 return container_of(c, struct rockchip_pwm_chip, chip);
55} 59}
56 60
57static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, bool enable) 61static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip,
62 struct pwm_device *pwm, bool enable)
58{ 63{
59 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); 64 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
60 u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN; 65 u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN;
@@ -70,14 +75,19 @@ static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, bool enable)
70 writel_relaxed(val, pc->base + pc->data->regs.ctrl); 75 writel_relaxed(val, pc->base + pc->data->regs.ctrl);
71} 76}
72 77
73static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip, bool enable) 78static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip,
79 struct pwm_device *pwm, bool enable)
74{ 80{
75 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); 81 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
76 u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | 82 u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
77 PWM_CONTINUOUS | PWM_DUTY_POSITIVE | 83 PWM_CONTINUOUS;
78 PWM_INACTIVE_NEGATIVE;
79 u32 val; 84 u32 val;
80 85
86 if (pwm->polarity == PWM_POLARITY_INVERSED)
87 enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE;
88 else
89 enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE;
90
81 val = readl_relaxed(pc->base + pc->data->regs.ctrl); 91 val = readl_relaxed(pc->base + pc->data->regs.ctrl);
82 92
83 if (enable) 93 if (enable)
@@ -124,6 +134,19 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
124 return 0; 134 return 0;
125} 135}
126 136
137static int rockchip_pwm_set_polarity(struct pwm_chip *chip,
138 struct pwm_device *pwm,
139 enum pwm_polarity polarity)
140{
141 /*
142 * No action needed here because pwm->polarity will be set by the core
143 * and the core will only change polarity when the PWM is not enabled.
144 * We'll handle things in set_enable().
145 */
146
147 return 0;
148}
149
127static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 150static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
128{ 151{
129 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); 152 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
@@ -133,7 +156,7 @@ static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
133 if (ret) 156 if (ret)
134 return ret; 157 return ret;
135 158
136 pc->data->set_enable(chip, true); 159 pc->data->set_enable(chip, pwm, true);
137 160
138 return 0; 161 return 0;
139} 162}
@@ -142,18 +165,26 @@ static void rockchip_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
142{ 165{
143 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); 166 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
144 167
145 pc->data->set_enable(chip, false); 168 pc->data->set_enable(chip, pwm, false);
146 169
147 clk_disable(pc->clk); 170 clk_disable(pc->clk);
148} 171}
149 172
150static const struct pwm_ops rockchip_pwm_ops = { 173static const struct pwm_ops rockchip_pwm_ops_v1 = {
151 .config = rockchip_pwm_config, 174 .config = rockchip_pwm_config,
152 .enable = rockchip_pwm_enable, 175 .enable = rockchip_pwm_enable,
153 .disable = rockchip_pwm_disable, 176 .disable = rockchip_pwm_disable,
154 .owner = THIS_MODULE, 177 .owner = THIS_MODULE,
155}; 178};
156 179
180static const struct pwm_ops rockchip_pwm_ops_v2 = {
181 .config = rockchip_pwm_config,
182 .set_polarity = rockchip_pwm_set_polarity,
183 .enable = rockchip_pwm_enable,
184 .disable = rockchip_pwm_disable,
185 .owner = THIS_MODULE,
186};
187
157static const struct rockchip_pwm_data pwm_data_v1 = { 188static const struct rockchip_pwm_data pwm_data_v1 = {
158 .regs = { 189 .regs = {
159 .duty = 0x04, 190 .duty = 0x04,
@@ -162,6 +193,7 @@ static const struct rockchip_pwm_data pwm_data_v1 = {
162 .ctrl = 0x0c, 193 .ctrl = 0x0c,
163 }, 194 },
164 .prescaler = 2, 195 .prescaler = 2,
196 .ops = &rockchip_pwm_ops_v1,
165 .set_enable = rockchip_pwm_set_enable_v1, 197 .set_enable = rockchip_pwm_set_enable_v1,
166}; 198};
167 199
@@ -173,6 +205,7 @@ static const struct rockchip_pwm_data pwm_data_v2 = {
173 .ctrl = 0x0c, 205 .ctrl = 0x0c,
174 }, 206 },
175 .prescaler = 1, 207 .prescaler = 1,
208 .ops = &rockchip_pwm_ops_v2,
176 .set_enable = rockchip_pwm_set_enable_v2, 209 .set_enable = rockchip_pwm_set_enable_v2,
177}; 210};
178 211
@@ -184,6 +217,7 @@ static const struct rockchip_pwm_data pwm_data_vop = {
184 .ctrl = 0x00, 217 .ctrl = 0x00,
185 }, 218 },
186 .prescaler = 1, 219 .prescaler = 1,
220 .ops = &rockchip_pwm_ops_v2,
187 .set_enable = rockchip_pwm_set_enable_v2, 221 .set_enable = rockchip_pwm_set_enable_v2,
188}; 222};
189 223
@@ -227,10 +261,15 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
227 261
228 pc->data = id->data; 262 pc->data = id->data;
229 pc->chip.dev = &pdev->dev; 263 pc->chip.dev = &pdev->dev;
230 pc->chip.ops = &rockchip_pwm_ops; 264 pc->chip.ops = pc->data->ops;
231 pc->chip.base = -1; 265 pc->chip.base = -1;
232 pc->chip.npwm = 1; 266 pc->chip.npwm = 1;
233 267
268 if (pc->data->ops->set_polarity) {
269 pc->chip.of_xlate = of_pwm_xlate_with_flags;
270 pc->chip.of_pwm_n_cells = 3;
271 }
272
234 ret = pwmchip_add(&pc->chip); 273 ret = pwmchip_add(&pc->chip);
235 if (ret < 0) { 274 if (ret < 0) {
236 clk_unprepare(pc->clk); 275 clk_unprepare(pc->clk);
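The net effect of the pwm-rockchip changes is polarity support on the v2/vop blocks: rockchip_pwm_set_polarity() can stay empty because the PWM core records pwm->polarity (and only lets it change while the channel is disabled), and rockchip_pwm_set_enable_v2() selects PWM_DUTY_NEGATIVE/PWM_INACTIVE_POSITIVE or their positive counterparts at enable time. Since the chip now also uses of_pwm_xlate_with_flags with three cells, consumers can request an inverted output. A small consumer-side sketch of the same sequence driven from C; demo_start_inverted() and the chosen period are hypothetical:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/pwm.h>

	static int demo_start_inverted(struct device *dev)
	{
		struct pwm_device *pwm;
		int ret;

		pwm = devm_pwm_get(dev, NULL);
		if (IS_ERR(pwm))
			return PTR_ERR(pwm);

		/* 1 ms period, 50% duty cycle */
		ret = pwm_config(pwm, 500000, 1000000);
		if (ret)
			return ret;

		/* must be done while the PWM is disabled; the core enforces this */
		ret = pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
		if (ret)
			return ret;

		return pwm_enable(pwm);
	}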
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index e305416d7697..196a5c8838c4 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -44,7 +44,7 @@ static const int rk808_buck_config_regs[] = {
44}; 44};
45 45
46static const struct regulator_linear_range rk808_buck_voltage_ranges[] = { 46static const struct regulator_linear_range rk808_buck_voltage_ranges[] = {
47 REGULATOR_LINEAR_RANGE(700000, 0, 63, 12500), 47 REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500),
48}; 48};
49 49
50static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = { 50static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = {
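The one-line rk808 fix corrects the base of the buck 1/2 linear range: with 12.5 mV steps the lowest selector really is 712.5 mV, not 700 mV, so every computed voltage had been 12.5 mV low. A sketch of the selector-to-voltage arithmetic implied by REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500); this is a hypothetical helper for illustration, not the regulator core's own code:

	#include <linux/errno.h>

	/* selector-to-microvolt mapping implied by the corrected range */
	static int demo_rk808_buck12_sel_to_uV(unsigned int sel)
	{
		if (sel > 63)
			return -EINVAL;

		/* sel 0 -> 712500 uV ... sel 63 -> 1500000 uV */
		return 712500 + sel * 12500;
	}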
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 8cd0beebdc3f..6dd12ddbabc6 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -830,7 +830,7 @@ config RTC_DRV_DA9063
830 830
831config RTC_DRV_EFI 831config RTC_DRV_EFI
832 tristate "EFI RTC" 832 tristate "EFI RTC"
833 depends on EFI 833 depends on EFI && !X86
834 help 834 help
835 If you say yes here you will get support for the EFI 835 If you say yes here you will get support for the EFI
836 Real Time Clock. 836 Real Time Clock.
@@ -1320,7 +1320,7 @@ config RTC_DRV_LPC32XX
1320 1320
1321config RTC_DRV_PM8XXX 1321config RTC_DRV_PM8XXX
1322 tristate "Qualcomm PMIC8XXX RTC" 1322 tristate "Qualcomm PMIC8XXX RTC"
1323 depends on MFD_PM8XXX 1323 depends on MFD_PM8XXX || MFD_SPMI_PMIC
1324 help 1324 help
1325 If you say yes here you get support for the 1325 If you say yes here you get support for the
1326 Qualcomm PMIC8XXX RTC. 1326 Qualcomm PMIC8XXX RTC.
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 314129e66d6e..92679df6d6e2 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -160,7 +160,7 @@ static int trickle_charger_of_init(struct device *dev, struct device_node *node)
160 dev_err(dev, "bq32k: diode and resistor mismatch\n"); 160 dev_err(dev, "bq32k: diode and resistor mismatch\n");
161 return -EINVAL; 161 return -EINVAL;
162 } 162 }
163 reg = 0x25; 163 reg = 0x45;
164 break; 164 break;
165 165
166 default: 166 default:
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index c384fec6d173..53b589dc34eb 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -236,3 +236,4 @@ MODULE_ALIAS("platform:rtc-efi");
236MODULE_AUTHOR("dann frazier <dannf@hp.com>"); 236MODULE_AUTHOR("dann frazier <dannf@hp.com>");
237MODULE_LICENSE("GPL"); 237MODULE_LICENSE("GPL");
238MODULE_DESCRIPTION("EFI RTC driver"); 238MODULE_DESCRIPTION("EFI RTC driver");
239MODULE_ALIAS("platform:rtc-efi");
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 197699f358c7..5adcf111fc14 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -27,21 +27,36 @@
27 27
28/* RTC_CTRL register bit fields */ 28/* RTC_CTRL register bit fields */
29#define PM8xxx_RTC_ENABLE BIT(7) 29#define PM8xxx_RTC_ENABLE BIT(7)
30#define PM8xxx_RTC_ALARM_ENABLE BIT(1)
31#define PM8xxx_RTC_ALARM_CLEAR BIT(0) 30#define PM8xxx_RTC_ALARM_CLEAR BIT(0)
32 31
33#define NUM_8_BIT_RTC_REGS 0x4 32#define NUM_8_BIT_RTC_REGS 0x4
34 33
35/** 34/**
35 * struct pm8xxx_rtc_regs - describe RTC registers per PMIC versions
36 * @ctrl: base address of control register
37 * @write: base address of write register
38 * @read: base address of read register
39 * @alarm_ctrl: base address of alarm control register
40 * @alarm_ctrl2: base address of alarm control2 register
41 * @alarm_rw: base address of alarm read-write register
42 * @alarm_en: alarm enable mask
43 */
44struct pm8xxx_rtc_regs {
45 unsigned int ctrl;
46 unsigned int write;
47 unsigned int read;
48 unsigned int alarm_ctrl;
49 unsigned int alarm_ctrl2;
50 unsigned int alarm_rw;
51 unsigned int alarm_en;
52};
53
54/**
36 * struct pm8xxx_rtc - rtc driver internal structure 55 * struct pm8xxx_rtc - rtc driver internal structure
37 * @rtc: rtc device for this driver. 56 * @rtc: rtc device for this driver.
38 * @regmap: regmap used to access RTC registers 57 * @regmap: regmap used to access RTC registers
39 * @allow_set_time: indicates whether writing to the RTC is allowed 58 * @allow_set_time: indicates whether writing to the RTC is allowed
40 * @rtc_alarm_irq: rtc alarm irq number. 59 * @rtc_alarm_irq: rtc alarm irq number.
41 * @rtc_base: address of rtc control register.
42 * @rtc_read_base: base address of read registers.
43 * @rtc_write_base: base address of write registers.
44 * @alarm_rw_base: base address of alarm registers.
45 * @ctrl_reg: rtc control register. 60 * @ctrl_reg: rtc control register.
46 * @rtc_dev: device structure. 61 * @rtc_dev: device structure.
47 * @ctrl_reg_lock: spinlock protecting access to ctrl_reg. 62 * @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
@@ -51,11 +66,7 @@ struct pm8xxx_rtc {
51 struct regmap *regmap; 66 struct regmap *regmap;
52 bool allow_set_time; 67 bool allow_set_time;
53 int rtc_alarm_irq; 68 int rtc_alarm_irq;
54 int rtc_base; 69 const struct pm8xxx_rtc_regs *regs;
55 int rtc_read_base;
56 int rtc_write_base;
57 int alarm_rw_base;
58 u8 ctrl_reg;
59 struct device *rtc_dev; 70 struct device *rtc_dev;
60 spinlock_t ctrl_reg_lock; 71 spinlock_t ctrl_reg_lock;
61}; 72};
@@ -71,8 +82,10 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
71{ 82{
72 int rc, i; 83 int rc, i;
73 unsigned long secs, irq_flags; 84 unsigned long secs, irq_flags;
74 u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, ctrl_reg; 85 u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0;
86 unsigned int ctrl_reg;
75 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 87 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
88 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
76 89
77 if (!rtc_dd->allow_set_time) 90 if (!rtc_dd->allow_set_time)
78 return -EACCES; 91 return -EACCES;
@@ -87,30 +100,30 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
87 dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs); 100 dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
88 101
89 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); 102 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
90 ctrl_reg = rtc_dd->ctrl_reg;
91 103
92 if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) { 104 rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
105 if (rc)
106 goto rtc_rw_fail;
107
108 if (ctrl_reg & regs->alarm_en) {
93 alarm_enabled = 1; 109 alarm_enabled = 1;
94 ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; 110 ctrl_reg &= ~regs->alarm_en;
95 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 111 rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
96 if (rc) { 112 if (rc) {
97 dev_err(dev, "Write to RTC control register failed\n"); 113 dev_err(dev, "Write to RTC control register failed\n");
98 goto rtc_rw_fail; 114 goto rtc_rw_fail;
99 } 115 }
100 rtc_dd->ctrl_reg = ctrl_reg;
101 } else {
102 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
103 } 116 }
104 117
105 /* Write 0 to Byte[0] */ 118 /* Write 0 to Byte[0] */
106 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, 0); 119 rc = regmap_write(rtc_dd->regmap, regs->write, 0);
107 if (rc) { 120 if (rc) {
108 dev_err(dev, "Write to RTC write data register failed\n"); 121 dev_err(dev, "Write to RTC write data register failed\n");
109 goto rtc_rw_fail; 122 goto rtc_rw_fail;
110 } 123 }
111 124
112 /* Write Byte[1], Byte[2], Byte[3] */ 125 /* Write Byte[1], Byte[2], Byte[3] */
113 rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->rtc_write_base + 1, 126 rc = regmap_bulk_write(rtc_dd->regmap, regs->write + 1,
114 &value[1], sizeof(value) - 1); 127 &value[1], sizeof(value) - 1);
115 if (rc) { 128 if (rc) {
116 dev_err(dev, "Write to RTC write data register failed\n"); 129 dev_err(dev, "Write to RTC write data register failed\n");
@@ -118,25 +131,23 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
118 } 131 }
119 132
120 /* Write Byte[0] */ 133 /* Write Byte[0] */
121 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, value[0]); 134 rc = regmap_write(rtc_dd->regmap, regs->write, value[0]);
122 if (rc) { 135 if (rc) {
123 dev_err(dev, "Write to RTC write data register failed\n"); 136 dev_err(dev, "Write to RTC write data register failed\n");
124 goto rtc_rw_fail; 137 goto rtc_rw_fail;
125 } 138 }
126 139
127 if (alarm_enabled) { 140 if (alarm_enabled) {
128 ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; 141 ctrl_reg |= regs->alarm_en;
129 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 142 rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
130 if (rc) { 143 if (rc) {
131 dev_err(dev, "Write to RTC control register failed\n"); 144 dev_err(dev, "Write to RTC control register failed\n");
132 goto rtc_rw_fail; 145 goto rtc_rw_fail;
133 } 146 }
134 rtc_dd->ctrl_reg = ctrl_reg;
135 } 147 }
136 148
137rtc_rw_fail: 149rtc_rw_fail:
138 if (alarm_enabled) 150 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
139 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
140 151
141 return rc; 152 return rc;
142} 153}
@@ -148,9 +159,9 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
148 unsigned long secs; 159 unsigned long secs;
149 unsigned int reg; 160 unsigned int reg;
150 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 161 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
162 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
151 163
152 rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base, 164 rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value));
153 value, sizeof(value));
154 if (rc) { 165 if (rc) {
155 dev_err(dev, "RTC read data register failed\n"); 166 dev_err(dev, "RTC read data register failed\n");
156 return rc; 167 return rc;
@@ -160,14 +171,14 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
160 * Read the LSB again and check if there has been a carry over. 171 * Read the LSB again and check if there has been a carry over.
161 * If there is, redo the read operation. 172 * If there is, redo the read operation.
162 */ 173 */
163 rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_read_base, &reg); 174 rc = regmap_read(rtc_dd->regmap, regs->read, &reg);
164 if (rc < 0) { 175 if (rc < 0) {
165 dev_err(dev, "RTC read data register failed\n"); 176 dev_err(dev, "RTC read data register failed\n");
166 return rc; 177 return rc;
167 } 178 }
168 179
169 if (unlikely(reg < value[0])) { 180 if (unlikely(reg < value[0])) {
170 rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base, 181 rc = regmap_bulk_read(rtc_dd->regmap, regs->read,
171 value, sizeof(value)); 182 value, sizeof(value));
172 if (rc) { 183 if (rc) {
173 dev_err(dev, "RTC read data register failed\n"); 184 dev_err(dev, "RTC read data register failed\n");
@@ -195,9 +206,11 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
195static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) 206static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
196{ 207{
197 int rc, i; 208 int rc, i;
198 u8 value[NUM_8_BIT_RTC_REGS], ctrl_reg; 209 u8 value[NUM_8_BIT_RTC_REGS];
210 unsigned int ctrl_reg;
199 unsigned long secs, irq_flags; 211 unsigned long secs, irq_flags;
200 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 212 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
213 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
201 214
202 rtc_tm_to_time(&alarm->time, &secs); 215 rtc_tm_to_time(&alarm->time, &secs);
203 216
@@ -208,28 +221,28 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
208 221
209 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); 222 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
210 223
211 rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->alarm_rw_base, value, 224 rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
212 sizeof(value)); 225 sizeof(value));
213 if (rc) { 226 if (rc) {
214 dev_err(dev, "Write to RTC ALARM register failed\n"); 227 dev_err(dev, "Write to RTC ALARM register failed\n");
215 goto rtc_rw_fail; 228 goto rtc_rw_fail;
216 } 229 }
217 230
218 ctrl_reg = rtc_dd->ctrl_reg; 231 rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
232 if (rc)
233 goto rtc_rw_fail;
219 234
220 if (alarm->enabled) 235 if (alarm->enabled)
221 ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; 236 ctrl_reg |= regs->alarm_en;
222 else 237 else
223 ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; 238 ctrl_reg &= ~regs->alarm_en;
224 239
225 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 240 rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
226 if (rc) { 241 if (rc) {
227 dev_err(dev, "Write to RTC control register failed\n"); 242 dev_err(dev, "Write to RTC alarm control register failed\n");
228 goto rtc_rw_fail; 243 goto rtc_rw_fail;
229 } 244 }
230 245
231 rtc_dd->ctrl_reg = ctrl_reg;
232
233 dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n", 246 dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
234 alarm->time.tm_hour, alarm->time.tm_min, 247 alarm->time.tm_hour, alarm->time.tm_min,
235 alarm->time.tm_sec, alarm->time.tm_mday, 248 alarm->time.tm_sec, alarm->time.tm_mday,
@@ -245,8 +258,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
245 u8 value[NUM_8_BIT_RTC_REGS]; 258 u8 value[NUM_8_BIT_RTC_REGS];
246 unsigned long secs; 259 unsigned long secs;
247 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 260 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
261 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
248 262
249 rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->alarm_rw_base, value, 263 rc = regmap_bulk_read(rtc_dd->regmap, regs->alarm_rw, value,
250 sizeof(value)); 264 sizeof(value));
251 if (rc) { 265 if (rc) {
252 dev_err(dev, "RTC alarm time read failed\n"); 266 dev_err(dev, "RTC alarm time read failed\n");
@@ -276,25 +290,26 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
276 int rc; 290 int rc;
277 unsigned long irq_flags; 291 unsigned long irq_flags;
278 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 292 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
279 u8 ctrl_reg; 293 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
294 unsigned int ctrl_reg;
280 295
281 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); 296 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
282 297
283 ctrl_reg = rtc_dd->ctrl_reg; 298 rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
299 if (rc)
300 goto rtc_rw_fail;
284 301
285 if (enable) 302 if (enable)
286 ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; 303 ctrl_reg |= regs->alarm_en;
287 else 304 else
288 ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; 305 ctrl_reg &= ~regs->alarm_en;
289 306
290 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 307 rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
291 if (rc) { 308 if (rc) {
292 dev_err(dev, "Write to RTC control register failed\n"); 309 dev_err(dev, "Write to RTC control register failed\n");
293 goto rtc_rw_fail; 310 goto rtc_rw_fail;
294 } 311 }
295 312
296 rtc_dd->ctrl_reg = ctrl_reg;
297
298rtc_rw_fail: 313rtc_rw_fail:
299 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); 314 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
300 return rc; 315 return rc;
@@ -311,6 +326,7 @@ static const struct rtc_class_ops pm8xxx_rtc_ops = {
311static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id) 326static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
312{ 327{
313 struct pm8xxx_rtc *rtc_dd = dev_id; 328 struct pm8xxx_rtc *rtc_dd = dev_id;
329 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
314 unsigned int ctrl_reg; 330 unsigned int ctrl_reg;
315 int rc; 331 int rc;
316 unsigned long irq_flags; 332 unsigned long irq_flags;
@@ -320,48 +336,100 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
320 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); 336 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
321 337
322 /* Clear the alarm enable bit */ 338 /* Clear the alarm enable bit */
323 ctrl_reg = rtc_dd->ctrl_reg; 339 rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
324 ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; 340 if (rc) {
341 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
342 goto rtc_alarm_handled;
343 }
344
345 ctrl_reg &= ~regs->alarm_en;
325 346
326 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 347 rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
327 if (rc) { 348 if (rc) {
328 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); 349 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
329 dev_err(rtc_dd->rtc_dev, 350 dev_err(rtc_dd->rtc_dev,
330 "Write to RTC control register failed\n"); 351 "Write to alarm control register failed\n");
331 goto rtc_alarm_handled; 352 goto rtc_alarm_handled;
332 } 353 }
333 354
334 rtc_dd->ctrl_reg = ctrl_reg;
335 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); 355 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
336 356
337 /* Clear RTC alarm register */ 357 /* Clear RTC alarm register */
338 rc = regmap_read(rtc_dd->regmap, 358 rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl2, &ctrl_reg);
339 rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
340 &ctrl_reg);
341 if (rc) { 359 if (rc) {
342 dev_err(rtc_dd->rtc_dev, 360 dev_err(rtc_dd->rtc_dev,
343 "RTC Alarm control register read failed\n"); 361 "RTC Alarm control2 register read failed\n");
344 goto rtc_alarm_handled; 362 goto rtc_alarm_handled;
345 } 363 }
346 364
347 ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR; 365 ctrl_reg |= PM8xxx_RTC_ALARM_CLEAR;
348 rc = regmap_write(rtc_dd->regmap, 366 rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl2, ctrl_reg);
349 rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
350 ctrl_reg);
351 if (rc) 367 if (rc)
352 dev_err(rtc_dd->rtc_dev, 368 dev_err(rtc_dd->rtc_dev,
353 "Write to RTC Alarm control register failed\n"); 369 "Write to RTC Alarm control2 register failed\n");
354 370
355rtc_alarm_handled: 371rtc_alarm_handled:
356 return IRQ_HANDLED; 372 return IRQ_HANDLED;
357} 373}
358 374
375static int pm8xxx_rtc_enable(struct pm8xxx_rtc *rtc_dd)
376{
377 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
378 unsigned int ctrl_reg;
379 int rc;
380
381 /* Check if the RTC is on, else turn it on */
382 rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
383 if (rc)
384 return rc;
385
386 if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
387 ctrl_reg |= PM8xxx_RTC_ENABLE;
388 rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
389 if (rc)
390 return rc;
391 }
392
393 return 0;
394}
395
396static const struct pm8xxx_rtc_regs pm8921_regs = {
397 .ctrl = 0x11d,
398 .write = 0x11f,
399 .read = 0x123,
400 .alarm_rw = 0x127,
401 .alarm_ctrl = 0x11d,
402 .alarm_ctrl2 = 0x11e,
403 .alarm_en = BIT(1),
404};
405
406static const struct pm8xxx_rtc_regs pm8058_regs = {
407 .ctrl = 0x1e8,
408 .write = 0x1ea,
409 .read = 0x1ee,
410 .alarm_rw = 0x1f2,
411 .alarm_ctrl = 0x1e8,
412 .alarm_ctrl2 = 0x1e9,
413 .alarm_en = BIT(1),
414};
415
416static const struct pm8xxx_rtc_regs pm8941_regs = {
417 .ctrl = 0x6046,
418 .write = 0x6040,
419 .read = 0x6048,
420 .alarm_rw = 0x6140,
421 .alarm_ctrl = 0x6146,
422 .alarm_ctrl2 = 0x6148,
423 .alarm_en = BIT(7),
424};
425
359/* 426/*
360 * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out 427 * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
361 */ 428 */
362static const struct of_device_id pm8xxx_id_table[] = { 429static const struct of_device_id pm8xxx_id_table[] = {
363 { .compatible = "qcom,pm8921-rtc", .data = (void *) 0x11D }, 430 { .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs },
364 { .compatible = "qcom,pm8058-rtc", .data = (void *) 0x1E8 }, 431 { .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
432 { .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs },
365 { }, 433 { },
366}; 434};
367MODULE_DEVICE_TABLE(of, pm8xxx_id_table); 435MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
@@ -369,7 +437,6 @@ MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
369static int pm8xxx_rtc_probe(struct platform_device *pdev) 437static int pm8xxx_rtc_probe(struct platform_device *pdev)
370{ 438{
371 int rc; 439 int rc;
372 unsigned int ctrl_reg;
373 struct pm8xxx_rtc *rtc_dd; 440 struct pm8xxx_rtc *rtc_dd;
374 const struct of_device_id *match; 441 const struct of_device_id *match;
375 442
@@ -399,33 +466,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
399 rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node, 466 rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
400 "allow-set-time"); 467 "allow-set-time");
401 468
402 rtc_dd->rtc_base = (long) match->data; 469 rtc_dd->regs = match->data;
403
404 /* Setup RTC register addresses */
405 rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET;
406 rtc_dd->rtc_read_base = rtc_dd->rtc_base + PM8XXX_RTC_READ_OFFSET;
407 rtc_dd->alarm_rw_base = rtc_dd->rtc_base + PM8XXX_ALARM_RW_OFFSET;
408
409 rtc_dd->rtc_dev = &pdev->dev; 470 rtc_dd->rtc_dev = &pdev->dev;
410 471
411 /* Check if the RTC is on, else turn it on */ 472 rc = pm8xxx_rtc_enable(rtc_dd);
412 rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_base, &ctrl_reg); 473 if (rc)
413 if (rc) {
414 dev_err(&pdev->dev, "RTC control register read failed!\n");
415 return rc; 474 return rc;
416 }
417
418 if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
419 ctrl_reg |= PM8xxx_RTC_ENABLE;
420 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
421 if (rc) {
422 dev_err(&pdev->dev,
423 "Write to RTC control register failed\n");
424 return rc;
425 }
426 }
427
428 rtc_dd->ctrl_reg = ctrl_reg;
429 475
430 platform_set_drvdata(pdev, rtc_dd); 476 platform_set_drvdata(pdev, rtc_dd);
431 477
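The rtc-pm8xxx rework replaces the cached ctrl_reg and hard-coded offsets with a per-variant pm8xxx_rtc_regs table (pm8921/pm8058 keep the alarm-enable bit as BIT(1) of the shared control register, while pm8941 uses BIT(7) of a separate alarm control register) and turns every alarm toggle into a read-modify-write through regmap. As a hedged alternative sketch, not what the patch itself does, the same toggle could be written with regmap_update_bits(), which performs the read/modify/write as one internally locked regmap call; the helper name is hypothetical but the types follow the diff:

	#include <linux/regmap.h>
	#include <linux/types.h>

	/* enable or disable the alarm using the per-variant register and mask */
	static int demo_pm8xxx_alarm_enable(struct pm8xxx_rtc *rtc_dd, bool enable)
	{
		const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;

		return regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
					  regs->alarm_en,
					  enable ? regs->alarm_en : 0);
	}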
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a6b1252c9941..806072238c00 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -535,13 +535,15 @@ static int s3c_rtc_probe(struct platform_device *pdev)
535 } 535 }
536 clk_prepare_enable(info->rtc_clk); 536 clk_prepare_enable(info->rtc_clk);
537 537
538 info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src"); 538 if (info->data->needs_src_clk) {
539 if (IS_ERR(info->rtc_src_clk)) { 539 info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
540 dev_err(&pdev->dev, "failed to find rtc source clock\n"); 540 if (IS_ERR(info->rtc_src_clk)) {
541 return PTR_ERR(info->rtc_src_clk); 541 dev_err(&pdev->dev,
542 "failed to find rtc source clock\n");
543 return PTR_ERR(info->rtc_src_clk);
544 }
545 clk_prepare_enable(info->rtc_src_clk);
542 } 546 }
543 clk_prepare_enable(info->rtc_src_clk);
544
545 547
546 /* check to see if everything is setup correctly */ 548 /* check to see if everything is setup correctly */
547 if (info->data->enable) 549 if (info->data->enable)
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index dc24ecfac2d1..db2cb1f8a1b5 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -105,7 +105,7 @@ config SCLP_ASYNC
105config HMC_DRV 105config HMC_DRV
106 def_tristate m 106 def_tristate m
107 prompt "Support for file transfers from HMC drive CD/DVD-ROM" 107 prompt "Support for file transfers from HMC drive CD/DVD-ROM"
108 depends on 64BIT 108 depends on S390 && 64BIT
109 select CRC16 109 select CRC16
110 help 110 help
111 This option enables support for file transfers from a Hardware 111 This option enables support for file transfers from a Hardware
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
index 5fd73d77c3af..58cecd45b0f5 100644
--- a/drivers/scsi/osd/Kbuild
+++ b/drivers/scsi/osd/Kbuild
@@ -4,7 +4,7 @@
4# Copyright (C) 2008 Panasas Inc. All rights reserved. 4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5# 5#
6# Authors: 6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com> 7# Boaz Harrosh <ooo@electrozaur.com>
8# Benny Halevy <bhalevy@panasas.com> 8# Benny Halevy <bhalevy@panasas.com>
9# 9#
10# This program is free software; you can redistribute it and/or modify 10# This program is free software; you can redistribute it and/or modify
diff --git a/drivers/scsi/osd/Kconfig b/drivers/scsi/osd/Kconfig
index a0703514eb0f..347cc5e33749 100644
--- a/drivers/scsi/osd/Kconfig
+++ b/drivers/scsi/osd/Kconfig
@@ -4,7 +4,7 @@
4# Copyright (C) 2008 Panasas Inc. All rights reserved. 4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5# 5#
6# Authors: 6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com> 7# Boaz Harrosh <ooo@electrozaur.com>
8# Benny Halevy <bhalevy@panasas.com> 8# Benny Halevy <bhalevy@panasas.com>
9# 9#
10# This program is free software; you can redistribute it and/or modify 10# This program is free software; you can redistribute it and/or modify
diff --git a/drivers/scsi/osd/osd_debug.h b/drivers/scsi/osd/osd_debug.h
index 579e491f11df..26341261bb5c 100644
--- a/drivers/scsi/osd/osd_debug.h
+++ b/drivers/scsi/osd/osd_debug.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2008 Panasas Inc. All rights reserved. 4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 * 5 *
6 * Authors: 6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <ooo@electrozaur.com>
8 * Benny Halevy <bhalevy@panasas.com> 8 * Benny Halevy <bhalevy@panasas.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index fd19fd8468ac..488c3929f19a 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -7,7 +7,7 @@
7 * Copyright (C) 2008 Panasas Inc. All rights reserved. 7 * Copyright (C) 2008 Panasas Inc. All rights reserved.
8 * 8 *
9 * Authors: 9 * Authors:
10 * Boaz Harrosh <bharrosh@panasas.com> 10 * Boaz Harrosh <ooo@electrozaur.com>
11 * Benny Halevy <bhalevy@panasas.com> 11 * Benny Halevy <bhalevy@panasas.com>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
@@ -57,7 +57,7 @@
57 57
58enum { OSD_REQ_RETRIES = 1 }; 58enum { OSD_REQ_RETRIES = 1 };
59 59
60MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); 60MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
61MODULE_DESCRIPTION("open-osd initiator library libosd.ko"); 61MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63 63
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index e1d9a4c4c4b3..92cdd4b06526 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -10,7 +10,7 @@
10 * Copyright (C) 2008 Panasas Inc. All rights reserved. 10 * Copyright (C) 2008 Panasas Inc. All rights reserved.
11 * 11 *
12 * Authors: 12 * Authors:
13 * Boaz Harrosh <bharrosh@panasas.com> 13 * Boaz Harrosh <ooo@electrozaur.com>
14 * Benny Halevy <bhalevy@panasas.com> 14 * Benny Halevy <bhalevy@panasas.com>
15 * 15 *
16 * This program is free software; you can redistribute it and/or modify 16 * This program is free software; you can redistribute it and/or modify
@@ -74,7 +74,7 @@
74static const char osd_name[] = "osd"; 74static const char osd_name[] = "osd";
75static const char *osd_version_string = "open-osd 0.2.1"; 75static const char *osd_version_string = "open-osd 0.2.1";
76 76
77MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); 77MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
78MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); 78MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
79MODULE_LICENSE("GPL"); 79MODULE_LICENSE("GPL");
80MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR); 80MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 829752cfd73f..a902fa1db7af 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -112,6 +112,7 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
112 struct qla_tgt_cmd *cmd); 112 struct qla_tgt_cmd *cmd);
113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull); 114 struct atio_from_isp *atio, uint16_t status, int qfull);
115static void qlt_disable_vha(struct scsi_qla_host *vha);
115/* 116/*
116 * Global Variables 117 * Global Variables
117 */ 118 */
@@ -210,7 +211,7 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
210 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 211 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
211} 212}
212 213
213void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, 214static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
214 struct atio_from_isp *atio) 215 struct atio_from_isp *atio)
215{ 216{
216 ql_dbg(ql_dbg_tgt, vha, 0xe072, 217 ql_dbg(ql_dbg_tgt, vha, 0xe072,
@@ -433,7 +434,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
433#if 0 /* FIXME: Re-enable Global event handling.. */ 434#if 0 /* FIXME: Re-enable Global event handling.. */
434 /* Global event */ 435 /* Global event */
435 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); 436 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
436 qlt_clear_tgt_db(ha->tgt.qla_tgt, 1); 437 qlt_clear_tgt_db(ha->tgt.qla_tgt);
437 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { 438 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
438 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, 439 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
439 typeof(*sess), sess_list_entry); 440 typeof(*sess), sess_list_entry);
@@ -515,7 +516,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
515} 516}
516 517
517/* ha->hardware_lock supposed to be held on entry */ 518/* ha->hardware_lock supposed to be held on entry */
518static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only) 519static void qlt_clear_tgt_db(struct qla_tgt *tgt)
519{ 520{
520 struct qla_tgt_sess *sess; 521 struct qla_tgt_sess *sess;
521 522
@@ -867,7 +868,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
867 mutex_lock(&vha->vha_tgt.tgt_mutex); 868 mutex_lock(&vha->vha_tgt.tgt_mutex);
868 spin_lock_irqsave(&ha->hardware_lock, flags); 869 spin_lock_irqsave(&ha->hardware_lock, flags);
869 tgt->tgt_stop = 1; 870 tgt->tgt_stop = 1;
870 qlt_clear_tgt_db(tgt, true); 871 qlt_clear_tgt_db(tgt);
871 spin_unlock_irqrestore(&ha->hardware_lock, flags); 872 spin_unlock_irqrestore(&ha->hardware_lock, flags);
872 mutex_unlock(&vha->vha_tgt.tgt_mutex); 873 mutex_unlock(&vha->vha_tgt.tgt_mutex);
873 mutex_unlock(&qla_tgt_mutex); 874 mutex_unlock(&qla_tgt_mutex);
@@ -1462,12 +1463,13 @@ out_err:
1462 return -1; 1463 return -1;
1463} 1464}
1464 1465
1465static inline void qlt_unmap_sg(struct scsi_qla_host *vha, 1466static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
1466 struct qla_tgt_cmd *cmd)
1467{ 1467{
1468 struct qla_hw_data *ha = vha->hw; 1468 struct qla_hw_data *ha = vha->hw;
1469 1469
1470 BUG_ON(!cmd->sg_mapped); 1470 if (!cmd->sg_mapped)
1471 return;
1472
1471 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 1473 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1472 cmd->sg_mapped = 0; 1474 cmd->sg_mapped = 0;
1473 1475
@@ -2428,8 +2430,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2428 return 0; 2430 return 0;
2429 2431
2430out_unmap_unlock: 2432out_unmap_unlock:
2431 if (cmd->sg_mapped) 2433 qlt_unmap_sg(vha, cmd);
2432 qlt_unmap_sg(vha, cmd);
2433 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2434 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2434 2435
2435 return res; 2436 return res;
@@ -2506,8 +2507,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2506 return res; 2507 return res;
2507 2508
2508out_unlock_free_unmap: 2509out_unlock_free_unmap:
2509 if (cmd->sg_mapped) 2510 qlt_unmap_sg(vha, cmd);
2510 qlt_unmap_sg(vha, cmd);
2511 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2511 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2512 2512
2513 return res; 2513 return res;
@@ -2741,8 +2741,7 @@ done:
2741 if (!ha_locked && !in_interrupt()) 2741 if (!ha_locked && !in_interrupt())
2742 msleep(250); /* just in case */ 2742 msleep(250); /* just in case */
2743 2743
2744 if (cmd->sg_mapped) 2744 qlt_unmap_sg(vha, cmd);
2745 qlt_unmap_sg(vha, cmd);
2746 vha->hw->tgt.tgt_ops->free_cmd(cmd); 2745 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2747 } 2746 }
2748 return; 2747 return;
@@ -3087,8 +3086,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3087 tfo = se_cmd->se_tfo; 3086 tfo = se_cmd->se_tfo;
3088 cmd->cmd_sent_to_fw = 0; 3087 cmd->cmd_sent_to_fw = 0;
3089 3088
3090 if (cmd->sg_mapped) 3089 qlt_unmap_sg(vha, cmd);
3091 qlt_unmap_sg(vha, cmd);
3092 3090
3093 if (unlikely(status != CTIO_SUCCESS)) { 3091 if (unlikely(status != CTIO_SUCCESS)) {
3094 switch (status & 0xFFFF) { 3092 switch (status & 0xFFFF) {
@@ -5343,7 +5341,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
5343EXPORT_SYMBOL(qlt_lport_deregister); 5341EXPORT_SYMBOL(qlt_lport_deregister);
5344 5342
5345/* Must be called under HW lock */ 5343/* Must be called under HW lock */
5346void qlt_set_mode(struct scsi_qla_host *vha) 5344static void qlt_set_mode(struct scsi_qla_host *vha)
5347{ 5345{
5348 struct qla_hw_data *ha = vha->hw; 5346 struct qla_hw_data *ha = vha->hw;
5349 5347
@@ -5364,7 +5362,7 @@ void qlt_set_mode(struct scsi_qla_host *vha)
5364} 5362}
5365 5363
5366/* Must be called under HW lock */ 5364/* Must be called under HW lock */
5367void qlt_clear_mode(struct scsi_qla_host *vha) 5365static void qlt_clear_mode(struct scsi_qla_host *vha)
5368{ 5366{
5369 struct qla_hw_data *ha = vha->hw; 5367 struct qla_hw_data *ha = vha->hw;
5370 5368
@@ -5428,8 +5426,7 @@ EXPORT_SYMBOL(qlt_enable_vha);
5428 * 5426 *
5429 * Disable Target Mode and reset the adapter 5427 * Disable Target Mode and reset the adapter
5430 */ 5428 */
5431void 5429static void qlt_disable_vha(struct scsi_qla_host *vha)
5432qlt_disable_vha(struct scsi_qla_host *vha)
5433{ 5430{
5434 struct qla_hw_data *ha = vha->hw; 5431 struct qla_hw_data *ha = vha->hw;
5435 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5432 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
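A recurring simplification in the qla_target.c hunks: qlt_unmap_sg() now returns quietly when cmd->sg_mapped is clear instead of hitting BUG_ON(), so every `if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd);` call site collapses to a bare call. Making a teardown helper idempotent like this is a common way to keep error paths short; a generic, hypothetical sketch of the idea:

	#include <linux/types.h>

	/* an idempotent cleanup helper -- callers never need to guard it */
	struct demo_cmd {
		bool mapped;
		/* ... DMA bookkeeping would live here ... */
	};

	static void demo_unmap(struct demo_cmd *cmd)
	{
		if (!cmd->mapped)
			return;		/* nothing to undo: safe to call repeatedly */

		/* real code would call pci_unmap_sg()/dma_unmap_sg() here */
		cmd->mapped = false;
	}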
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 8ff330f7d6f5..332086776dfe 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1001,11 +1001,11 @@ struct qla_tgt_prm {
1001 struct qla_tgt *tgt; 1001 struct qla_tgt *tgt;
1002 void *pkt; 1002 void *pkt;
1003 struct scatterlist *sg; /* cmd data buffer SG vector */ 1003 struct scatterlist *sg; /* cmd data buffer SG vector */
1004 unsigned char *sense_buffer;
1004 int seg_cnt; 1005 int seg_cnt;
1005 int req_cnt; 1006 int req_cnt;
1006 uint16_t rq_result; 1007 uint16_t rq_result;
1007 uint16_t scsi_status; 1008 uint16_t scsi_status;
1008 unsigned char *sense_buffer;
1009 int sense_buffer_len; 1009 int sense_buffer_len;
1010 int residual; 1010 int residual;
1011 int add_status_pkt; 1011 int add_status_pkt;
@@ -1033,10 +1033,6 @@ struct qla_tgt_srr_ctio {
1033 1033
1034 1034
1035extern struct qla_tgt_data qla_target; 1035extern struct qla_tgt_data qla_target;
1036/*
1037 * Internal function prototypes
1038 */
1039void qlt_disable_vha(struct scsi_qla_host *);
1040 1036
1041/* 1037/*
1042 * Function prototypes for qla_target.c logic used by qla2xxx LLD code. 1038 * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
@@ -1049,8 +1045,6 @@ extern void qlt_lport_deregister(struct scsi_qla_host *);
1049extern void qlt_unreg_sess(struct qla_tgt_sess *); 1045extern void qlt_unreg_sess(struct qla_tgt_sess *);
1050extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); 1046extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
1051extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); 1047extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
1052extern void qlt_set_mode(struct scsi_qla_host *ha);
1053extern void qlt_clear_mode(struct scsi_qla_host *ha);
1054extern int __init qlt_init(void); 1048extern int __init qlt_init(void);
1055extern void qlt_exit(void); 1049extern void qlt_exit(void);
1056extern void qlt_update_vp_map(struct scsi_qla_host *, int); 1050extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1083,13 +1077,9 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
1083/* 1077/*
1084 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 1078 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
1085 */ 1079 */
1086extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
1087 struct atio_from_isp *);
1088extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1080extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1089extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1081extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1090extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1082extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1091extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
1092extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
1093extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1083extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1094extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1084extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1095extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1085extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 031b2961c6b7..73f9feecda72 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -786,7 +786,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
786 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); 786 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
787 787
788 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); 788 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
789 WARN_ON(node && (node != se_nacl)); 789 if (WARN_ON(node && (node != se_nacl))) {
790 /*
791 * The nacl no longer matches what we think it should be.
792 * Most likely a new dynamic acl has been added while
793 * someone dropped the hardware lock. It clearly is a
794 * bug elsewhere, but this bit can't make things worse.
795 */
796 btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
797 node, GFP_ATOMIC);
798 }
790 799
791 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", 800 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
792 se_nacl, nacl->nport_wwnn, nacl->nport_id); 801 se_nacl, nacl->nport_wwnn, nacl->nport_id);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 729215885250..72e12bad14b9 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -669,6 +669,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
669 master->cleanup = dw_spi_cleanup; 669 master->cleanup = dw_spi_cleanup;
670 master->transfer_one_message = dw_spi_transfer_one_message; 670 master->transfer_one_message = dw_spi_transfer_one_message;
671 master->max_speed_hz = dws->max_freq; 671 master->max_speed_hz = dws->max_freq;
672 master->dev.of_node = dev->of_node;
672 673
673 /* Basic HW init */ 674 /* Basic HW init */
674 spi_hw_init(dws); 675 spi_hw_init(dws);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 835cdda6f4f5..c76b7d7879df 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -454,7 +454,7 @@ static int orion_spi_probe(struct platform_device *pdev)
454 spi->master = master; 454 spi->master = master;
455 455
456 of_id = of_match_device(orion_spi_of_match_table, &pdev->dev); 456 of_id = of_match_device(orion_spi_of_match_table, &pdev->dev);
457 devdata = of_id->data; 457 devdata = (of_id) ? of_id->data : &orion_spi_dev_data;
458 spi->devdata = devdata; 458 spi->devdata = devdata;
459 459
460 spi->clk = devm_clk_get(&pdev->dev, NULL); 460 spi->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f35f723816ea..fc2dd8441608 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1106,7 +1106,7 @@ err_rxdesc:
1106 pl022->sgt_tx.nents, DMA_TO_DEVICE); 1106 pl022->sgt_tx.nents, DMA_TO_DEVICE);
1107err_tx_sgmap: 1107err_tx_sgmap:
1108 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, 1108 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
1109 pl022->sgt_tx.nents, DMA_FROM_DEVICE); 1109 pl022->sgt_rx.nents, DMA_FROM_DEVICE);
1110err_rx_sgmap: 1110err_rx_sgmap:
1111 sg_free_table(&pl022->sgt_tx); 1111 sg_free_table(&pl022->sgt_tx);
1112err_alloc_tx_sg: 1112err_alloc_tx_sg:
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index f96ea8a38d64..87bc16f491f0 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -145,6 +145,9 @@
145#define RXBUSY (1 << 0) 145#define RXBUSY (1 << 0)
146#define TXBUSY (1 << 1) 146#define TXBUSY (1 << 1)
147 147
148/* sclk_out: spi master internal logic in rk3x can support 50Mhz */
149#define MAX_SCLK_OUT 50000000
150
148enum rockchip_ssi_type { 151enum rockchip_ssi_type {
149 SSI_MOTO_SPI = 0, 152 SSI_MOTO_SPI = 0,
150 SSI_TI_SSP, 153 SSI_TI_SSP,
@@ -325,6 +328,8 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
325 328
326 spin_unlock_irqrestore(&rs->lock, flags); 329 spin_unlock_irqrestore(&rs->lock, flags);
327 330
331 spi_enable_chip(rs, 0);
332
328 return 0; 333 return 0;
329} 334}
330 335
@@ -381,6 +386,8 @@ static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
381 if (rs->tx) 386 if (rs->tx)
382 wait_for_idle(rs); 387 wait_for_idle(rs);
383 388
389 spi_enable_chip(rs, 0);
390
384 return 0; 391 return 0;
385} 392}
386 393
@@ -392,8 +399,10 @@ static void rockchip_spi_dma_rxcb(void *data)
392 spin_lock_irqsave(&rs->lock, flags); 399 spin_lock_irqsave(&rs->lock, flags);
393 400
394 rs->state &= ~RXBUSY; 401 rs->state &= ~RXBUSY;
395 if (!(rs->state & TXBUSY)) 402 if (!(rs->state & TXBUSY)) {
403 spi_enable_chip(rs, 0);
396 spi_finalize_current_transfer(rs->master); 404 spi_finalize_current_transfer(rs->master);
405 }
397 406
398 spin_unlock_irqrestore(&rs->lock, flags); 407 spin_unlock_irqrestore(&rs->lock, flags);
399} 408}
@@ -409,8 +418,10 @@ static void rockchip_spi_dma_txcb(void *data)
409 spin_lock_irqsave(&rs->lock, flags); 418 spin_lock_irqsave(&rs->lock, flags);
410 419
411 rs->state &= ~TXBUSY; 420 rs->state &= ~TXBUSY;
412 if (!(rs->state & RXBUSY)) 421 if (!(rs->state & RXBUSY)) {
422 spi_enable_chip(rs, 0);
413 spi_finalize_current_transfer(rs->master); 423 spi_finalize_current_transfer(rs->master);
424 }
414 425
415 spin_unlock_irqrestore(&rs->lock, flags); 426 spin_unlock_irqrestore(&rs->lock, flags);
416} 427}
@@ -496,12 +507,19 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
496 dmacr |= RF_DMA_EN; 507 dmacr |= RF_DMA_EN;
497 } 508 }
498 509
510 if (WARN_ON(rs->speed > MAX_SCLK_OUT))
511 rs->speed = MAX_SCLK_OUT;
512
 513 /* the minimum divisor is 2 */
514 if (rs->max_freq < 2 * rs->speed) {
515 clk_set_rate(rs->spiclk, 2 * rs->speed);
516 rs->max_freq = clk_get_rate(rs->spiclk);
517 }
518
499 /* div doesn't support odd number */ 519 /* div doesn't support odd number */
500 div = max_t(u32, rs->max_freq / rs->speed, 1); 520 div = max_t(u32, rs->max_freq / rs->speed, 1);
501 div = (div + 1) & 0xfffe; 521 div = (div + 1) & 0xfffe;
502 522
503 spi_enable_chip(rs, 0);
504
505 writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0); 523 writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
506 524
507 writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1); 525 writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
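A worked example of the added clamp and divider handling, with assumed clock numbers: if spiclk runs at 99 MHz and the transfer asks for 30 MHz, div = max(99000000 / 30000000, 1) = 3, which the even-rounding (div + 1) & 0xfffe turns into 4, giving sclk_out = 24.75 MHz. A request above 50 MHz is first clamped to MAX_SCLK_OUT, and if max_freq were below twice the requested speed the parent clock is raised first, because the divider cannot go below 2.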
@@ -515,8 +533,6 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
515 spi_set_clk(rs, div); 533 spi_set_clk(rs, div);
516 534
517 dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div); 535 dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
518
519 spi_enable_chip(rs, 1);
520} 536}
521 537
522static int rockchip_spi_transfer_one( 538static int rockchip_spi_transfer_one(
@@ -524,7 +540,7 @@ static int rockchip_spi_transfer_one(
524 struct spi_device *spi, 540 struct spi_device *spi,
525 struct spi_transfer *xfer) 541 struct spi_transfer *xfer)
526{ 542{
527 int ret = 0; 543 int ret = 1;
528 struct rockchip_spi *rs = spi_master_get_devdata(master); 544 struct rockchip_spi *rs = spi_master_get_devdata(master);
529 545
530 WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && 546 WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -556,17 +572,27 @@ static int rockchip_spi_transfer_one(
556 rs->tmode = CR0_XFM_RO; 572 rs->tmode = CR0_XFM_RO;
557 573
558 /* we need prepare dma before spi was enabled */ 574 /* we need prepare dma before spi was enabled */
559 if (master->can_dma && master->can_dma(master, spi, xfer)) { 575 if (master->can_dma && master->can_dma(master, spi, xfer))
560 rs->use_dma = 1; 576 rs->use_dma = 1;
561 rockchip_spi_prepare_dma(rs); 577 else
562 } else {
563 rs->use_dma = 0; 578 rs->use_dma = 0;
564 }
565 579
566 rockchip_spi_config(rs); 580 rockchip_spi_config(rs);
567 581
568 if (!rs->use_dma) 582 if (rs->use_dma) {
583 if (rs->tmode == CR0_XFM_RO) {
584 /* rx: dma must be prepared first */
585 rockchip_spi_prepare_dma(rs);
586 spi_enable_chip(rs, 1);
587 } else {
588 /* tx or tr: spi must be enabled first */
589 spi_enable_chip(rs, 1);
590 rockchip_spi_prepare_dma(rs);
591 }
592 } else {
593 spi_enable_chip(rs, 1);
569 ret = rockchip_spi_pio_transfer(rs); 594 ret = rockchip_spi_pio_transfer(rs);
595 }
570 596
571 return ret; 597 return ret;
572} 598}
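Taken together, the spi-rockchip changes move spi_enable_chip(rs, 1) out of rockchip_spi_config() so the controller is always programmed while disabled, order the enable against DMA preparation by transfer direction, and return 1 from transfer_one so the SPI core knows the transfer is still in flight and will be finalized from the DMA callbacks via spi_finalize_current_transfer(). A simplified sketch of the resulting flow (not a drop-in replacement for the function above):

    rockchip_spi_config(rs);                /* registers written with the chip disabled */

    if (rs->use_dma) {
        if (rs->tmode == CR0_XFM_RO) {      /* rx-only: arm DMA before enabling */
            rockchip_spi_prepare_dma(rs);
            spi_enable_chip(rs, 1);
        } else {                            /* tx or full duplex: enable first */
            spi_enable_chip(rs, 1);
            rockchip_spi_prepare_dma(rs);
        }
        return 1;                           /* async; completed from the DMA callbacks */
    }

    spi_enable_chip(rs, 1);
    return rockchip_spi_pio_transfer(rs);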
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index e3bc23bb5883..e50039fb1474 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -82,10 +82,11 @@ struct spidev_data {
82 struct spi_device *spi; 82 struct spi_device *spi;
83 struct list_head device_entry; 83 struct list_head device_entry;
84 84
85 /* buffer is NULL unless this device is open (users > 0) */ 85 /* TX/RX buffers are NULL unless this device is open (users > 0) */
86 struct mutex buf_lock; 86 struct mutex buf_lock;
87 unsigned users; 87 unsigned users;
88 u8 *buffer; 88 u8 *tx_buffer;
89 u8 *rx_buffer;
89}; 90};
90 91
91static LIST_HEAD(device_list); 92static LIST_HEAD(device_list);
@@ -135,7 +136,7 @@ static inline ssize_t
135spidev_sync_write(struct spidev_data *spidev, size_t len) 136spidev_sync_write(struct spidev_data *spidev, size_t len)
136{ 137{
137 struct spi_transfer t = { 138 struct spi_transfer t = {
138 .tx_buf = spidev->buffer, 139 .tx_buf = spidev->tx_buffer,
139 .len = len, 140 .len = len,
140 }; 141 };
141 struct spi_message m; 142 struct spi_message m;
@@ -149,7 +150,7 @@ static inline ssize_t
149spidev_sync_read(struct spidev_data *spidev, size_t len) 150spidev_sync_read(struct spidev_data *spidev, size_t len)
150{ 151{
151 struct spi_transfer t = { 152 struct spi_transfer t = {
152 .rx_buf = spidev->buffer, 153 .rx_buf = spidev->rx_buffer,
153 .len = len, 154 .len = len,
154 }; 155 };
155 struct spi_message m; 156 struct spi_message m;
@@ -179,7 +180,7 @@ spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
179 if (status > 0) { 180 if (status > 0) {
180 unsigned long missing; 181 unsigned long missing;
181 182
182 missing = copy_to_user(buf, spidev->buffer, status); 183 missing = copy_to_user(buf, spidev->rx_buffer, status);
183 if (missing == status) 184 if (missing == status)
184 status = -EFAULT; 185 status = -EFAULT;
185 else 186 else
@@ -206,7 +207,7 @@ spidev_write(struct file *filp, const char __user *buf,
206 spidev = filp->private_data; 207 spidev = filp->private_data;
207 208
208 mutex_lock(&spidev->buf_lock); 209 mutex_lock(&spidev->buf_lock);
209 missing = copy_from_user(spidev->buffer, buf, count); 210 missing = copy_from_user(spidev->tx_buffer, buf, count);
210 if (missing == 0) 211 if (missing == 0)
211 status = spidev_sync_write(spidev, count); 212 status = spidev_sync_write(spidev, count);
212 else 213 else
@@ -224,7 +225,7 @@ static int spidev_message(struct spidev_data *spidev,
224 struct spi_transfer *k_tmp; 225 struct spi_transfer *k_tmp;
225 struct spi_ioc_transfer *u_tmp; 226 struct spi_ioc_transfer *u_tmp;
226 unsigned n, total; 227 unsigned n, total;
227 u8 *buf; 228 u8 *tx_buf, *rx_buf;
228 int status = -EFAULT; 229 int status = -EFAULT;
229 230
230 spi_message_init(&msg); 231 spi_message_init(&msg);
@@ -236,7 +237,8 @@ static int spidev_message(struct spidev_data *spidev,
236 * We walk the array of user-provided transfers, using each one 237 * We walk the array of user-provided transfers, using each one
237 * to initialize a kernel version of the same transfer. 238 * to initialize a kernel version of the same transfer.
238 */ 239 */
239 buf = spidev->buffer; 240 tx_buf = spidev->tx_buffer;
241 rx_buf = spidev->rx_buffer;
240 total = 0; 242 total = 0;
241 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; 243 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
242 n; 244 n;
@@ -250,20 +252,21 @@ static int spidev_message(struct spidev_data *spidev,
250 } 252 }
251 253
252 if (u_tmp->rx_buf) { 254 if (u_tmp->rx_buf) {
253 k_tmp->rx_buf = buf; 255 k_tmp->rx_buf = rx_buf;
254 if (!access_ok(VERIFY_WRITE, (u8 __user *) 256 if (!access_ok(VERIFY_WRITE, (u8 __user *)
255 (uintptr_t) u_tmp->rx_buf, 257 (uintptr_t) u_tmp->rx_buf,
256 u_tmp->len)) 258 u_tmp->len))
257 goto done; 259 goto done;
258 } 260 }
259 if (u_tmp->tx_buf) { 261 if (u_tmp->tx_buf) {
260 k_tmp->tx_buf = buf; 262 k_tmp->tx_buf = tx_buf;
261 if (copy_from_user(buf, (const u8 __user *) 263 if (copy_from_user(tx_buf, (const u8 __user *)
262 (uintptr_t) u_tmp->tx_buf, 264 (uintptr_t) u_tmp->tx_buf,
263 u_tmp->len)) 265 u_tmp->len))
264 goto done; 266 goto done;
265 } 267 }
266 buf += k_tmp->len; 268 tx_buf += k_tmp->len;
269 rx_buf += k_tmp->len;
267 270
268 k_tmp->cs_change = !!u_tmp->cs_change; 271 k_tmp->cs_change = !!u_tmp->cs_change;
269 k_tmp->tx_nbits = u_tmp->tx_nbits; 272 k_tmp->tx_nbits = u_tmp->tx_nbits;
@@ -290,17 +293,17 @@ static int spidev_message(struct spidev_data *spidev,
290 goto done; 293 goto done;
291 294
292 /* copy any rx data out of bounce buffer */ 295 /* copy any rx data out of bounce buffer */
293 buf = spidev->buffer; 296 rx_buf = spidev->rx_buffer;
294 for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { 297 for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
295 if (u_tmp->rx_buf) { 298 if (u_tmp->rx_buf) {
296 if (__copy_to_user((u8 __user *) 299 if (__copy_to_user((u8 __user *)
297 (uintptr_t) u_tmp->rx_buf, buf, 300 (uintptr_t) u_tmp->rx_buf, rx_buf,
298 u_tmp->len)) { 301 u_tmp->len)) {
299 status = -EFAULT; 302 status = -EFAULT;
300 goto done; 303 goto done;
301 } 304 }
302 } 305 }
303 buf += u_tmp->len; 306 rx_buf += u_tmp->len;
304 } 307 }
305 status = total; 308 status = total;
306 309
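With separate tx_buffer and rx_buffer bounce buffers, a single spi_ioc_transfer that supplies both user pointers is copied in from and back out to distinct kernel buffers. A minimal user-space fragment exercising that path (a sketch: fd is assumed to be an open /dev/spidevB.C, the command byte is only an example, and error handling is omitted):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    static int full_duplex_xfer(int fd)
    {
        uint8_t tx[4] = { 0x9f, 0x00, 0x00, 0x00 };  /* example command bytes */
        uint8_t rx[4] = { 0 };
        struct spi_ioc_transfer xfer;

        memset(&xfer, 0, sizeof(xfer));
        xfer.tx_buf = (unsigned long)tx;    /* full duplex: both buffers set */
        xfer.rx_buf = (unsigned long)rx;
        xfer.len    = sizeof(tx);

        return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
    }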
@@ -508,22 +511,41 @@ static int spidev_open(struct inode *inode, struct file *filp)
508 break; 511 break;
509 } 512 }
510 } 513 }
511 if (status == 0) { 514
512 if (!spidev->buffer) { 515 if (status) {
513 spidev->buffer = kmalloc(bufsiz, GFP_KERNEL); 516 pr_debug("spidev: nothing for minor %d\n", iminor(inode));
514 if (!spidev->buffer) { 517 goto err_find_dev;
518 }
519
520 if (!spidev->tx_buffer) {
521 spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
522 if (!spidev->tx_buffer) {
515 dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); 523 dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
516 status = -ENOMEM; 524 status = -ENOMEM;
525 goto err_find_dev;
517 } 526 }
518 } 527 }
519 if (status == 0) { 528
520 spidev->users++; 529 if (!spidev->rx_buffer) {
521 filp->private_data = spidev; 530 spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
522 nonseekable_open(inode, filp); 531 if (!spidev->rx_buffer) {
532 dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
533 status = -ENOMEM;
534 goto err_alloc_rx_buf;
523 } 535 }
524 } else 536 }
525 pr_debug("spidev: nothing for minor %d\n", iminor(inode)); 537
538 spidev->users++;
539 filp->private_data = spidev;
540 nonseekable_open(inode, filp);
541
542 mutex_unlock(&device_list_lock);
543 return 0;
526 544
545err_alloc_rx_buf:
546 kfree(spidev->tx_buffer);
547 spidev->tx_buffer = NULL;
548err_find_dev:
527 mutex_unlock(&device_list_lock); 549 mutex_unlock(&device_list_lock);
528 return status; 550 return status;
529} 551}
@@ -542,8 +564,11 @@ static int spidev_release(struct inode *inode, struct file *filp)
542 if (!spidev->users) { 564 if (!spidev->users) {
543 int dofree; 565 int dofree;
544 566
545 kfree(spidev->buffer); 567 kfree(spidev->tx_buffer);
546 spidev->buffer = NULL; 568 spidev->tx_buffer = NULL;
569
570 kfree(spidev->rx_buffer);
571 spidev->rx_buffer = NULL;
547 572
548 /* ... after we unbound from the underlying device? */ 573 /* ... after we unbound from the underlying device? */
549 spin_lock_irq(&spidev->spi_lock); 574 spin_lock_irq(&spidev->spi_lock);
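The reworked open() switches to the usual goto-based unwind: if the second allocation fails, the first buffer is freed and reset before the mutex is dropped, and the success path returns before the error labels. The generic shape of that idiom (an illustrative sketch with placeholder names, not spidev code):

    static int example_open(struct example *ex)
    {
        ex->tx = kmalloc(EXAMPLE_BUFSIZ, GFP_KERNEL);   /* placeholder names */
        if (!ex->tx)
            return -ENOMEM;

        ex->rx = kmalloc(EXAMPLE_BUFSIZ, GFP_KERNEL);
        if (!ex->rx)
            goto err_free_tx;

        return 0;

    err_free_tx:
        kfree(ex->tx);
        ex->tx = NULL;
        return -ENOMEM;
    }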
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index dc2d84ac5a0e..81d44c477a5b 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,6 +31,13 @@ config TCM_PSCSI
31 Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered 31 Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
32 passthrough access to Linux/SCSI device 32 passthrough access to Linux/SCSI device
33 33
34config TCM_USER
35 tristate "TCM/USER Subsystem Plugin for Linux"
36 depends on UIO && NET
37 help
38 Say Y here to enable the TCM/USER subsystem plugin for a userspace
39 process to handle requests
40
34source "drivers/target/loopback/Kconfig" 41source "drivers/target/loopback/Kconfig"
35source "drivers/target/tcm_fc/Kconfig" 42source "drivers/target/tcm_fc/Kconfig"
36source "drivers/target/iscsi/Kconfig" 43source "drivers/target/iscsi/Kconfig"
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 85b012d2f89b..bbb4a7d638ef 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
22obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o 22obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
23obj-$(CONFIG_TCM_FILEIO) += target_core_file.o 23obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
24obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o 24obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
25obj-$(CONFIG_TCM_USER) += target_core_user.o
25 26
26# Fabric modules 27# Fabric modules
27obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ 28obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 260c3e1e312c..b19e4329ba00 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3709,7 +3709,6 @@ static inline void iscsit_thread_check_cpumask(
3709 struct task_struct *p, 3709 struct task_struct *p,
3710 int mode) 3710 int mode)
3711{ 3711{
3712 char buf[128];
3713 /* 3712 /*
3714 * mode == 1 signals iscsi_target_tx_thread() usage. 3713 * mode == 1 signals iscsi_target_tx_thread() usage.
3715 * mode == 0 signals iscsi_target_rx_thread() usage. 3714 * mode == 0 signals iscsi_target_rx_thread() usage.
@@ -3728,8 +3727,6 @@ static inline void iscsit_thread_check_cpumask(
3728 * both TX and RX kthreads are scheduled to run on the 3727 * both TX and RX kthreads are scheduled to run on the
3729 * same CPU. 3728 * same CPU.
3730 */ 3729 */
3731 memset(buf, 0, 128);
3732 cpumask_scnprintf(buf, 128, conn->conn_cpumask);
3733 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3730 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3734} 3731}
3735 3732
@@ -4326,8 +4323,7 @@ int iscsit_close_connection(
4326 if (conn->conn_tx_hash.tfm) 4323 if (conn->conn_tx_hash.tfm)
4327 crypto_free_hash(conn->conn_tx_hash.tfm); 4324 crypto_free_hash(conn->conn_tx_hash.tfm);
4328 4325
4329 if (conn->conn_cpumask) 4326 free_cpumask_var(conn->conn_cpumask);
4330 free_cpumask_var(conn->conn_cpumask);
4331 4327
4332 kfree(conn->conn_ops); 4328 kfree(conn->conn_ops);
4333 conn->conn_ops = NULL; 4329 conn->conn_ops = NULL;
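Both cleanups rely on free_cpumask_var() being safe to call on a mask that was never allocated (assuming conn was zero-initialized at allocation), so the surrounding NULL check added nothing; the removed cpumask_scnprintf() buffer was formatted but never printed in this function.

    /*
     * Simplified from <linux/cpumask.h> / lib/cpumask.c:
     *   CONFIG_CPUMASK_OFFSTACK=y : free_cpumask_var(mask) -> kfree(mask), NULL-safe
     *   CONFIG_CPUMASK_OFFSTACK=n : free_cpumask_var(mask) -> empty inline, no-op
     */
    free_cpumask_var(conn->conn_cpumask);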
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index ae03f3e5de1e..9059c1e0b26e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -669,12 +669,10 @@ static ssize_t lio_target_nacl_show_info(
669 } else { 669 } else {
670 sess = se_sess->fabric_sess_ptr; 670 sess = se_sess->fabric_sess_ptr;
671 671
672 if (sess->sess_ops->InitiatorName) 672 rb += sprintf(page+rb, "InitiatorName: %s\n",
673 rb += sprintf(page+rb, "InitiatorName: %s\n", 673 sess->sess_ops->InitiatorName);
674 sess->sess_ops->InitiatorName); 674 rb += sprintf(page+rb, "InitiatorAlias: %s\n",
675 if (sess->sess_ops->InitiatorAlias) 675 sess->sess_ops->InitiatorAlias);
676 rb += sprintf(page+rb, "InitiatorAlias: %s\n",
677 sess->sess_ops->InitiatorAlias);
678 676
679 rb += sprintf(page+rb, "LIO Session ID: %u " 677 rb += sprintf(page+rb, "LIO Session ID: %u "
680 "ISID: 0x%02x %02x %02x %02x %02x %02x " 678 "ISID: 0x%02x %02x %02x %02x %02x %02x "
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 0d1e6ee3e992..a0ae5fc0ad75 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -345,7 +345,6 @@ static int iscsit_dataout_check_datasn(
345 struct iscsi_cmd *cmd, 345 struct iscsi_cmd *cmd,
346 unsigned char *buf) 346 unsigned char *buf)
347{ 347{
348 int dump = 0, recovery = 0;
349 u32 data_sn = 0; 348 u32 data_sn = 0;
350 struct iscsi_conn *conn = cmd->conn; 349 struct iscsi_conn *conn = cmd->conn;
351 struct iscsi_data *hdr = (struct iscsi_data *) buf; 350 struct iscsi_data *hdr = (struct iscsi_data *) buf;
@@ -370,13 +369,11 @@ static int iscsit_dataout_check_datasn(
370 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" 369 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
371 " higher than expected 0x%08x.\n", cmd->init_task_tag, 370 " higher than expected 0x%08x.\n", cmd->init_task_tag,
372 be32_to_cpu(hdr->datasn), data_sn); 371 be32_to_cpu(hdr->datasn), data_sn);
373 recovery = 1;
374 goto recover; 372 goto recover;
375 } else if (be32_to_cpu(hdr->datasn) < data_sn) { 373 } else if (be32_to_cpu(hdr->datasn) < data_sn) {
376 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" 374 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
377 " lower than expected 0x%08x, discarding payload.\n", 375 " lower than expected 0x%08x, discarding payload.\n",
378 cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn); 376 cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
379 dump = 1;
380 goto dump; 377 goto dump;
381 } 378 }
382 379
@@ -392,8 +389,7 @@ dump:
392 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 389 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
393 return DATAOUT_CANNOT_RECOVER; 390 return DATAOUT_CANNOT_RECOVER;
394 391
395 return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : 392 return DATAOUT_WITHIN_COMMAND_RECOVERY;
396 DATAOUT_NORMAL;
397} 393}
398 394
399static int iscsit_dataout_pre_datapduinorder_yes( 395static int iscsit_dataout_pre_datapduinorder_yes(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 5e71ac609418..480f2e0ecc11 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -978,8 +978,7 @@ int iscsit_setup_np(
978 return 0; 978 return 0;
979fail: 979fail:
980 np->np_socket = NULL; 980 np->np_socket = NULL;
981 if (sock) 981 sock_release(sock);
982 sock_release(sock);
983 return ret; 982 return ret;
984} 983}
985 984
@@ -1190,8 +1189,7 @@ old_sess_out:
1190 if (!IS_ERR(conn->conn_tx_hash.tfm)) 1189 if (!IS_ERR(conn->conn_tx_hash.tfm))
1191 crypto_free_hash(conn->conn_tx_hash.tfm); 1190 crypto_free_hash(conn->conn_tx_hash.tfm);
1192 1191
1193 if (conn->conn_cpumask) 1192 free_cpumask_var(conn->conn_cpumask);
1194 free_cpumask_var(conn->conn_cpumask);
1195 1193
1196 kfree(conn->conn_ops); 1194 kfree(conn->conn_ops);
1197 1195
@@ -1268,8 +1266,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1268 iscsit_put_transport(conn->conn_transport); 1266 iscsit_put_transport(conn->conn_transport);
1269 kfree(conn); 1267 kfree(conn);
1270 conn = NULL; 1268 conn = NULL;
1271 if (ret == -ENODEV)
1272 goto out;
1273 /* Get another socket */ 1269 /* Get another socket */
1274 return 1; 1270 return 1;
1275 } 1271 }
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 73355f4fca74..ce87ce9bdb9c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1481,8 +1481,9 @@ void iscsit_collect_login_stats(
1481 if (conn->param_list) 1481 if (conn->param_list)
1482 intrname = iscsi_find_param_from_key(INITIATORNAME, 1482 intrname = iscsi_find_param_from_key(INITIATORNAME,
1483 conn->param_list); 1483 conn->param_list);
1484 strcpy(ls->last_intr_fail_name, 1484 strlcpy(ls->last_intr_fail_name,
1485 (intrname ? intrname->value : "Unknown")); 1485 (intrname ? intrname->value : "Unknown"),
1486 sizeof(ls->last_intr_fail_name));
1486 1487
1487 ls->last_intr_fail_ip_family = conn->login_family; 1488 ls->last_intr_fail_ip_family = conn->login_family;
1488 1489
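strlcpy() bounds the copy to the destination size and always NUL-terminates, silently truncating an oversized initiator name, whereas the previous strcpy() could run past last_intr_fail_name. The same bounded-copy shape in isolation (a sketch with a hypothetical helper):

    static void record_initiator(char *dst, size_t dst_len, const char *name)
    {
        /* bounded and always NUL-terminated; truncates rather than overflowing */
        strlcpy(dst, name ? name : "Unknown", dst_len);
    }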
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 340de9d92b15..ab3ab27d49b7 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -153,18 +153,11 @@ static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
153/* 153/*
154 * Locate the SAM Task Attr from struct scsi_cmnd * 154 * Locate the SAM Task Attr from struct scsi_cmnd *
155 */ 155 */
156static int tcm_loop_sam_attr(struct scsi_cmnd *sc) 156static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag)
157{ 157{
158 if (sc->device->tagged_supported) { 158 if (sc->device->tagged_supported &&
159 switch (sc->tag) { 159 sc->device->ordered_tags && tag >= 0)
160 case HEAD_OF_QUEUE_TAG: 160 return MSG_ORDERED_TAG;
161 return MSG_HEAD_TAG;
162 case ORDERED_QUEUE_TAG:
163 return MSG_ORDERED_TAG;
164 default:
165 break;
166 }
167 }
168 161
169 return MSG_SIMPLE_TAG; 162 return MSG_SIMPLE_TAG;
170} 163}
@@ -227,7 +220,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
227 220
228 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, 221 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
229 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, 222 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
230 transfer_length, tcm_loop_sam_attr(sc), 223 transfer_length, tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag),
231 sc->sc_data_direction, 0, 224 sc->sc_data_direction, 0,
232 scsi_sglist(sc), scsi_sg_count(sc), 225 scsi_sglist(sc), scsi_sg_count(sc),
233 sgl_bidi, sgl_bidi_count, 226 sgl_bidi, sgl_bidi_count,
@@ -266,7 +259,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
266 } 259 }
267 260
268 tl_cmd->sc = sc; 261 tl_cmd->sc = sc;
269 tl_cmd->sc_cmd_tag = sc->tag; 262 tl_cmd->sc_cmd_tag = sc->request->tag;
270 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); 263 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
271 queue_work(tcm_loop_workqueue, &tl_cmd->work); 264 queue_work(tcm_loop_workqueue, &tl_cmd->work);
272 return 0; 265 return 0;
@@ -370,7 +363,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
370 */ 363 */
371 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 364 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
372 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, 365 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
373 sc->tag, TMR_ABORT_TASK); 366 sc->request->tag, TMR_ABORT_TASK);
374 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; 367 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
375} 368}
376 369
@@ -960,8 +953,7 @@ static int tcm_loop_port_link(
960 struct tcm_loop_tpg, tl_se_tpg); 953 struct tcm_loop_tpg, tl_se_tpg);
961 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 954 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
962 955
963 atomic_inc(&tl_tpg->tl_tpg_port_count); 956 atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
964 smp_mb__after_atomic();
965 /* 957 /*
966 * Add Linux/SCSI struct scsi_device by HCTL 958 * Add Linux/SCSI struct scsi_device by HCTL
967 */ 959 */
@@ -995,8 +987,7 @@ static void tcm_loop_port_unlink(
995 scsi_remove_device(sd); 987 scsi_remove_device(sd);
996 scsi_device_put(sd); 988 scsi_device_put(sd);
997 989
998 atomic_dec(&tl_tpg->tl_tpg_port_count); 990 atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
999 smp_mb__after_atomic();
1000 991
1001 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); 992 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
1002} 993}
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fbc5ebb5f761..fb87780929d2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -392,8 +392,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
392 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) 392 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
393 continue; 393 continue;
394 394
395 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 395 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
396 smp_mb__after_atomic();
397 396
398 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 397 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
399 398
@@ -403,8 +402,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
403 found = true; 402 found = true;
404 403
405 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 404 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
406 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 405 atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
407 smp_mb__after_atomic();
408 break; 406 break;
409 } 407 }
410 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 408 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -998,8 +996,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
998 * every I_T nexus other than the I_T nexus on which the SET 996 * every I_T nexus other than the I_T nexus on which the SET
999 * TARGET PORT GROUPS command 997 * TARGET PORT GROUPS command
1000 */ 998 */
1001 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); 999 atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt);
1002 smp_mb__after_atomic();
1003 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1000 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1004 1001
1005 spin_lock_bh(&port->sep_alua_lock); 1002 spin_lock_bh(&port->sep_alua_lock);
@@ -1028,8 +1025,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
1028 spin_unlock_bh(&port->sep_alua_lock); 1025 spin_unlock_bh(&port->sep_alua_lock);
1029 1026
1030 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1027 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1031 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); 1028 atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt);
1032 smp_mb__after_atomic();
1033 } 1029 }
1034 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1030 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1035 /* 1031 /*
@@ -1063,7 +1059,6 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
1063 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); 1059 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
1064 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1060 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1065 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1061 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1066 smp_mb__after_atomic();
1067 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1062 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1068 1063
1069 if (tg_pt_gp->tg_pt_gp_transition_complete) 1064 if (tg_pt_gp->tg_pt_gp_transition_complete)
@@ -1125,7 +1120,6 @@ static int core_alua_do_transition_tg_pt(
1125 */ 1120 */
1126 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1121 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1127 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1122 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1128 smp_mb__after_atomic();
1129 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1123 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1130 1124
1131 if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { 1125 if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
@@ -1168,7 +1162,6 @@ int core_alua_do_port_transition(
1168 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 1162 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1169 lu_gp = local_lu_gp_mem->lu_gp; 1163 lu_gp = local_lu_gp_mem->lu_gp;
1170 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1164 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1171 smp_mb__after_atomic();
1172 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 1165 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1173 /* 1166 /*
1174 * For storage objects that are members of the 'default_lu_gp', 1167 * For storage objects that are members of the 'default_lu_gp',
@@ -1184,8 +1177,7 @@ int core_alua_do_port_transition(
1184 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1177 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1185 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, 1178 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1186 new_state, explicit); 1179 new_state, explicit);
1187 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1180 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1188 smp_mb__after_atomic();
1189 return rc; 1181 return rc;
1190 } 1182 }
1191 /* 1183 /*
@@ -1198,8 +1190,7 @@ int core_alua_do_port_transition(
1198 lu_gp_mem_list) { 1190 lu_gp_mem_list) {
1199 1191
1200 dev = lu_gp_mem->lu_gp_mem_dev; 1192 dev = lu_gp_mem->lu_gp_mem_dev;
1201 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); 1193 atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1202 smp_mb__after_atomic();
1203 spin_unlock(&lu_gp->lu_gp_lock); 1194 spin_unlock(&lu_gp->lu_gp_lock);
1204 1195
1205 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1196 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1227,8 +1218,7 @@ int core_alua_do_port_transition(
1227 tg_pt_gp->tg_pt_gp_alua_port = NULL; 1218 tg_pt_gp->tg_pt_gp_alua_port = NULL;
1228 tg_pt_gp->tg_pt_gp_alua_nacl = NULL; 1219 tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1229 } 1220 }
1230 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1221 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1231 smp_mb__after_atomic();
1232 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1222 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1233 /* 1223 /*
1234 * core_alua_do_transition_tg_pt() will always return 1224 * core_alua_do_transition_tg_pt() will always return
@@ -1238,16 +1228,14 @@ int core_alua_do_port_transition(
1238 new_state, explicit); 1228 new_state, explicit);
1239 1229
1240 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1230 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1241 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1231 atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1242 smp_mb__after_atomic();
1243 if (rc) 1232 if (rc)
1244 break; 1233 break;
1245 } 1234 }
1246 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1235 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1247 1236
1248 spin_lock(&lu_gp->lu_gp_lock); 1237 spin_lock(&lu_gp->lu_gp_lock);
1249 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); 1238 atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1250 smp_mb__after_atomic();
1251 } 1239 }
1252 spin_unlock(&lu_gp->lu_gp_lock); 1240 spin_unlock(&lu_gp->lu_gp_lock);
1253 1241
@@ -1260,8 +1248,7 @@ int core_alua_do_port_transition(
1260 core_alua_dump_state(new_state)); 1248 core_alua_dump_state(new_state));
1261 } 1249 }
1262 1250
1263 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1251 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1264 smp_mb__after_atomic();
1265 return rc; 1252 return rc;
1266} 1253}
1267 1254
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 756def38c77a..79f9296a08ae 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -665,6 +665,9 @@ SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
665DEF_DEV_ATTRIB(emulate_rest_reord); 665DEF_DEV_ATTRIB(emulate_rest_reord);
666SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); 666SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
667 667
668DEF_DEV_ATTRIB(force_pr_aptpl);
669SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
670
668DEF_DEV_ATTRIB_RO(hw_block_size); 671DEF_DEV_ATTRIB_RO(hw_block_size);
669SE_DEV_ATTR_RO(hw_block_size); 672SE_DEV_ATTR_RO(hw_block_size);
670 673
@@ -719,6 +722,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
719 &target_core_dev_attrib_hw_pi_prot_type.attr, 722 &target_core_dev_attrib_hw_pi_prot_type.attr,
720 &target_core_dev_attrib_pi_prot_format.attr, 723 &target_core_dev_attrib_pi_prot_format.attr,
721 &target_core_dev_attrib_enforce_pr_isids.attr, 724 &target_core_dev_attrib_enforce_pr_isids.attr,
725 &target_core_dev_attrib_force_pr_aptpl.attr,
722 &target_core_dev_attrib_is_nonrot.attr, 726 &target_core_dev_attrib_is_nonrot.attr,
723 &target_core_dev_attrib_emulate_rest_reord.attr, 727 &target_core_dev_attrib_emulate_rest_reord.attr,
724 &target_core_dev_attrib_hw_block_size.attr, 728 &target_core_dev_attrib_hw_block_size.attr,
@@ -1263,7 +1267,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1263{ 1267{
1264 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; 1268 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
1265 unsigned char *t_fabric = NULL, *t_port = NULL; 1269 unsigned char *t_fabric = NULL, *t_port = NULL;
1266 char *orig, *ptr, *arg_p, *opts; 1270 char *orig, *ptr, *opts;
1267 substring_t args[MAX_OPT_ARGS]; 1271 substring_t args[MAX_OPT_ARGS];
1268 unsigned long long tmp_ll; 1272 unsigned long long tmp_ll;
1269 u64 sa_res_key = 0; 1273 u64 sa_res_key = 0;
@@ -1295,14 +1299,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1295 token = match_token(ptr, tokens, args); 1299 token = match_token(ptr, tokens, args);
1296 switch (token) { 1300 switch (token) {
1297 case Opt_initiator_fabric: 1301 case Opt_initiator_fabric:
1298 i_fabric = match_strdup(&args[0]); 1302 i_fabric = match_strdup(args);
1299 if (!i_fabric) { 1303 if (!i_fabric) {
1300 ret = -ENOMEM; 1304 ret = -ENOMEM;
1301 goto out; 1305 goto out;
1302 } 1306 }
1303 break; 1307 break;
1304 case Opt_initiator_node: 1308 case Opt_initiator_node:
1305 i_port = match_strdup(&args[0]); 1309 i_port = match_strdup(args);
1306 if (!i_port) { 1310 if (!i_port) {
1307 ret = -ENOMEM; 1311 ret = -ENOMEM;
1308 goto out; 1312 goto out;
@@ -1316,7 +1320,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1316 } 1320 }
1317 break; 1321 break;
1318 case Opt_initiator_sid: 1322 case Opt_initiator_sid:
1319 isid = match_strdup(&args[0]); 1323 isid = match_strdup(args);
1320 if (!isid) { 1324 if (!isid) {
1321 ret = -ENOMEM; 1325 ret = -ENOMEM;
1322 goto out; 1326 goto out;
@@ -1330,15 +1334,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1330 } 1334 }
1331 break; 1335 break;
1332 case Opt_sa_res_key: 1336 case Opt_sa_res_key:
1333 arg_p = match_strdup(&args[0]); 1337 ret = kstrtoull(args->from, 0, &tmp_ll);
1334 if (!arg_p) {
1335 ret = -ENOMEM;
1336 goto out;
1337 }
1338 ret = kstrtoull(arg_p, 0, &tmp_ll);
1339 if (ret < 0) { 1338 if (ret < 0) {
1340 pr_err("kstrtoull() failed for" 1339 pr_err("kstrtoull() failed for sa_res_key=\n");
1341 " sa_res_key=\n");
1342 goto out; 1340 goto out;
1343 } 1341 }
1344 sa_res_key = (u64)tmp_ll; 1342 sa_res_key = (u64)tmp_ll;
@@ -1370,14 +1368,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1370 * PR APTPL Metadata for Target Port 1368 * PR APTPL Metadata for Target Port
1371 */ 1369 */
1372 case Opt_target_fabric: 1370 case Opt_target_fabric:
1373 t_fabric = match_strdup(&args[0]); 1371 t_fabric = match_strdup(args);
1374 if (!t_fabric) { 1372 if (!t_fabric) {
1375 ret = -ENOMEM; 1373 ret = -ENOMEM;
1376 goto out; 1374 goto out;
1377 } 1375 }
1378 break; 1376 break;
1379 case Opt_target_node: 1377 case Opt_target_node:
1380 t_port = match_strdup(&args[0]); 1378 t_port = match_strdup(args);
1381 if (!t_port) { 1379 if (!t_port) {
1382 ret = -ENOMEM; 1380 ret = -ENOMEM;
1383 goto out; 1381 goto out;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 98da90167159..c45f9e907e44 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -224,8 +224,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
224 if (port->sep_rtpi != rtpi) 224 if (port->sep_rtpi != rtpi)
225 continue; 225 continue;
226 226
227 atomic_inc(&deve->pr_ref_count); 227 atomic_inc_mb(&deve->pr_ref_count);
228 smp_mb__after_atomic();
229 spin_unlock_irq(&nacl->device_list_lock); 228 spin_unlock_irq(&nacl->device_list_lock);
230 229
231 return deve; 230 return deve;
@@ -1019,6 +1018,23 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1019 return 0; 1018 return 0;
1020} 1019}
1021 1020
1021int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
1022{
1023 if ((flag != 0) && (flag != 1)) {
1024 printk(KERN_ERR "Illegal value %d\n", flag);
1025 return -EINVAL;
1026 }
1027 if (dev->export_count) {
1028 pr_err("dev[%p]: Unable to set force_pr_aptpl while"
1029 " export_count is %d\n", dev, dev->export_count);
1030 return -EINVAL;
1031 }
1032
1033 dev->dev_attrib.force_pr_aptpl = flag;
1034 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
1035 return 0;
1036}
1037
1022int se_dev_set_is_nonrot(struct se_device *dev, int flag) 1038int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1023{ 1039{
1024 if ((flag != 0) && (flag != 1)) { 1040 if ((flag != 0) && (flag != 1)) {
@@ -1250,24 +1266,16 @@ struct se_lun *core_dev_add_lun(
1250 * 1266 *
1251 * 1267 *
1252 */ 1268 */
1253int core_dev_del_lun( 1269void core_dev_del_lun(
1254 struct se_portal_group *tpg, 1270 struct se_portal_group *tpg,
1255 u32 unpacked_lun) 1271 struct se_lun *lun)
1256{ 1272{
1257 struct se_lun *lun; 1273 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
1258
1259 lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1260 if (IS_ERR(lun))
1261 return PTR_ERR(lun);
1262
1263 core_tpg_post_dellun(tpg, lun);
1264
1265 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1266 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 1274 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1267 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, 1275 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1268 tpg->se_tpg_tfo->get_fabric_name()); 1276 tpg->se_tpg_tfo->get_fabric_name());
1269 1277
1270 return 0; 1278 core_tpg_remove_lun(tpg, lun);
1271} 1279}
1272 1280
1273struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) 1281struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
@@ -1396,8 +1404,7 @@ int core_dev_add_initiator_node_lun_acl(
1396 1404
1397 spin_lock(&lun->lun_acl_lock); 1405 spin_lock(&lun->lun_acl_lock);
1398 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); 1406 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1399 atomic_inc(&lun->lun_acl_count); 1407 atomic_inc_mb(&lun->lun_acl_count);
1400 smp_mb__after_atomic();
1401 spin_unlock(&lun->lun_acl_lock); 1408 spin_unlock(&lun->lun_acl_lock);
1402 1409
1403 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1410 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1409,7 +1416,8 @@ int core_dev_add_initiator_node_lun_acl(
1409 * Check to see if there are any existing persistent reservation APTPL 1416 * Check to see if there are any existing persistent reservation APTPL
1410 * pre-registrations that need to be enabled for this LUN ACL.. 1417 * pre-registrations that need to be enabled for this LUN ACL..
1411 */ 1418 */
1412 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); 1419 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
1420 lacl->mapped_lun);
1413 return 0; 1421 return 0;
1414} 1422}
1415 1423
@@ -1430,8 +1438,7 @@ int core_dev_del_initiator_node_lun_acl(
1430 1438
1431 spin_lock(&lun->lun_acl_lock); 1439 spin_lock(&lun->lun_acl_lock);
1432 list_del(&lacl->lacl_list); 1440 list_del(&lacl->lacl_list);
1433 atomic_dec(&lun->lun_acl_count); 1441 atomic_dec_mb(&lun->lun_acl_count);
1434 smp_mb__after_atomic();
1435 spin_unlock(&lun->lun_acl_lock); 1442 spin_unlock(&lun->lun_acl_lock);
1436 1443
1437 core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, 1444 core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
@@ -1554,6 +1561,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1554 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 1561 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
1555 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; 1562 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
1556 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1563 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1564 dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
1557 dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1565 dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1558 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 1566 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
1559 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; 1567 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 7de9f0475d05..0c3f90130b7d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -320,7 +320,7 @@ static struct config_group *target_fabric_make_mappedlun(
320 struct se_node_acl, acl_group); 320 struct se_node_acl, acl_group);
321 struct se_portal_group *se_tpg = se_nacl->se_tpg; 321 struct se_portal_group *se_tpg = se_nacl->se_tpg;
322 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 322 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
323 struct se_lun_acl *lacl; 323 struct se_lun_acl *lacl = NULL;
324 struct config_item *acl_ci; 324 struct config_item *acl_ci;
325 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; 325 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
326 char *buf; 326 char *buf;
@@ -406,6 +406,7 @@ static struct config_group *target_fabric_make_mappedlun(
406out: 406out:
407 if (lacl_cg) 407 if (lacl_cg)
408 kfree(lacl_cg->default_groups); 408 kfree(lacl_cg->default_groups);
409 kfree(lacl);
409 kfree(buf); 410 kfree(buf);
410 return ERR_PTR(ret); 411 return ERR_PTR(ret);
411} 412}
@@ -821,7 +822,7 @@ static int target_fabric_port_unlink(
821 tf->tf_ops.fabric_pre_unlink(se_tpg, lun); 822 tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
822 } 823 }
823 824
824 core_dev_del_lun(se_tpg, lun->unpacked_lun); 825 core_dev_del_lun(se_tpg, lun);
825 return 0; 826 return 0;
826} 827}
827 828
@@ -910,16 +911,12 @@ static struct config_group *target_fabric_make_lun(
910 GFP_KERNEL); 911 GFP_KERNEL);
911 if (!port_stat_grp->default_groups) { 912 if (!port_stat_grp->default_groups) {
912 pr_err("Unable to allocate port_stat_grp->default_groups\n"); 913 pr_err("Unable to allocate port_stat_grp->default_groups\n");
913 errno = -ENOMEM; 914 kfree(lun_cg->default_groups);
914 goto out; 915 return ERR_PTR(-ENOMEM);
915 } 916 }
916 target_stat_setup_port_default_groups(lun); 917 target_stat_setup_port_default_groups(lun);
917 918
918 return &lun->lun_group; 919 return &lun->lun_group;
919out:
920 if (lun_cg)
921 kfree(lun_cg->default_groups);
922 return ERR_PTR(errno);
923} 920}
924 921
925static void target_fabric_drop_lun( 922static void target_fabric_drop_lun(
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 0d1cf8b4f49f..35bfe77160d8 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -394,9 +394,9 @@ char *iscsi_parse_pr_out_transport_id(
394 * If the caller wants the TransportID Length, we set that value for the 394 * If the caller wants the TransportID Length, we set that value for the
395 * entire iSCSI Transport ID now. 395 * entire iSCSI Transport ID now.
396 */ 396 */
397 if (out_tid_len != NULL) { 397 if (out_tid_len) {
398 add_len = ((buf[2] >> 8) & 0xff); 398 /* The shift works thanks to integer promotion rules */
399 add_len |= (buf[3] & 0xff); 399 add_len = (buf[2] << 8) | buf[3];
400 400
401 tid_len = strlen(&buf[4]); 401 tid_len = strlen(&buf[4]);
402 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ 402 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
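The old arithmetic shifted an 8-bit value right by eight bits, so the high byte of the 16-bit ADDITIONAL LENGTH field was always discarded; the replacement assembles the big-endian value directly. In isolation (a sketch with a hypothetical helper; get_unaligned_be16() from <asm/unaligned.h> would be an equivalent spelling):

    /* decode the 16-bit big-endian ADDITIONAL LENGTH field of a TransportID */
    static u16 tid_additional_length(const unsigned char *hdr)
    {
        return (hdr[2] << 8) | hdr[3];  /* high byte no longer lost */
    }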
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7d6cddaec525..72c83d98662b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -415,7 +415,7 @@ fd_execute_sync_cache(struct se_cmd *cmd)
415 } else { 415 } else {
416 start = cmd->t_task_lba * dev->dev_attrib.block_size; 416 start = cmd->t_task_lba * dev->dev_attrib.block_size;
417 if (cmd->data_length) 417 if (cmd->data_length)
418 end = start + cmd->data_length; 418 end = start + cmd->data_length - 1;
419 else 419 else
420 end = LLONG_MAX; 420 end = LLONG_MAX;
421 } 421 }
@@ -680,7 +680,12 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
680 struct fd_dev *fd_dev = FD_DEV(dev); 680 struct fd_dev *fd_dev = FD_DEV(dev);
681 loff_t start = cmd->t_task_lba * 681 loff_t start = cmd->t_task_lba *
682 dev->dev_attrib.block_size; 682 dev->dev_attrib.block_size;
683 loff_t end = start + cmd->data_length; 683 loff_t end;
684
685 if (cmd->data_length)
686 end = start + cmd->data_length - 1;
687 else
688 end = LLONG_MAX;
684 689
685 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 690 vfs_fsync_range(fd_dev->fd_file, start, end, 1);
686 } 691 }
@@ -762,7 +767,9 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
762 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 767 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
763 break; 768 break;
764 case Opt_fd_buffered_io: 769 case Opt_fd_buffered_io:
765 match_int(args, &arg); 770 ret = match_int(args, &arg);
771 if (ret)
772 goto out;
766 if (arg != 1) { 773 if (arg != 1) {
767 pr_err("bogus fd_buffered_io=%d value\n", arg); 774 pr_err("bogus fd_buffered_io=%d value\n", arg);
768 ret = -EINVAL; 775 ret = -EINVAL;
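vfs_fsync_range() takes an inclusive [start, end] byte range, with LLONG_MAX meaning "to the end of the file", so a transfer of data_length bytes starting at start should sync through start + data_length - 1; the previous end value pointed one byte past the transfer. The second hunk also stops ignoring the return value of match_int() when parsing fd_buffered_io. The range computation in isolation (a sketch with a hypothetical helper):

    static int fd_sync_range(struct file *file, loff_t start, u32 length)
    {
        loff_t end = length ? start + length - 1 : LLONG_MAX;  /* inclusive end */

        return vfs_fsync_range(file, start, end, 1);           /* datasync semantics */
    }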
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index de9cab708f45..e31f42f369ff 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -38,6 +38,7 @@ int se_dev_set_emulate_3pc(struct se_device *, int);
38int se_dev_set_pi_prot_type(struct se_device *, int); 38int se_dev_set_pi_prot_type(struct se_device *, int);
39int se_dev_set_pi_prot_format(struct se_device *, int); 39int se_dev_set_pi_prot_format(struct se_device *, int);
40int se_dev_set_enforce_pr_isids(struct se_device *, int); 40int se_dev_set_enforce_pr_isids(struct se_device *, int);
41int se_dev_set_force_pr_aptpl(struct se_device *, int);
41int se_dev_set_is_nonrot(struct se_device *, int); 42int se_dev_set_is_nonrot(struct se_device *, int);
42int se_dev_set_emulate_rest_reord(struct se_device *dev, int); 43int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
43int se_dev_set_queue_depth(struct se_device *, u32); 44int se_dev_set_queue_depth(struct se_device *, u32);
@@ -46,7 +47,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *, u32);
46int se_dev_set_optimal_sectors(struct se_device *, u32); 47int se_dev_set_optimal_sectors(struct se_device *, u32);
47int se_dev_set_block_size(struct se_device *, u32); 48int se_dev_set_block_size(struct se_device *, u32);
48struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); 49struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
49int core_dev_del_lun(struct se_portal_group *, u32); 50void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
50struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); 51struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
51struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, 52struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
52 struct se_node_acl *, u32, int *); 53 struct se_node_acl *, u32, int *);
@@ -82,8 +83,7 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
82struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); 83struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
83int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, 84int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
84 u32, struct se_device *); 85 u32, struct se_device *);
85struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); 86void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
86int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
87 87
88/* target_core_transport.c */ 88/* target_core_transport.c */
89extern struct kmem_cache *se_tmr_req_cache; 89extern struct kmem_cache *se_tmr_req_cache;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index df357862286e..8c60a1a1ae8d 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -674,8 +674,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
674 */ 674 */
675 spin_lock(&dev->se_port_lock); 675 spin_lock(&dev->se_port_lock);
676 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { 676 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
677 atomic_inc(&port->sep_tg_pt_ref_cnt); 677 atomic_inc_mb(&port->sep_tg_pt_ref_cnt);
678 smp_mb__after_atomic();
679 spin_unlock(&dev->se_port_lock); 678 spin_unlock(&dev->se_port_lock);
680 679
681 spin_lock_bh(&port->sep_alua_lock); 680 spin_lock_bh(&port->sep_alua_lock);
@@ -709,8 +708,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
709 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) 708 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
710 continue; 709 continue;
711 710
712 atomic_inc(&deve_tmp->pr_ref_count); 711 atomic_inc_mb(&deve_tmp->pr_ref_count);
713 smp_mb__after_atomic();
714 spin_unlock_bh(&port->sep_alua_lock); 712 spin_unlock_bh(&port->sep_alua_lock);
715 /* 713 /*
716 * Grab a configfs group dependency that is released 714 * Grab a configfs group dependency that is released
@@ -722,10 +720,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
722 if (ret < 0) { 720 if (ret < 0) {
723 pr_err("core_scsi3_lunacl_depend" 721 pr_err("core_scsi3_lunacl_depend"
724 "_item() failed\n"); 722 "_item() failed\n");
725 atomic_dec(&port->sep_tg_pt_ref_cnt); 723 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
726 smp_mb__after_atomic(); 724 atomic_dec_mb(&deve_tmp->pr_ref_count);
727 atomic_dec(&deve_tmp->pr_ref_count);
728 smp_mb__after_atomic();
729 goto out; 725 goto out;
730 } 726 }
731 /* 727 /*
@@ -739,10 +735,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
739 nacl_tmp, deve_tmp, NULL, 735 nacl_tmp, deve_tmp, NULL,
740 sa_res_key, all_tg_pt, aptpl); 736 sa_res_key, all_tg_pt, aptpl);
741 if (!pr_reg_atp) { 737 if (!pr_reg_atp) {
742 atomic_dec(&port->sep_tg_pt_ref_cnt); 738 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
743 smp_mb__after_atomic(); 739 atomic_dec_mb(&deve_tmp->pr_ref_count);
744 atomic_dec(&deve_tmp->pr_ref_count);
745 smp_mb__after_atomic();
746 core_scsi3_lunacl_undepend_item(deve_tmp); 740 core_scsi3_lunacl_undepend_item(deve_tmp);
747 goto out; 741 goto out;
748 } 742 }
@@ -754,8 +748,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
754 spin_unlock_bh(&port->sep_alua_lock); 748 spin_unlock_bh(&port->sep_alua_lock);
755 749
756 spin_lock(&dev->se_port_lock); 750 spin_lock(&dev->se_port_lock);
757 atomic_dec(&port->sep_tg_pt_ref_cnt); 751 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
758 smp_mb__after_atomic();
759 } 752 }
760 spin_unlock(&dev->se_port_lock); 753 spin_unlock(&dev->se_port_lock);
761 754
@@ -902,6 +895,7 @@ static int __core_scsi3_check_aptpl_registration(
902 spin_lock(&pr_tmpl->aptpl_reg_lock); 895 spin_lock(&pr_tmpl->aptpl_reg_lock);
903 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, 896 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
904 pr_reg_aptpl_list) { 897 pr_reg_aptpl_list) {
898
905 if (!strcmp(pr_reg->pr_iport, i_port) && 899 if (!strcmp(pr_reg->pr_iport, i_port) &&
906 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && 900 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
907 !(strcmp(pr_reg->pr_tport, t_port)) && 901 !(strcmp(pr_reg->pr_tport, t_port)) &&
@@ -944,10 +938,10 @@ int core_scsi3_check_aptpl_registration(
944 struct se_device *dev, 938 struct se_device *dev,
945 struct se_portal_group *tpg, 939 struct se_portal_group *tpg,
946 struct se_lun *lun, 940 struct se_lun *lun,
947 struct se_lun_acl *lun_acl) 941 struct se_node_acl *nacl,
942 u32 mapped_lun)
948{ 943{
949 struct se_node_acl *nacl = lun_acl->se_lun_nacl; 944 struct se_dev_entry *deve = nacl->device_list[mapped_lun];
950 struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
951 945
952 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 946 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
953 return 0; 947 return 0;
@@ -1109,8 +1103,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1109 if (dev->dev_attrib.enforce_pr_isids) 1103 if (dev->dev_attrib.enforce_pr_isids)
1110 continue; 1104 continue;
1111 } 1105 }
1112 atomic_inc(&pr_reg->pr_res_holders); 1106 atomic_inc_mb(&pr_reg->pr_res_holders);
1113 smp_mb__after_atomic();
1114 spin_unlock(&pr_tmpl->registration_lock); 1107 spin_unlock(&pr_tmpl->registration_lock);
1115 return pr_reg; 1108 return pr_reg;
1116 } 1109 }
@@ -1124,8 +1117,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1124 if (strcmp(isid, pr_reg->pr_reg_isid)) 1117 if (strcmp(isid, pr_reg->pr_reg_isid))
1125 continue; 1118 continue;
1126 1119
1127 atomic_inc(&pr_reg->pr_res_holders); 1120 atomic_inc_mb(&pr_reg->pr_res_holders);
1128 smp_mb__after_atomic();
1129 spin_unlock(&pr_tmpl->registration_lock); 1121 spin_unlock(&pr_tmpl->registration_lock);
1130 return pr_reg; 1122 return pr_reg;
1131 } 1123 }
@@ -1154,8 +1146,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
1154 1146
1155static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) 1147static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
1156{ 1148{
1157 atomic_dec(&pr_reg->pr_res_holders); 1149 atomic_dec_mb(&pr_reg->pr_res_holders);
1158 smp_mb__after_atomic();
1159} 1150}
1160 1151
1161static int core_scsi3_check_implicit_release( 1152static int core_scsi3_check_implicit_release(
@@ -1348,8 +1339,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
1348 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1339 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1349 &tpg->tpg_group.cg_item); 1340 &tpg->tpg_group.cg_item);
1350 1341
1351 atomic_dec(&tpg->tpg_pr_ref_count); 1342 atomic_dec_mb(&tpg->tpg_pr_ref_count);
1352 smp_mb__after_atomic();
1353} 1343}
1354 1344
1355static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) 1345static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1368,16 +1358,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1368 struct se_portal_group *tpg = nacl->se_tpg; 1358 struct se_portal_group *tpg = nacl->se_tpg;
1369 1359
1370 if (nacl->dynamic_node_acl) { 1360 if (nacl->dynamic_node_acl) {
1371 atomic_dec(&nacl->acl_pr_ref_count); 1361 atomic_dec_mb(&nacl->acl_pr_ref_count);
1372 smp_mb__after_atomic();
1373 return; 1362 return;
1374 } 1363 }
1375 1364
1376 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1365 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1377 &nacl->acl_group.cg_item); 1366 &nacl->acl_group.cg_item);
1378 1367
1379 atomic_dec(&nacl->acl_pr_ref_count); 1368 atomic_dec_mb(&nacl->acl_pr_ref_count);
1380 smp_mb__after_atomic();
1381} 1369}
1382 1370
1383static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) 1371static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1407,8 +1395,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1407 * For nacl->dynamic_node_acl=1 1395 * For nacl->dynamic_node_acl=1
1408 */ 1396 */
1409 if (!lun_acl) { 1397 if (!lun_acl) {
1410 atomic_dec(&se_deve->pr_ref_count); 1398 atomic_dec_mb(&se_deve->pr_ref_count);
1411 smp_mb__after_atomic();
1412 return; 1399 return;
1413 } 1400 }
1414 nacl = lun_acl->se_lun_nacl; 1401 nacl = lun_acl->se_lun_nacl;
@@ -1417,8 +1404,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1417 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1404 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1418 &lun_acl->se_lun_group.cg_item); 1405 &lun_acl->se_lun_group.cg_item);
1419 1406
1420 atomic_dec(&se_deve->pr_ref_count); 1407 atomic_dec_mb(&se_deve->pr_ref_count);
1421 smp_mb__after_atomic();
1422} 1408}
1423 1409
1424static sense_reason_t 1410static sense_reason_t
@@ -1551,15 +1537,13 @@ core_scsi3_decode_spec_i_port(
1551 if (!i_str) 1537 if (!i_str)
1552 continue; 1538 continue;
1553 1539
1554 atomic_inc(&tmp_tpg->tpg_pr_ref_count); 1540 atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count);
1555 smp_mb__after_atomic();
1556 spin_unlock(&dev->se_port_lock); 1541 spin_unlock(&dev->se_port_lock);
1557 1542
1558 if (core_scsi3_tpg_depend_item(tmp_tpg)) { 1543 if (core_scsi3_tpg_depend_item(tmp_tpg)) {
1559 pr_err(" core_scsi3_tpg_depend_item()" 1544 pr_err(" core_scsi3_tpg_depend_item()"
1560 " for tmp_tpg\n"); 1545 " for tmp_tpg\n");
1561 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1546 atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count);
1562 smp_mb__after_atomic();
1563 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1547 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1564 goto out_unmap; 1548 goto out_unmap;
1565 } 1549 }
@@ -1571,10 +1555,8 @@ core_scsi3_decode_spec_i_port(
1571 spin_lock_irq(&tmp_tpg->acl_node_lock); 1555 spin_lock_irq(&tmp_tpg->acl_node_lock);
1572 dest_node_acl = __core_tpg_get_initiator_node_acl( 1556 dest_node_acl = __core_tpg_get_initiator_node_acl(
1573 tmp_tpg, i_str); 1557 tmp_tpg, i_str);
1574 if (dest_node_acl) { 1558 if (dest_node_acl)
1575 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1559 atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
1576 smp_mb__after_atomic();
1577 }
1578 spin_unlock_irq(&tmp_tpg->acl_node_lock); 1560 spin_unlock_irq(&tmp_tpg->acl_node_lock);
1579 1561
1580 if (!dest_node_acl) { 1562 if (!dest_node_acl) {
@@ -1586,8 +1568,7 @@ core_scsi3_decode_spec_i_port(
1586 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { 1568 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
1587 pr_err("configfs_depend_item() failed" 1569 pr_err("configfs_depend_item() failed"
1588 " for dest_node_acl->acl_group\n"); 1570 " for dest_node_acl->acl_group\n");
1589 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1571 atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
1590 smp_mb__after_atomic();
1591 core_scsi3_tpg_undepend_item(tmp_tpg); 1572 core_scsi3_tpg_undepend_item(tmp_tpg);
1592 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1573 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1593 goto out_unmap; 1574 goto out_unmap;
@@ -1646,8 +1627,7 @@ core_scsi3_decode_spec_i_port(
1646 if (core_scsi3_lunacl_depend_item(dest_se_deve)) { 1627 if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
1647 pr_err("core_scsi3_lunacl_depend_item()" 1628 pr_err("core_scsi3_lunacl_depend_item()"
1648 " failed\n"); 1629 " failed\n");
1649 atomic_dec(&dest_se_deve->pr_ref_count); 1630 atomic_dec_mb(&dest_se_deve->pr_ref_count);
1650 smp_mb__after_atomic();
1651 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1631 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1652 core_scsi3_tpg_undepend_item(dest_tpg); 1632 core_scsi3_tpg_undepend_item(dest_tpg);
1653 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1633 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -3167,15 +3147,13 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3167 if (!dest_tf_ops) 3147 if (!dest_tf_ops)
3168 continue; 3148 continue;
3169 3149
3170 atomic_inc(&dest_se_tpg->tpg_pr_ref_count); 3150 atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count);
3171 smp_mb__after_atomic();
3172 spin_unlock(&dev->se_port_lock); 3151 spin_unlock(&dev->se_port_lock);
3173 3152
3174 if (core_scsi3_tpg_depend_item(dest_se_tpg)) { 3153 if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
3175 pr_err("core_scsi3_tpg_depend_item() failed" 3154 pr_err("core_scsi3_tpg_depend_item() failed"
3176 " for dest_se_tpg\n"); 3155 " for dest_se_tpg\n");
3177 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3156 atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count);
3178 smp_mb__after_atomic();
3179 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3157 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3180 goto out_put_pr_reg; 3158 goto out_put_pr_reg;
3181 } 3159 }
@@ -3271,10 +3249,8 @@ after_iport_check:
3271 spin_lock_irq(&dest_se_tpg->acl_node_lock); 3249 spin_lock_irq(&dest_se_tpg->acl_node_lock);
3272 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3250 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3273 initiator_str); 3251 initiator_str);
3274 if (dest_node_acl) { 3252 if (dest_node_acl)
3275 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3253 atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
3276 smp_mb__after_atomic();
3277 }
3278 spin_unlock_irq(&dest_se_tpg->acl_node_lock); 3254 spin_unlock_irq(&dest_se_tpg->acl_node_lock);
3279 3255
3280 if (!dest_node_acl) { 3256 if (!dest_node_acl) {
@@ -3288,8 +3264,7 @@ after_iport_check:
3288 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { 3264 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
3289 pr_err("core_scsi3_nodeacl_depend_item() for" 3265 pr_err("core_scsi3_nodeacl_depend_item() for"
3290 " dest_node_acl\n"); 3266 " dest_node_acl\n");
3291 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3267 atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
3292 smp_mb__after_atomic();
3293 dest_node_acl = NULL; 3268 dest_node_acl = NULL;
3294 ret = TCM_INVALID_PARAMETER_LIST; 3269 ret = TCM_INVALID_PARAMETER_LIST;
3295 goto out; 3270 goto out;
@@ -3313,8 +3288,7 @@ after_iport_check:
3313 3288
3314 if (core_scsi3_lunacl_depend_item(dest_se_deve)) { 3289 if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
3315 pr_err("core_scsi3_lunacl_depend_item() failed\n"); 3290 pr_err("core_scsi3_lunacl_depend_item() failed\n");
3316 atomic_dec(&dest_se_deve->pr_ref_count); 3291 atomic_dec_mb(&dest_se_deve->pr_ref_count);
3317 smp_mb__after_atomic();
3318 dest_se_deve = NULL; 3292 dest_se_deve = NULL;
3319 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3293 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3320 goto out; 3294 goto out;
@@ -3497,6 +3471,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3497sense_reason_t 3471sense_reason_t
3498target_scsi3_emulate_pr_out(struct se_cmd *cmd) 3472target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3499{ 3473{
3474 struct se_device *dev = cmd->se_dev;
3500 unsigned char *cdb = &cmd->t_task_cdb[0]; 3475 unsigned char *cdb = &cmd->t_task_cdb[0];
3501 unsigned char *buf; 3476 unsigned char *buf;
3502 u64 res_key, sa_res_key; 3477 u64 res_key, sa_res_key;
@@ -3561,6 +3536,13 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3561 aptpl = (buf[17] & 0x01); 3536 aptpl = (buf[17] & 0x01);
3562 unreg = (buf[17] & 0x02); 3537 unreg = (buf[17] & 0x02);
3563 } 3538 }
3539 /*
3540 * If the backend device has been configured to force APTPL metadata
 3541	 * write-out, go ahead and propagate aptpl=1 down now.
3542 */
3543 if (dev->dev_attrib.force_pr_aptpl)
3544 aptpl = 1;
3545
3564 transport_kunmap_data_sg(cmd); 3546 transport_kunmap_data_sg(cmd);
3565 buf = NULL; 3547 buf = NULL;
3566 3548
@@ -3803,7 +3785,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
3803 if (!buf) 3785 if (!buf)
3804 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3786 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3805 3787
3806 buf[0] = ((add_len << 8) & 0xff); 3788 buf[0] = ((add_len >> 8) & 0xff);
3807 buf[1] = (add_len & 0xff); 3789 buf[1] = (add_len & 0xff);
3808	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */ 3790	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
3809 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ 3791 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
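The buf[0] change above is a real fix: (add_len << 8) & 0xff is always zero, so the high byte of the 16-bit ADDITIONAL LENGTH field was never emitted, whereas (add_len >> 8) & 0xff selects it correctly. For reference, the same big-endian store can be written with the unaligned helpers; the helper name below is purely illustrative, the driver keeps the explicit shifts:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical helper: emit a 16-bit length big-endian into buf[0..1]. */
static inline void pr_put_add_len(unsigned char *buf, u16 add_len)
{
	/* Equivalent to: buf[0] = (add_len >> 8) & 0xff; buf[1] = add_len & 0xff; */
	put_unaligned_be16(add_len, &buf[0]);
}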
@@ -3879,8 +3861,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3879 se_tpg = pr_reg->pr_reg_nacl->se_tpg; 3861 se_tpg = pr_reg->pr_reg_nacl->se_tpg;
3880 add_desc_len = 0; 3862 add_desc_len = 0;
3881 3863
3882 atomic_inc(&pr_reg->pr_res_holders); 3864 atomic_inc_mb(&pr_reg->pr_res_holders);
3883 smp_mb__after_atomic();
3884 spin_unlock(&pr_tmpl->registration_lock); 3865 spin_unlock(&pr_tmpl->registration_lock);
3885 /* 3866 /*
3886 * Determine expected length of $FABRIC_MOD specific 3867 * Determine expected length of $FABRIC_MOD specific
@@ -3893,8 +3874,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3893 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" 3874 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
3894 " out of buffer: %d\n", cmd->data_length); 3875 " out of buffer: %d\n", cmd->data_length);
3895 spin_lock(&pr_tmpl->registration_lock); 3876 spin_lock(&pr_tmpl->registration_lock);
3896 atomic_dec(&pr_reg->pr_res_holders); 3877 atomic_dec_mb(&pr_reg->pr_res_holders);
3897 smp_mb__after_atomic();
3898 break; 3878 break;
3899 } 3879 }
3900 /* 3880 /*
@@ -3955,8 +3935,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3955 se_nacl, pr_reg, &format_code, &buf[off+4]); 3935 se_nacl, pr_reg, &format_code, &buf[off+4]);
3956 3936
3957 spin_lock(&pr_tmpl->registration_lock); 3937 spin_lock(&pr_tmpl->registration_lock);
3958 atomic_dec(&pr_reg->pr_res_holders); 3938 atomic_dec_mb(&pr_reg->pr_res_holders);
3959 smp_mb__after_atomic();
3960 /* 3939 /*
3961 * Set the ADDITIONAL DESCRIPTOR LENGTH 3940 * Set the ADDITIONAL DESCRIPTOR LENGTH
3962 */ 3941 */
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 2ee2936fa0bd..749fd7bb7510 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -60,7 +60,7 @@ extern int core_scsi3_alloc_aptpl_registration(
60 unsigned char *, u16, u32, int, int, u8); 60 unsigned char *, u16, u32, int, int, u8);
61extern int core_scsi3_check_aptpl_registration(struct se_device *, 61extern int core_scsi3_check_aptpl_registration(struct se_device *,
62 struct se_portal_group *, struct se_lun *, 62 struct se_portal_group *, struct se_lun *,
63 struct se_lun_acl *); 63 struct se_node_acl *, u32);
64extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, 64extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
65 struct se_node_acl *); 65 struct se_node_acl *);
66extern void core_scsi3_free_all_registrations(struct se_device *); 66extern void core_scsi3_free_all_registrations(struct se_device *);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 70d9f6dabba0..7c8291f0bbbc 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -749,14 +749,18 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
749 ret = -EINVAL; 749 ret = -EINVAL;
750 goto out; 750 goto out;
751 } 751 }
752 match_int(args, &arg); 752 ret = match_int(args, &arg);
753 if (ret)
754 goto out;
753 pdv->pdv_host_id = arg; 755 pdv->pdv_host_id = arg;
754 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" 756 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
755 " %d\n", phv->phv_host_id, pdv->pdv_host_id); 757 " %d\n", phv->phv_host_id, pdv->pdv_host_id);
756 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; 758 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
757 break; 759 break;
758 case Opt_scsi_channel_id: 760 case Opt_scsi_channel_id:
759 match_int(args, &arg); 761 ret = match_int(args, &arg);
762 if (ret)
763 goto out;
760 pdv->pdv_channel_id = arg; 764 pdv->pdv_channel_id = arg;
761 pr_debug("PSCSI[%d]: Referencing SCSI Channel" 765 pr_debug("PSCSI[%d]: Referencing SCSI Channel"
762 " ID: %d\n", phv->phv_host_id, 766 " ID: %d\n", phv->phv_host_id,
@@ -764,7 +768,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
764 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; 768 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
765 break; 769 break;
766 case Opt_scsi_target_id: 770 case Opt_scsi_target_id:
767 match_int(args, &arg); 771 ret = match_int(args, &arg);
772 if (ret)
773 goto out;
768 pdv->pdv_target_id = arg; 774 pdv->pdv_target_id = arg;
769 pr_debug("PSCSI[%d]: Referencing SCSI Target" 775 pr_debug("PSCSI[%d]: Referencing SCSI Target"
770 " ID: %d\n", phv->phv_host_id, 776 " ID: %d\n", phv->phv_host_id,
@@ -772,7 +778,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
772 pdv->pdv_flags |= PDF_HAS_TARGET_ID; 778 pdv->pdv_flags |= PDF_HAS_TARGET_ID;
773 break; 779 break;
774 case Opt_scsi_lun_id: 780 case Opt_scsi_lun_id:
775 match_int(args, &arg); 781 ret = match_int(args, &arg);
782 if (ret)
783 goto out;
776 pdv->pdv_lun_id = arg; 784 pdv->pdv_lun_id = arg;
777 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" 785 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
778 " %d\n", phv->phv_host_id, pdv->pdv_lun_id); 786 " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
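All four pscsi hunks apply the same fix: match_int() can fail, so its return value must be checked before the parsed integer is used. A minimal sketch of the corrected check-before-use pattern (the function and field names here are hypothetical, not part of the driver):

#include <linux/parser.h>

static int parse_example_id(substring_t *args, int *example_id)
{
	int arg, ret;

	ret = match_int(args, &arg);
	if (ret)
		return ret;	/* don't touch 'arg' if parsing failed */

	*example_id = arg;
	return 0;
}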
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index bd78d9235ac6..ebe62afb957d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -948,7 +948,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
948 } 948 }
949 949
950 /* reject any command that we don't have a handler for */ 950 /* reject any command that we don't have a handler for */
951 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd) 951 if (!cmd->execute_cmd)
952 return TCM_UNSUPPORTED_SCSI_OPCODE; 952 return TCM_UNSUPPORTED_SCSI_OPCODE;
953 953
954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index f7cd95e8111a..fa5e157db47b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -64,21 +64,17 @@ int core_tmr_alloc_req(
64} 64}
65EXPORT_SYMBOL(core_tmr_alloc_req); 65EXPORT_SYMBOL(core_tmr_alloc_req);
66 66
67void core_tmr_release_req( 67void core_tmr_release_req(struct se_tmr_req *tmr)
68 struct se_tmr_req *tmr)
69{ 68{
70 struct se_device *dev = tmr->tmr_dev; 69 struct se_device *dev = tmr->tmr_dev;
71 unsigned long flags; 70 unsigned long flags;
72 71
73 if (!dev) { 72 if (dev) {
74 kfree(tmr); 73 spin_lock_irqsave(&dev->se_tmr_lock, flags);
75 return; 74 list_del(&tmr->tmr_list);
75 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
76 } 76 }
77 77
78 spin_lock_irqsave(&dev->se_tmr_lock, flags);
79 list_del(&tmr->tmr_list);
80 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
81
82 kfree(tmr); 78 kfree(tmr);
83} 79}
84 80
@@ -90,9 +86,8 @@ static void core_tmr_handle_tas_abort(
90 bool remove = true; 86 bool remove = true;
91 /* 87 /*
92 * TASK ABORTED status (TAS) bit support 88 * TASK ABORTED status (TAS) bit support
93 */ 89 */
94 if ((tmr_nacl && 90 if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
95 (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
96 remove = false; 91 remove = false;
97 transport_send_task_abort(cmd); 92 transport_send_task_abort(cmd);
98 } 93 }
@@ -120,13 +115,12 @@ void core_tmr_abort_task(
120 struct se_tmr_req *tmr, 115 struct se_tmr_req *tmr,
121 struct se_session *se_sess) 116 struct se_session *se_sess)
122{ 117{
123 struct se_cmd *se_cmd, *tmp_cmd; 118 struct se_cmd *se_cmd;
124 unsigned long flags; 119 unsigned long flags;
125 int ref_tag; 120 int ref_tag;
126 121
127 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 122 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
128 list_for_each_entry_safe(se_cmd, tmp_cmd, 123 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
129 &se_sess->sess_cmd_list, se_cmd_list) {
130 124
131 if (dev != se_cmd->se_dev) 125 if (dev != se_cmd->se_dev)
132 continue; 126 continue;
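Dropping the _safe iterator in core_tmr_abort_task() is valid only because, as far as this hunk shows, the loop no longer has to survive removal of the entry it is standing on; list_for_each_entry_safe() exists precisely to cache the next pointer for that case. A generic sketch of the distinction (the item type is hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct example_item {
	struct list_head node;
};

static void example_walk(struct list_head *head)
{
	struct example_item *it;

	list_for_each_entry(it, head, node) {
		/* plain variant: fine while nothing is removed mid-walk */
	}
}

static void example_drain(struct list_head *head)
{
	struct example_item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);	/* removal mid-walk needs _safe */
		kfree(it);
	}
}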
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index be783f717f19..0696de9553d3 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -40,6 +40,7 @@
40#include <target/target_core_fabric.h> 40#include <target/target_core_fabric.h>
41 41
42#include "target_core_internal.h" 42#include "target_core_internal.h"
43#include "target_core_pr.h"
43 44
44extern struct se_device *g_lun0_dev; 45extern struct se_device *g_lun0_dev;
45 46
@@ -166,6 +167,13 @@ void core_tpg_add_node_to_devs(
166 167
167 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, 168 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
168 lun_access, acl, tpg); 169 lun_access, acl, tpg);
170 /*
171 * Check to see if there are any existing persistent reservation
172 * APTPL pre-registrations that need to be enabled for this dynamic
173 * LUN ACL now..
174 */
175 core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
176 lun->unpacked_lun);
169 spin_lock(&tpg->tpg_lun_lock); 177 spin_lock(&tpg->tpg_lun_lock);
170 } 178 }
171 spin_unlock(&tpg->tpg_lun_lock); 179 spin_unlock(&tpg->tpg_lun_lock);
@@ -335,7 +343,7 @@ void core_tpg_clear_object_luns(struct se_portal_group *tpg)
335 continue; 343 continue;
336 344
337 spin_unlock(&tpg->tpg_lun_lock); 345 spin_unlock(&tpg->tpg_lun_lock);
338 core_dev_del_lun(tpg, lun->unpacked_lun); 346 core_dev_del_lun(tpg, lun);
339 spin_lock(&tpg->tpg_lun_lock); 347 spin_lock(&tpg->tpg_lun_lock);
340 } 348 }
341 spin_unlock(&tpg->tpg_lun_lock); 349 spin_unlock(&tpg->tpg_lun_lock);
@@ -663,13 +671,6 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
663 return 0; 671 return 0;
664} 672}
665 673
666static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
667{
668 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
669
670 core_tpg_post_dellun(se_tpg, lun);
671}
672
673int core_tpg_register( 674int core_tpg_register(
674 struct target_core_fabric_ops *tfo, 675 struct target_core_fabric_ops *tfo,
675 struct se_wwn *se_wwn, 676 struct se_wwn *se_wwn,
@@ -773,7 +774,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
773 spin_unlock_irq(&se_tpg->acl_node_lock); 774 spin_unlock_irq(&se_tpg->acl_node_lock);
774 775
775 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 776 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
776 core_tpg_release_virtual_lun0(se_tpg); 777 core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
777 778
778 se_tpg->se_tpg_fabric_ptr = NULL; 779 se_tpg->se_tpg_fabric_ptr = NULL;
779 array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG); 780 array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
@@ -838,37 +839,7 @@ int core_tpg_add_lun(
838 return 0; 839 return 0;
839} 840}
840 841
841struct se_lun *core_tpg_pre_dellun( 842void core_tpg_remove_lun(
842 struct se_portal_group *tpg,
843 u32 unpacked_lun)
844{
845 struct se_lun *lun;
846
847 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
848 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
849 "-1: %u for Target Portal Group: %u\n",
850 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
851 TRANSPORT_MAX_LUNS_PER_TPG-1,
852 tpg->se_tpg_tfo->tpg_get_tag(tpg));
853 return ERR_PTR(-EOVERFLOW);
854 }
855
856 spin_lock(&tpg->tpg_lun_lock);
857 lun = tpg->tpg_lun_list[unpacked_lun];
858 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
859 pr_err("%s Logical Unit Number: %u is not active on"
860 " Target Portal Group: %u, ignoring request.\n",
861 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
862 tpg->se_tpg_tfo->tpg_get_tag(tpg));
863 spin_unlock(&tpg->tpg_lun_lock);
864 return ERR_PTR(-ENODEV);
865 }
866 spin_unlock(&tpg->tpg_lun_lock);
867
868 return lun;
869}
870
871int core_tpg_post_dellun(
872 struct se_portal_group *tpg, 843 struct se_portal_group *tpg,
873 struct se_lun *lun) 844 struct se_lun *lun)
874{ 845{
@@ -882,6 +853,4 @@ int core_tpg_post_dellun(
882 spin_unlock(&tpg->tpg_lun_lock); 853 spin_unlock(&tpg->tpg_lun_lock);
883 854
884 percpu_ref_exit(&lun->lun_ref); 855 percpu_ref_exit(&lun->lun_ref);
885
886 return 0;
887} 856}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7fa62fc93e0b..9ea0d5f03f7a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -232,6 +232,10 @@ void transport_subsystem_check_init(void)
232 if (ret != 0) 232 if (ret != 0)
233 pr_err("Unable to load target_core_pscsi\n"); 233 pr_err("Unable to load target_core_pscsi\n");
234 234
235 ret = request_module("target_core_user");
236 if (ret != 0)
237 pr_err("Unable to load target_core_user\n");
238
235 sub_api_initialized = 1; 239 sub_api_initialized = 1;
236} 240}
237 241
@@ -752,8 +756,7 @@ void target_qf_do_work(struct work_struct *work)
752 756
753 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 757 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
754 list_del(&cmd->se_qf_node); 758 list_del(&cmd->se_qf_node);
755 atomic_dec(&dev->dev_qf_count); 759 atomic_dec_mb(&dev->dev_qf_count);
756 smp_mb__after_atomic();
757 760
758 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 761 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
759 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 762 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1166,7 +1169,6 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
1166 * Dormant to Active status. 1169 * Dormant to Active status.
1167 */ 1170 */
1168 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1171 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
1169 smp_mb__after_atomic();
1170 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1172 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1171 cmd->se_ordered_id, cmd->sam_task_attr, 1173 cmd->se_ordered_id, cmd->sam_task_attr,
1172 dev->transport->name); 1174 dev->transport->name);
@@ -1722,8 +1724,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1722 cmd->t_task_cdb[0], cmd->se_ordered_id); 1724 cmd->t_task_cdb[0], cmd->se_ordered_id);
1723 return false; 1725 return false;
1724 case MSG_ORDERED_TAG: 1726 case MSG_ORDERED_TAG:
1725 atomic_inc(&dev->dev_ordered_sync); 1727 atomic_inc_mb(&dev->dev_ordered_sync);
1726 smp_mb__after_atomic();
1727 1728
1728 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1729 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
1729 " se_ordered_id: %u\n", 1730 " se_ordered_id: %u\n",
@@ -1740,8 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1740 /* 1741 /*
1741 * For SIMPLE and UNTAGGED Task Attribute commands 1742 * For SIMPLE and UNTAGGED Task Attribute commands
1742 */ 1743 */
1743 atomic_inc(&dev->simple_cmds); 1744 atomic_inc_mb(&dev->simple_cmds);
1744 smp_mb__after_atomic();
1745 break; 1745 break;
1746 } 1746 }
1747 1747
@@ -1845,8 +1845,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1845 return; 1845 return;
1846 1846
1847 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1847 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
1848 atomic_dec(&dev->simple_cmds); 1848 atomic_dec_mb(&dev->simple_cmds);
1849 smp_mb__after_atomic();
1850 dev->dev_cur_ordered_id++; 1849 dev->dev_cur_ordered_id++;
1851 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1850 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
1852 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1851 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1857,8 +1856,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1857 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1856 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
1858 cmd->se_ordered_id); 1857 cmd->se_ordered_id);
1859 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1858 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1860 atomic_dec(&dev->dev_ordered_sync); 1859 atomic_dec_mb(&dev->dev_ordered_sync);
1861 smp_mb__after_atomic();
1862 1860
1863 dev->dev_cur_ordered_id++; 1861 dev->dev_cur_ordered_id++;
1864 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 1862 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1877,8 +1875,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
1877 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1875 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
1878 trace_target_cmd_complete(cmd); 1876 trace_target_cmd_complete(cmd);
1879 ret = cmd->se_tfo->queue_status(cmd); 1877 ret = cmd->se_tfo->queue_status(cmd);
1880 if (ret) 1878 goto out;
1881 goto out;
1882 } 1879 }
1883 1880
1884 switch (cmd->data_direction) { 1881 switch (cmd->data_direction) {
@@ -1916,8 +1913,7 @@ static void transport_handle_queue_full(
1916{ 1913{
1917 spin_lock_irq(&dev->qf_cmd_lock); 1914 spin_lock_irq(&dev->qf_cmd_lock);
1918 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1915 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
1919 atomic_inc(&dev->dev_qf_count); 1916 atomic_inc_mb(&dev->dev_qf_count);
1920 smp_mb__after_atomic();
1921 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1917 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
1922 1918
1923 schedule_work(&cmd->se_dev->qf_work_queue); 1919 schedule_work(&cmd->se_dev->qf_work_queue);
@@ -2896,7 +2892,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
2896 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2892 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2897 cmd->transport_state |= CMD_T_ABORTED; 2893 cmd->transport_state |= CMD_T_ABORTED;
2898 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2894 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2899 smp_mb__after_atomic();
2900 return; 2895 return;
2901 } 2896 }
2902 } 2897 }
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 101858e245b3..1738b1646988 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -161,8 +161,7 @@ int core_scsi3_ua_allocate(
161 spin_unlock(&deve->ua_lock); 161 spin_unlock(&deve->ua_lock);
162 spin_unlock_irq(&nacl->device_list_lock); 162 spin_unlock_irq(&nacl->device_list_lock);
163 163
164 atomic_inc(&deve->ua_count); 164 atomic_inc_mb(&deve->ua_count);
165 smp_mb__after_atomic();
166 return 0; 165 return 0;
167 } 166 }
168 list_add_tail(&ua->ua_nacl_list, &deve->ua_list); 167 list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -174,8 +173,7 @@ int core_scsi3_ua_allocate(
174 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 173 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
175 asc, ascq); 174 asc, ascq);
176 175
177 atomic_inc(&deve->ua_count); 176 atomic_inc_mb(&deve->ua_count);
178 smp_mb__after_atomic();
179 return 0; 177 return 0;
180} 178}
181 179
@@ -189,8 +187,7 @@ void core_scsi3_ua_release_all(
189 list_del(&ua->ua_nacl_list); 187 list_del(&ua->ua_nacl_list);
190 kmem_cache_free(se_ua_cache, ua); 188 kmem_cache_free(se_ua_cache, ua);
191 189
192 atomic_dec(&deve->ua_count); 190 atomic_dec_mb(&deve->ua_count);
193 smp_mb__after_atomic();
194 } 191 }
195 spin_unlock(&deve->ua_lock); 192 spin_unlock(&deve->ua_lock);
196} 193}
@@ -250,8 +247,7 @@ void core_scsi3_ua_for_check_condition(
250 list_del(&ua->ua_nacl_list); 247 list_del(&ua->ua_nacl_list);
251 kmem_cache_free(se_ua_cache, ua); 248 kmem_cache_free(se_ua_cache, ua);
252 249
253 atomic_dec(&deve->ua_count); 250 atomic_dec_mb(&deve->ua_count);
254 smp_mb__after_atomic();
255 } 251 }
256 spin_unlock(&deve->ua_lock); 252 spin_unlock(&deve->ua_lock);
257 spin_unlock_irq(&nacl->device_list_lock); 253 spin_unlock_irq(&nacl->device_list_lock);
@@ -309,8 +305,7 @@ int core_scsi3_ua_clear_for_request_sense(
309 list_del(&ua->ua_nacl_list); 305 list_del(&ua->ua_nacl_list);
310 kmem_cache_free(se_ua_cache, ua); 306 kmem_cache_free(se_ua_cache, ua);
311 307
312 atomic_dec(&deve->ua_count); 308 atomic_dec_mb(&deve->ua_count);
313 smp_mb__after_atomic();
314 } 309 }
315 spin_unlock(&deve->ua_lock); 310 spin_unlock(&deve->ua_lock);
316 spin_unlock_irq(&nacl->device_list_lock); 311 spin_unlock_irq(&nacl->device_list_lock);
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index be912b36daae..a6b56b364e7a 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -1,4 +1,5 @@
1#ifndef TARGET_CORE_UA_H 1#ifndef TARGET_CORE_UA_H
2#define TARGET_CORE_UA_H
2 3
3/* 4/*
 3 * From spc4r17, Table D.1: ASC and ASCQ Assignment 4 * From spc4r17, Table D.1: ASC and ASCQ Assignment
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
new file mode 100644
index 000000000000..9a1b314f6482
--- /dev/null
+++ b/drivers/target/target_core_user.c
@@ -0,0 +1,1167 @@
1/*
2 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
3 * Copyright (C) 2014 Red Hat, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/spinlock.h>
20#include <linux/module.h>
21#include <linux/idr.h>
22#include <linux/timer.h>
23#include <linux/parser.h>
24#include <scsi/scsi.h>
25#include <scsi/scsi_host.h>
26#include <linux/uio_driver.h>
27#include <net/genetlink.h>
28#include <target/target_core_base.h>
29#include <target/target_core_fabric.h>
30#include <target/target_core_backend.h>
31#include <linux/target_core_user.h>
32
33/*
34 * Define a shared-memory interface for LIO to pass SCSI commands and
35 * data to userspace for processing. This is to allow backends that
36 * are too complex for in-kernel support to be possible.
37 *
38 * It uses the UIO framework to do a lot of the device-creation and
39 * introspection work for us.
40 *
41 * See the .h file for how the ring is laid out. Note that while the
42 * command ring is defined, the particulars of the data area are
43 * not. Offset values in the command entry point to other locations
44 * internal to the mmap()ed area. There is separate space outside the
45 * command ring for data buffers. This leaves maximum flexibility for
46 * moving buffer allocations, or even page flipping or other
47 * allocation techniques, without altering the command ring layout.
48 *
49 * SECURITY:
50 * The user process must be assumed to be malicious. There's no way to
51 * prevent it breaking the command ring protocol if it wants, but in
52 * order to prevent other issues we must only ever read *data* from
53 * the shared memory area, not offsets or sizes. This applies to
54 * command ring entries as well as the mailbox. Extra code needed for
55 * this may have a 'UAM' comment.
56 */
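To make the comment above concrete: a userspace handler would open the UIO node that tcmu_configure_device() registers, mmap() the combined command/data ring, and block in read() waiting for uio_event_notify(). The sketch below is illustrative only; the device path, the hard-coded map length and the mailbox parsing are assumptions, not defined by this file.

/* Hedged userspace sketch of the flow described above; not kernel code. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);	/* placeholder device node */
	if (fd < 0)
		return 1;

	size_t map_len = (16 + 257) * 4096;	/* CMDR_SIZE + DATA_SIZE below */
	void *ring = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		return 1;

	for (;;) {
		uint32_t events;
		/* Blocks until the kernel kicks us via uio_event_notify(). */
		if (read(fd, &events, sizeof(events)) != sizeof(events))
			break;
		/*
		 * Walk the command ring starting at the mailbox's cmdr_off,
		 * treating offsets and sizes defensively (the 'UAM' rule cuts
		 * both ways), complete the commands, then advance cmd_tail.
		 */
	}

	munmap(ring, map_len);
	close(fd);
	return 0;
}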
57
58
59#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
60
61#define CMDR_SIZE (16 * 4096)
62#define DATA_SIZE (257 * 4096)
63
64#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
65
66static struct device *tcmu_root_device;
67
68struct tcmu_hba {
69 u32 host_id;
70};
71
72/* User wants all cmds or just some */
73enum passthru_level {
74 TCMU_PASS_ALL = 0,
75 TCMU_PASS_IO,
76 TCMU_PASS_INVALID,
77};
78
79#define TCMU_CONFIG_LEN 256
80
81struct tcmu_dev {
82 struct se_device se_dev;
83
84 char *name;
85 struct se_hba *hba;
86
87#define TCMU_DEV_BIT_OPEN 0
88#define TCMU_DEV_BIT_BROKEN 1
89 unsigned long flags;
90 enum passthru_level pass_level;
91
92 struct uio_info uio_info;
93
94 struct tcmu_mailbox *mb_addr;
95 size_t dev_size;
96 u32 cmdr_size;
97 u32 cmdr_last_cleaned;
98 /* Offset of data ring from start of mb */
99 size_t data_off;
100 size_t data_size;
101 /* Ring head + tail values. */
102 /* Must add data_off and mb_addr to get the address */
103 size_t data_head;
104 size_t data_tail;
105
106 wait_queue_head_t wait_cmdr;
107 /* TODO should this be a mutex? */
108 spinlock_t cmdr_lock;
109
110 struct idr commands;
111 spinlock_t commands_lock;
112
113 struct timer_list timeout;
114
115 char dev_config[TCMU_CONFIG_LEN];
116};
117
118#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
119
120#define CMDR_OFF sizeof(struct tcmu_mailbox)
121
122struct tcmu_cmd {
123 struct se_cmd *se_cmd;
124 struct tcmu_dev *tcmu_dev;
125
126 uint16_t cmd_id;
127
128 /* Can't use se_cmd->data_length when cleaning up expired cmds, because if
129 cmd has been completed then accessing se_cmd is off limits */
130 size_t data_length;
131
132 unsigned long deadline;
133
134#define TCMU_CMD_BIT_EXPIRED 0
135 unsigned long flags;
136};
137
138static struct kmem_cache *tcmu_cmd_cache;
139
140/* multicast group */
141enum tcmu_multicast_groups {
142 TCMU_MCGRP_CONFIG,
143};
144
145static const struct genl_multicast_group tcmu_mcgrps[] = {
146 [TCMU_MCGRP_CONFIG] = { .name = "config", },
147};
148
149/* Our generic netlink family */
150static struct genl_family tcmu_genl_family = {
151 .id = GENL_ID_GENERATE,
152 .hdrsize = 0,
153 .name = "TCM-USER",
154 .version = 1,
155 .maxattr = TCMU_ATTR_MAX,
156 .mcgrps = tcmu_mcgrps,
157 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
158};
159
160static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
161{
162 struct se_device *se_dev = se_cmd->se_dev;
163 struct tcmu_dev *udev = TCMU_DEV(se_dev);
164 struct tcmu_cmd *tcmu_cmd;
165 int cmd_id;
166
167 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
168 if (!tcmu_cmd)
169 return NULL;
170
171 tcmu_cmd->se_cmd = se_cmd;
172 tcmu_cmd->tcmu_dev = udev;
173 tcmu_cmd->data_length = se_cmd->data_length;
174
175 tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
176
177 idr_preload(GFP_KERNEL);
178 spin_lock_irq(&udev->commands_lock);
179 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
180 USHRT_MAX, GFP_NOWAIT);
181 spin_unlock_irq(&udev->commands_lock);
182 idr_preload_end();
183
184 if (cmd_id < 0) {
185 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
186 return NULL;
187 }
188 tcmu_cmd->cmd_id = cmd_id;
189
190 return tcmu_cmd;
191}
192
193static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
194{
195 unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;
196
197 size = round_up(size+offset, PAGE_SIZE);
198 vaddr -= offset;
199
200 while (size) {
201 flush_dcache_page(virt_to_page(vaddr));
202 size -= PAGE_SIZE;
203 }
204}
205
206/*
207 * Some ring helper functions. We don't assume size is a power of 2 so
208 * we can't use circ_buf.h.
209 */
210static inline size_t spc_used(size_t head, size_t tail, size_t size)
211{
212 int diff = head - tail;
213
214 if (diff >= 0)
215 return diff;
216 else
217 return size + diff;
218}
219
220static inline size_t spc_free(size_t head, size_t tail, size_t size)
221{
222 /* Keep 1 byte unused or we can't tell full from empty */
223 return (size - spc_used(head, tail, size) - 1);
224}
225
226static inline size_t head_to_end(size_t head, size_t size)
227{
228 return size - head;
229}
230
231#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
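A quick worked example of the ring arithmetic above (nothing beyond the definitions is assumed):

/*
 * With size = 8, head = 2, tail = 5:
 *   spc_used(2, 5, 8) -> 2 - 5 < 0, so 8 + (2 - 5) = 5 bytes in use
 *   spc_free(2, 5, 8) -> 8 - 5 - 1 = 2 bytes free (one byte stays unused
 *                        so head == tail always means "empty")
 * UPDATE_HEAD with head = 6, used = 4, size = 8 stores (6 + 4) % 8 = 2,
 * i.e. the head wraps around the ring.
 */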
232
233/*
 234 * We can't queue a command until we have space available on the cmd ring *and*
 235 * space avail on the data ring.
236 *
237 * Called with ring lock held.
238 */
239static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
240{
241 struct tcmu_mailbox *mb = udev->mb_addr;
242 size_t space;
243 u32 cmd_head;
244 size_t cmd_needed;
245
246 tcmu_flush_dcache_range(mb, sizeof(*mb));
247
248 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
249
250 /*
 251 * If cmd end-of-ring space is too small then we need space for a PAD plus
252 * original cmd - cmds are internally contiguous.
253 */
254 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
255 cmd_needed = cmd_size;
256 else
257 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
258
259 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
260 if (space < cmd_needed) {
261 pr_debug("no cmd space: %u %u %u\n", cmd_head,
262 udev->cmdr_last_cleaned, udev->cmdr_size);
263 return false;
264 }
265
266 space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
267 if (space < data_needed) {
268 pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
269 udev->data_tail, udev->data_size);
270 return false;
271 }
272
273 return true;
274}
275
276static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
277{
278 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
279 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
280 size_t base_command_size, command_size;
281 struct tcmu_mailbox *mb;
282 struct tcmu_cmd_entry *entry;
283 int i;
284 struct scatterlist *sg;
285 struct iovec *iov;
286 int iov_cnt = 0;
287 uint32_t cmd_head;
288 uint64_t cdb_off;
289
290 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
291 return -EINVAL;
292
293 /*
294 * Must be a certain minimum size for response sense info, but
295 * also may be larger if the iov array is large.
296 *
297 * iovs = sgl_nents+1, for end-of-ring case, plus another 1
298 * b/c size == offsetof one-past-element.
299 */
300 base_command_size = max(offsetof(struct tcmu_cmd_entry,
301 req.iov[se_cmd->t_data_nents + 2]),
302 sizeof(struct tcmu_cmd_entry));
303 command_size = base_command_size
304 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
305
306 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
307
308 spin_lock_irq(&udev->cmdr_lock);
309
310 mb = udev->mb_addr;
311 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
312 if ((command_size > (udev->cmdr_size / 2))
313 || tcmu_cmd->data_length > (udev->data_size - 1))
314 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
315 "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
316 udev->cmdr_size, udev->data_size);
317
318 while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
319 int ret;
320 DEFINE_WAIT(__wait);
321
322 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
323
324 pr_debug("sleeping for ring space\n");
325 spin_unlock_irq(&udev->cmdr_lock);
326 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
327 finish_wait(&udev->wait_cmdr, &__wait);
328 if (!ret) {
329 pr_warn("tcmu: command timed out\n");
330 return -ETIMEDOUT;
331 }
332
333 spin_lock_irq(&udev->cmdr_lock);
334
335 /* We dropped cmdr_lock, cmd_head is stale */
336 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
337 }
338
339 /* Insert a PAD if end-of-ring space is too small */
340 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
341 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
342
343 entry = (void *) mb + CMDR_OFF + cmd_head;
344 tcmu_flush_dcache_range(entry, sizeof(*entry));
345 tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD);
346 tcmu_hdr_set_len(&entry->hdr, pad_size);
347
348 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
349
350 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
351 WARN_ON(cmd_head != 0);
352 }
353
354 entry = (void *) mb + CMDR_OFF + cmd_head;
355 tcmu_flush_dcache_range(entry, sizeof(*entry));
356 tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD);
357 tcmu_hdr_set_len(&entry->hdr, command_size);
358 entry->cmd_id = tcmu_cmd->cmd_id;
359
360 /*
361 * Fix up iovecs, and handle if allocation in data ring wrapped.
362 */
363 iov = &entry->req.iov[0];
364 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
365 size_t copy_bytes = min((size_t)sg->length,
366 head_to_end(udev->data_head, udev->data_size));
367 void *from = kmap_atomic(sg_page(sg)) + sg->offset;
368 void *to = (void *) mb + udev->data_off + udev->data_head;
369
370 if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) {
371 memcpy(to, from, copy_bytes);
372 tcmu_flush_dcache_range(to, copy_bytes);
373 }
374
375 /* Even iov_base is relative to mb_addr */
376 iov->iov_len = copy_bytes;
377 iov->iov_base = (void *) udev->data_off + udev->data_head;
378 iov_cnt++;
379 iov++;
380
381 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
382
383 /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
384 if (sg->length != copy_bytes) {
385 from += copy_bytes;
386 copy_bytes = sg->length - copy_bytes;
387
388 iov->iov_len = copy_bytes;
389 iov->iov_base = (void *) udev->data_off + udev->data_head;
390
391 if (se_cmd->data_direction == DMA_TO_DEVICE) {
392 to = (void *) mb + udev->data_off + udev->data_head;
393 memcpy(to, from, copy_bytes);
394 tcmu_flush_dcache_range(to, copy_bytes);
395 }
396
397 iov_cnt++;
398 iov++;
399
400 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
401 }
402
403 kunmap_atomic(from);
404 }
405 entry->req.iov_cnt = iov_cnt;
406
407 /* All offsets relative to mb_addr, not start of entry! */
408 cdb_off = CMDR_OFF + cmd_head + base_command_size;
409 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
410 entry->req.cdb_off = cdb_off;
411 tcmu_flush_dcache_range(entry, sizeof(*entry));
412
413 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
414 tcmu_flush_dcache_range(mb, sizeof(*mb));
415
416 spin_unlock_irq(&udev->cmdr_lock);
417
418 /* TODO: only if FLUSH and FUA? */
419 uio_event_notify(&udev->uio_info);
420
421 mod_timer(&udev->timeout,
422 round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
423
424 return 0;
425}
426
427static int tcmu_queue_cmd(struct se_cmd *se_cmd)
428{
429 struct se_device *se_dev = se_cmd->se_dev;
430 struct tcmu_dev *udev = TCMU_DEV(se_dev);
431 struct tcmu_cmd *tcmu_cmd;
432 int ret;
433
434 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
435 if (!tcmu_cmd)
436 return -ENOMEM;
437
438 ret = tcmu_queue_cmd_ring(tcmu_cmd);
439 if (ret < 0) {
440 pr_err("TCMU: Could not queue command\n");
441 spin_lock_irq(&udev->commands_lock);
442 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
443 spin_unlock_irq(&udev->commands_lock);
444
445 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
446 }
447
448 return ret;
449}
450
451static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
452{
453 struct se_cmd *se_cmd = cmd->se_cmd;
454 struct tcmu_dev *udev = cmd->tcmu_dev;
455
456 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
457 /* cmd has been completed already from timeout, just reclaim data
458 ring space */
459 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
460 return;
461 }
462
463 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
464 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
465 se_cmd->scsi_sense_length);
466
467 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
468 }
469 else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
470 struct scatterlist *sg;
471 int i;
472
473 /* It'd be easier to look at entry's iovec again, but UAM */
474 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
475 size_t copy_bytes;
476 void *to;
477 void *from;
478
479 copy_bytes = min((size_t)sg->length,
480 head_to_end(udev->data_tail, udev->data_size));
481
482 to = kmap_atomic(sg_page(sg)) + sg->offset;
483 WARN_ON(sg->length + sg->offset > PAGE_SIZE);
484 from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
485 tcmu_flush_dcache_range(from, copy_bytes);
486 memcpy(to, from, copy_bytes);
487
488 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
489
490 /* Uh oh, wrapped the data buffer for this sg's data */
491 if (sg->length != copy_bytes) {
492 from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
493 WARN_ON(udev->data_tail);
494 to += copy_bytes;
495 copy_bytes = sg->length - copy_bytes;
496 tcmu_flush_dcache_range(from, copy_bytes);
497 memcpy(to, from, copy_bytes);
498
499 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
500 }
501
502 kunmap_atomic(to);
503 }
504
505 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
506 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
507 } else {
508 pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction);
509 }
510
511 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
512 cmd->se_cmd = NULL;
513
514 kmem_cache_free(tcmu_cmd_cache, cmd);
515}
516
517static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
518{
519 struct tcmu_mailbox *mb;
520 LIST_HEAD(cpl_cmds);
521 unsigned long flags;
522 int handled = 0;
523
524 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
525 pr_err("ring broken, not handling completions\n");
526 return 0;
527 }
528
529 spin_lock_irqsave(&udev->cmdr_lock, flags);
530
531 mb = udev->mb_addr;
532 tcmu_flush_dcache_range(mb, sizeof(*mb));
533
534 while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
535
536 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
537 struct tcmu_cmd *cmd;
538
539 tcmu_flush_dcache_range(entry, sizeof(*entry));
540
541 if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) {
542 UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
543 continue;
544 }
545 WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD);
546
547 spin_lock(&udev->commands_lock);
548 cmd = idr_find(&udev->commands, entry->cmd_id);
549 if (cmd)
550 idr_remove(&udev->commands, cmd->cmd_id);
551 spin_unlock(&udev->commands_lock);
552
553 if (!cmd) {
554 pr_err("cmd_id not found, ring is broken\n");
555 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
556 break;
557 }
558
559 tcmu_handle_completion(cmd, entry);
560
561 UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
562
563 handled++;
564 }
565
566 if (mb->cmd_tail == mb->cmd_head)
567 del_timer(&udev->timeout); /* no more pending cmds */
568
569 spin_unlock_irqrestore(&udev->cmdr_lock, flags);
570
571 wake_up(&udev->wait_cmdr);
572
573 return handled;
574}
575
576static int tcmu_check_expired_cmd(int id, void *p, void *data)
577{
578 struct tcmu_cmd *cmd = p;
579
580 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
581 return 0;
582
583 if (!time_after(cmd->deadline, jiffies))
584 return 0;
585
586 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
587 target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
588 cmd->se_cmd = NULL;
589
590 kmem_cache_free(tcmu_cmd_cache, cmd);
591
592 return 0;
593}
594
595static void tcmu_device_timedout(unsigned long data)
596{
597 struct tcmu_dev *udev = (struct tcmu_dev *)data;
598 unsigned long flags;
599 int handled;
600
601 handled = tcmu_handle_completions(udev);
602
603 pr_warn("%d completions handled from timeout\n", handled);
604
605 spin_lock_irqsave(&udev->commands_lock, flags);
606 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
607 spin_unlock_irqrestore(&udev->commands_lock, flags);
608
609 /*
610 * We don't need to wakeup threads on wait_cmdr since they have their
611 * own timeout.
612 */
613}
614
615static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
616{
617 struct tcmu_hba *tcmu_hba;
618
619 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
620 if (!tcmu_hba)
621 return -ENOMEM;
622
623 tcmu_hba->host_id = host_id;
624 hba->hba_ptr = tcmu_hba;
625
626 return 0;
627}
628
629static void tcmu_detach_hba(struct se_hba *hba)
630{
631 kfree(hba->hba_ptr);
632 hba->hba_ptr = NULL;
633}
634
635static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
636{
637 struct tcmu_dev *udev;
638
639 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
640 if (!udev)
641 return NULL;
642
643 udev->name = kstrdup(name, GFP_KERNEL);
644 if (!udev->name) {
645 kfree(udev);
646 return NULL;
647 }
648
649 udev->hba = hba;
650
651 init_waitqueue_head(&udev->wait_cmdr);
652 spin_lock_init(&udev->cmdr_lock);
653
654 idr_init(&udev->commands);
655 spin_lock_init(&udev->commands_lock);
656
657 setup_timer(&udev->timeout, tcmu_device_timedout,
658 (unsigned long)udev);
659
660 udev->pass_level = TCMU_PASS_ALL;
661
662 return &udev->se_dev;
663}
664
665static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
666{
667 struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);
668
669 tcmu_handle_completions(tcmu_dev);
670
671 return 0;
672}
673
674/*
675 * mmap code from uio.c. Copied here because we want to hook mmap()
676 * and this stuff must come along.
677 */
678static int tcmu_find_mem_index(struct vm_area_struct *vma)
679{
680 struct tcmu_dev *udev = vma->vm_private_data;
681 struct uio_info *info = &udev->uio_info;
682
683 if (vma->vm_pgoff < MAX_UIO_MAPS) {
684 if (info->mem[vma->vm_pgoff].size == 0)
685 return -1;
686 return (int)vma->vm_pgoff;
687 }
688 return -1;
689}
690
691static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
692{
693 struct tcmu_dev *udev = vma->vm_private_data;
694 struct uio_info *info = &udev->uio_info;
695 struct page *page;
696 unsigned long offset;
697 void *addr;
698
699 int mi = tcmu_find_mem_index(vma);
700 if (mi < 0)
701 return VM_FAULT_SIGBUS;
702
703 /*
704 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
705 * to use mem[N].
706 */
707 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
708
709 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
710 if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
711 page = virt_to_page(addr);
712 else
713 page = vmalloc_to_page(addr);
714 get_page(page);
715 vmf->page = page;
716 return 0;
717}
718
719static const struct vm_operations_struct tcmu_vm_ops = {
720 .fault = tcmu_vma_fault,
721};
722
723static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
724{
725 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
726
727 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
728 vma->vm_ops = &tcmu_vm_ops;
729
730 vma->vm_private_data = udev;
731
732 /* Ensure the mmap is exactly the right size */
733 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
734 return -EINVAL;
735
736 return 0;
737}
738
739static int tcmu_open(struct uio_info *info, struct inode *inode)
740{
741 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
742
743 /* O_EXCL not supported for char devs, so fake it? */
744 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
745 return -EBUSY;
746
747 pr_debug("open\n");
748
749 return 0;
750}
751
752static int tcmu_release(struct uio_info *info, struct inode *inode)
753{
754 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
755
756 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
757
758 pr_debug("close\n");
759
760 return 0;
761}
762
763static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
764{
765 struct sk_buff *skb;
766 void *msg_header;
767 int ret = -ENOMEM;
768
769 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
770 if (!skb)
771 return ret;
772
773 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
774 if (!msg_header)
775 goto free_skb;
776
777 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
778 if (ret < 0)
779 goto free_skb;
780
781 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
782 if (ret < 0)
783 goto free_skb;
784
785 ret = genlmsg_end(skb, msg_header);
786 if (ret < 0)
787 goto free_skb;
788
789 ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
790 TCMU_MCGRP_CONFIG, GFP_KERNEL);
791
792 /* We don't care if no one is listening */
793 if (ret == -ESRCH)
794 ret = 0;
795
796 return ret;
797free_skb:
798 nlmsg_free(skb);
799 return ret;
800}
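tcmu_netlink_event() multicasts device add/remove notifications to the "config" group of the "TCM-USER" family. A hedged sketch of how a userspace daemon might subscribe, assuming libnl-3 (the library choice and error handling are not implied by this file):

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static struct nl_sock *tcmu_listen(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk)
		return NULL;
	if (genl_connect(sk))
		goto err;

	/* Resolve the "config" multicast group of the TCM-USER family. */
	grp = genl_ctrl_resolve_grp(sk, "TCM-USER", "config");
	if (grp < 0 || nl_socket_add_membership(sk, grp))
		goto err;

	return sk;	/* caller then receives notifications in a loop */
err:
	nl_socket_free(sk);
	return NULL;
}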
801
802static int tcmu_configure_device(struct se_device *dev)
803{
804 struct tcmu_dev *udev = TCMU_DEV(dev);
805 struct tcmu_hba *hba = udev->hba->hba_ptr;
806 struct uio_info *info;
807 struct tcmu_mailbox *mb;
808 size_t size;
809 size_t used;
810 int ret = 0;
811 char *str;
812
813 info = &udev->uio_info;
814
815 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
816 udev->dev_config);
817 size += 1; /* for \0 */
818 str = kmalloc(size, GFP_KERNEL);
819 if (!str)
820 return -ENOMEM;
821
822 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
823
824 if (udev->dev_config[0])
825 snprintf(str + used, size - used, "/%s", udev->dev_config);
826
827 info->name = str;
828
829 udev->mb_addr = vzalloc(TCMU_RING_SIZE);
830 if (!udev->mb_addr) {
831 ret = -ENOMEM;
832 goto err_vzalloc;
833 }
834
835 /* mailbox fits in first part of CMDR space */
836 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
837 udev->data_off = CMDR_SIZE;
838 udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;
839
840 mb = udev->mb_addr;
841 mb->version = 1;
842 mb->cmdr_off = CMDR_OFF;
843 mb->cmdr_size = udev->cmdr_size;
844
845 WARN_ON(!PAGE_ALIGNED(udev->data_off));
846 WARN_ON(udev->data_size % PAGE_SIZE);
847
848 info->version = "1";
849
850 info->mem[0].name = "tcm-user command & data buffer";
851 info->mem[0].addr = (phys_addr_t) udev->mb_addr;
852 info->mem[0].size = TCMU_RING_SIZE;
853 info->mem[0].memtype = UIO_MEM_VIRTUAL;
854
855 info->irqcontrol = tcmu_irqcontrol;
856 info->irq = UIO_IRQ_CUSTOM;
857
858 info->mmap = tcmu_mmap;
859 info->open = tcmu_open;
860 info->release = tcmu_release;
861
862 ret = uio_register_device(tcmu_root_device, info);
863 if (ret)
864 goto err_register;
865
866 /* Other attributes can be configured in userspace */
867 dev->dev_attrib.hw_block_size = 512;
868 dev->dev_attrib.hw_max_sectors = 128;
869 dev->dev_attrib.hw_queue_depth = 128;
870
871 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
872 udev->uio_info.uio_dev->minor);
873 if (ret)
874 goto err_netlink;
875
876 return 0;
877
878err_netlink:
879 uio_unregister_device(&udev->uio_info);
880err_register:
881 vfree(udev->mb_addr);
882err_vzalloc:
883 kfree(info->name);
884
885 return ret;
886}
887
888static int tcmu_check_pending_cmd(int id, void *p, void *data)
889{
890 struct tcmu_cmd *cmd = p;
891
892 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
893 return 0;
894 return -EINVAL;
895}
896
897static void tcmu_free_device(struct se_device *dev)
898{
899 struct tcmu_dev *udev = TCMU_DEV(dev);
900 int i;
901
902 del_timer_sync(&udev->timeout);
903
904 vfree(udev->mb_addr);
905
906 /* Upper layer should drain all requests before calling this */
907 spin_lock_irq(&udev->commands_lock);
908 i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
909 idr_destroy(&udev->commands);
910 spin_unlock_irq(&udev->commands_lock);
911 WARN_ON(i);
912
913 /* Device was configured */
914 if (udev->uio_info.uio_dev) {
915 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
916 udev->uio_info.uio_dev->minor);
917
918 uio_unregister_device(&udev->uio_info);
919 kfree(udev->uio_info.name);
920 kfree(udev->name);
921 }
922
923 kfree(udev);
924}
925
926enum {
927 Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level,
928};
929
930static match_table_t tokens = {
931 {Opt_dev_config, "dev_config=%s"},
932 {Opt_dev_size, "dev_size=%u"},
933 {Opt_pass_level, "pass_level=%u"},
934 {Opt_err, NULL}
935};
936
937static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
938 const char *page, ssize_t count)
939{
940 struct tcmu_dev *udev = TCMU_DEV(dev);
941 char *orig, *ptr, *opts, *arg_p;
942 substring_t args[MAX_OPT_ARGS];
943 int ret = 0, token;
944 int arg;
945
946 opts = kstrdup(page, GFP_KERNEL);
947 if (!opts)
948 return -ENOMEM;
949
950 orig = opts;
951
952 while ((ptr = strsep(&opts, ",\n")) != NULL) {
953 if (!*ptr)
954 continue;
955
956 token = match_token(ptr, tokens, args);
957 switch (token) {
958 case Opt_dev_config:
959 if (match_strlcpy(udev->dev_config, &args[0],
960 TCMU_CONFIG_LEN) == 0) {
961 ret = -EINVAL;
962 break;
963 }
964 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
965 break;
966 case Opt_dev_size:
967 arg_p = match_strdup(&args[0]);
968 if (!arg_p) {
969 ret = -ENOMEM;
970 break;
971 }
972 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
973 kfree(arg_p);
974 if (ret < 0)
975 pr_err("kstrtoul() failed for dev_size=\n");
976 break;
977 case Opt_pass_level:
978 match_int(args, &arg);
979 if (arg >= TCMU_PASS_INVALID) {
980 pr_warn("TCMU: Invalid pass_level: %d\n", arg);
981 break;
982 }
983
984 pr_debug("TCMU: Setting pass_level to %d\n", arg);
985 udev->pass_level = arg;
986 break;
987 default:
988 break;
989 }
990 }
991
992 kfree(orig);
993 return (!ret) ? count : ret;
994}
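Given the token table above, a configuration string for this parser would look something like the line below (values purely illustrative). In practice such a string is typically written through the backstore's configfs control attribute (plumbing outside this diff), which ends up calling tcmu_set_configfs_dev_params().

	dev_config=foo/bar,dev_size=1073741824,pass_level=0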
995
996static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
997{
998 struct tcmu_dev *udev = TCMU_DEV(dev);
999 ssize_t bl = 0;
1000
1001 bl = sprintf(b + bl, "Config: %s ",
1002 udev->dev_config[0] ? udev->dev_config : "NULL");
1003 bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n",
1004 udev->dev_size, udev->pass_level);
1005
1006 return bl;
1007}
1008
1009static sector_t tcmu_get_blocks(struct se_device *dev)
1010{
1011 struct tcmu_dev *udev = TCMU_DEV(dev);
1012
1013 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
1014 dev->dev_attrib.block_size);
1015}
1016
1017static sense_reason_t
1018tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
1019 enum dma_data_direction data_direction)
1020{
1021 int ret;
1022
1023 ret = tcmu_queue_cmd(se_cmd);
1024
1025 if (ret != 0)
1026 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1027 else
1028 return TCM_NO_SENSE;
1029}
1030
1031static sense_reason_t
1032tcmu_pass_op(struct se_cmd *se_cmd)
1033{
1034 int ret = tcmu_queue_cmd(se_cmd);
1035
1036 if (ret != 0)
1037 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1038 else
1039 return TCM_NO_SENSE;
1040}
1041
1042static struct sbc_ops tcmu_sbc_ops = {
1043 .execute_rw = tcmu_execute_rw,
1044 .execute_sync_cache = tcmu_pass_op,
1045 .execute_write_same = tcmu_pass_op,
1046 .execute_write_same_unmap = tcmu_pass_op,
1047 .execute_unmap = tcmu_pass_op,
1048};
1049
1050static sense_reason_t
1051tcmu_parse_cdb(struct se_cmd *cmd)
1052{
1053 unsigned char *cdb = cmd->t_task_cdb;
1054 struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
1055 sense_reason_t ret;
1056
1057 switch (udev->pass_level) {
1058 case TCMU_PASS_ALL:
1059 /* We're just like pscsi, then */
1060 /*
1061 * For REPORT LUNS we always need to emulate the response, for everything
1062 * else, pass it up.
1063 */
1064 switch (cdb[0]) {
1065 case REPORT_LUNS:
1066 cmd->execute_cmd = spc_emulate_report_luns;
1067 break;
1068 case READ_6:
1069 case READ_10:
1070 case READ_12:
1071 case READ_16:
1072 case WRITE_6:
1073 case WRITE_10:
1074 case WRITE_12:
1075 case WRITE_16:
1076 case WRITE_VERIFY:
1077 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1078 /* FALLTHROUGH */
1079 default:
1080 cmd->execute_cmd = tcmu_pass_op;
1081 }
1082 ret = TCM_NO_SENSE;
1083 break;
1084 case TCMU_PASS_IO:
1085 ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
1086 break;
1087 default:
1088 pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
1089 ret = TCM_CHECK_CONDITION_ABORT_CMD;
1090 }
1091
1092 return ret;
1093}
1094
1095static struct se_subsystem_api tcmu_template = {
1096 .name = "user",
1097 .inquiry_prod = "USER",
1098 .inquiry_rev = TCMU_VERSION,
1099 .owner = THIS_MODULE,
1100 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
1101 .attach_hba = tcmu_attach_hba,
1102 .detach_hba = tcmu_detach_hba,
1103 .alloc_device = tcmu_alloc_device,
1104 .configure_device = tcmu_configure_device,
1105 .free_device = tcmu_free_device,
1106 .parse_cdb = tcmu_parse_cdb,
1107 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
1108 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
1109 .get_device_type = sbc_get_device_type,
1110 .get_blocks = tcmu_get_blocks,
1111};
1112
1113static int __init tcmu_module_init(void)
1114{
1115 int ret;
1116
1117 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
1118
1119 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
1120 sizeof(struct tcmu_cmd),
1121 __alignof__(struct tcmu_cmd),
1122 0, NULL);
1123 if (!tcmu_cmd_cache)
1124 return -ENOMEM;
1125
1126 tcmu_root_device = root_device_register("tcm_user");
1127 if (IS_ERR(tcmu_root_device)) {
1128 ret = PTR_ERR(tcmu_root_device);
1129 goto out_free_cache;
1130 }
1131
1132 ret = genl_register_family(&tcmu_genl_family);
1133 if (ret < 0) {
1134 goto out_unreg_device;
1135 }
1136
1137 ret = transport_subsystem_register(&tcmu_template);
1138 if (ret)
1139 goto out_unreg_genl;
1140
1141 return 0;
1142
1143out_unreg_genl:
1144 genl_unregister_family(&tcmu_genl_family);
1145out_unreg_device:
1146 root_device_unregister(tcmu_root_device);
1147out_free_cache:
1148 kmem_cache_destroy(tcmu_cmd_cache);
1149
1150 return ret;
1151}
1152
1153static void __exit tcmu_module_exit(void)
1154{
1155 transport_subsystem_release(&tcmu_template);
1156 genl_unregister_family(&tcmu_genl_family);
1157 root_device_unregister(tcmu_root_device);
1158 kmem_cache_destroy(tcmu_cmd_cache);
1159}
1160
1161MODULE_DESCRIPTION("TCM USER subsystem plugin");
1162MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
1163MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
1164MODULE_LICENSE("GPL");
1165
1166module_init(tcmu_module_init);
1167module_exit(tcmu_module_exit);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 21ce50880c79..ccee7e332a4d 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -98,7 +98,7 @@ static void ft_tport_delete(struct ft_tport *tport)
98 ft_sess_delete_all(tport); 98 ft_sess_delete_all(tport);
99 lport = tport->lport; 99 lport = tport->lport;
100 BUG_ON(tport != lport->prov[FC_TYPE_FCP]); 100 BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
101 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL); 101 RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
102 102
103 tpg = tport->tpg; 103 tpg = tport->tpg;
104 if (tpg) { 104 if (tpg) {
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index ef5587fe2c69..f554d25b4399 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -84,6 +84,16 @@ config THERMAL_GOV_STEP_WISE
84 Enable this to manage platform thermals using a simple linear 84 Enable this to manage platform thermals using a simple linear
85 governor. 85 governor.
86 86
87config THERMAL_GOV_BANG_BANG
88 bool "Bang Bang thermal governor"
89 default n
90 help
 91	Enable this to manage platform thermals using the bang bang governor.
92
 93	Say 'Y' here if you want two-point temperature regulation for fans
 94	that are switched on and off rather than throttled. Some fan drivers
 95	depend on this governor being enabled (e.g. acerhdf).
96
87config THERMAL_GOV_USER_SPACE 97config THERMAL_GOV_USER_SPACE
88 bool "User_space thermal governor" 98 bool "User_space thermal governor"
89 help 99 help
@@ -207,21 +217,6 @@ config X86_PKG_TEMP_THERMAL
207 two trip points which can be set by user to get notifications via thermal 217 two trip points which can be set by user to get notifications via thermal
208 notification methods. 218 notification methods.
209 219
210config ACPI_INT3403_THERMAL
211 tristate "ACPI INT3403 thermal driver"
212 depends on X86 && ACPI
213 help
214 Newer laptops and tablets that use ACPI may have thermal sensors
215 outside the core CPU/SOC for thermal safety reasons. These
216 temperature sensors are also exposed for the OS to use via the so
217 called INT3403 ACPI object. This driver will, on devices that have
218 such sensors, expose the temperature information from these sensors
219 to userspace via the normal thermal framework. This means that a wide
220 range of applications and GUI widgets can show this information to
221 the user or use this information for making decisions. For example,
222 the Intel Thermal Daemon can use this information to allow the user
223 to select his laptop to run without turning on the fans.
224
225config INTEL_SOC_DTS_THERMAL 220config INTEL_SOC_DTS_THERMAL
226 tristate "Intel SoCs DTS thermal driver" 221 tristate "Intel SoCs DTS thermal driver"
227 depends on X86 && IOSF_MBI 222 depends on X86 && IOSF_MBI
@@ -234,6 +229,30 @@ config INTEL_SOC_DTS_THERMAL
 234	notification methods. The other trip is a critical trip point, which 229	notification methods. The other trip is a critical trip point, which
235 was set by the driver based on the TJ MAX temperature. 230 was set by the driver based on the TJ MAX temperature.
236 231
232config INT340X_THERMAL
233 tristate "ACPI INT340X thermal drivers"
234 depends on X86 && ACPI
235 select THERMAL_GOV_USER_SPACE
236 select ACPI_THERMAL_REL
237 select ACPI_FAN
238 help
239 Newer laptops and tablets that use ACPI may have thermal sensors and
240 other devices with thermal control capabilities outside the core
241 CPU/SOC, for thermal safety reasons.
242 They are exposed for the OS to use via the INT3400 ACPI device object
243 as the master, and INT3401~INT340B ACPI device objects as the slaves.
244 Enable this to expose the temperature information and cooling ability
245 from these objects to userspace via the normal thermal framework.
246 This means that a wide range of applications and GUI widgets can show
247 the information to the user or use this information for making
248 decisions. For example, the Intel Thermal Daemon can use this
 249	  information to let the user run the laptop without
 250	  turning on the fans.
251
252config ACPI_THERMAL_REL
253 tristate
254 depends on ACPI
255
237menu "Texas Instruments thermal drivers" 256menu "Texas Instruments thermal drivers"
238source "drivers/thermal/ti-soc-thermal/Kconfig" 257source "drivers/thermal/ti-soc-thermal/Kconfig"
239endmenu 258endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 31e232f84b6b..39c4fe87da2f 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -11,6 +11,7 @@ thermal_sys-$(CONFIG_THERMAL_OF) += of-thermal.o
11 11
12# governors 12# governors
13thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o 13thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
14thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o
14thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o 15thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
15thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o 16thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
16 17
@@ -31,5 +32,5 @@ obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
31obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o 32obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
32obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o 33obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
33obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ 34obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
34obj-$(CONFIG_ACPI_INT3403_THERMAL) += int3403_thermal.o 35obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
35obj-$(CONFIG_ST_THERMAL) += st/ 36obj-$(CONFIG_ST_THERMAL) += st/
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 944ba2f340c8..6e0a3fbfae86 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/thermal.h> 25#include <linux/thermal.h>
26#include <trace/events/thermal.h>
26 27
27#include "thermal_core.h" 28#include "thermal_core.h"
28 29
@@ -34,6 +35,7 @@ static int get_trip_level(struct thermal_zone_device *tz)
34{ 35{
35 int count = 0; 36 int count = 0;
36 unsigned long trip_temp; 37 unsigned long trip_temp;
38 enum thermal_trip_type trip_type;
37 39
38 if (tz->trips == 0 || !tz->ops->get_trip_temp) 40 if (tz->trips == 0 || !tz->ops->get_trip_temp)
39 return 0; 41 return 0;
@@ -43,6 +45,16 @@ static int get_trip_level(struct thermal_zone_device *tz)
43 if (tz->temperature < trip_temp) 45 if (tz->temperature < trip_temp)
44 break; 46 break;
45 } 47 }
48
49 /*
50 * count > 0 only if temperature is greater than first trip
51 * point, in which case, trip_point = count - 1
52 */
53 if (count > 0) {
54 tz->ops->get_trip_type(tz, count - 1, &trip_type);
55 trace_thermal_zone_trip(tz, count - 1, trip_type);
56 }
57
46 return count; 58 return count;
47} 59}
48 60
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
new file mode 100644
index 000000000000..c5dd76b2ee74
--- /dev/null
+++ b/drivers/thermal/gov_bang_bang.c
@@ -0,0 +1,131 @@
1/*
2 * gov_bang_bang.c - A simple thermal throttling governor using hysteresis
3 *
4 * Copyright (C) 2014 Peter Feuerer <peter@piie.net>
5 *
6 * Based on step_wise.c with following Copyrights:
7 * Copyright (C) 2012 Intel Corp
8 * Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, version 2.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU General Public License for more details.
19 *
20 */
21
22#include <linux/thermal.h>
23
24#include "thermal_core.h"
25
26static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
27{
28 long trip_temp;
29 unsigned long trip_hyst;
30 struct thermal_instance *instance;
31
32 tz->ops->get_trip_temp(tz, trip, &trip_temp);
33 tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
34
35 dev_dbg(&tz->device, "Trip%d[temp=%ld]:temp=%d:hyst=%ld\n",
36 trip, trip_temp, tz->temperature,
37 trip_hyst);
38
39 mutex_lock(&tz->lock);
40
41 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
42 if (instance->trip != trip)
43 continue;
44
45 /* in case fan is in initial state, switch the fan off */
46 if (instance->target == THERMAL_NO_TARGET)
47 instance->target = 0;
48
 49		/* in case the fan is neither on nor off, set it to active */
50 if (instance->target != 0 && instance->target != 1) {
51 pr_warn("Thermal instance %s controlled by bang-bang has unexpected state: %ld\n",
52 instance->name, instance->target);
53 instance->target = 1;
54 }
55
56 /*
57 * enable fan when temperature exceeds trip_temp and disable
58 * the fan in case it falls below trip_temp minus hysteresis
59 */
60 if (instance->target == 0 && tz->temperature >= trip_temp)
61 instance->target = 1;
62 else if (instance->target == 1 &&
63 tz->temperature < trip_temp - trip_hyst)
64 instance->target = 0;
65
66 dev_dbg(&instance->cdev->device, "target=%d\n",
67 (int)instance->target);
68
69 instance->cdev->updated = false; /* cdev needs update */
70 }
71
72 mutex_unlock(&tz->lock);
73}
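/*
 * Worked example with hypothetical values: trip_temp = 60000 (60 C in
 * millicelsius) and trip_hyst = 5000. A fan that is off (target == 0) is
 * switched on once tz->temperature reaches 60000, and it is only switched
 * off again when the temperature drops below 60000 - 5000 = 55000, so small
 * fluctuations around the trip point do not toggle the fan.
 */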
74
75/**
76 * bang_bang_control - controls devices associated with the given zone
77 * @tz - thermal_zone_device
78 * @trip - the trip point
79 *
80 * Regulation Logic: a two point regulation, deliver cooling state depending
81 * on the previous state shown in this diagram:
82 *
83 * Fan: OFF ON
84 *
85 * |
86 * |
87 * trip_temp: +---->+
88 * | | ^
89 * | | |
90 * | | Temperature
91 * (trip_temp - hyst): +<----+
92 * |
93 * |
94 * |
95 *
96 * * If the fan is not running and temperature exceeds trip_temp, the fan
97 * gets turned on.
98 * * In case the fan is running, temperature must fall below
99 * (trip_temp - hyst) so that the fan gets turned off again.
100 *
101 */
102static int bang_bang_control(struct thermal_zone_device *tz, int trip)
103{
104 struct thermal_instance *instance;
105
106 thermal_zone_trip_update(tz, trip);
107
108 mutex_lock(&tz->lock);
109
110 list_for_each_entry(instance, &tz->thermal_instances, tz_node)
111 thermal_cdev_update(instance->cdev);
112
113 mutex_unlock(&tz->lock);
114
115 return 0;
116}
117
118static struct thermal_governor thermal_gov_bang_bang = {
119 .name = "bang_bang",
120 .throttle = bang_bang_control,
121};
122
123int thermal_gov_bang_bang_register(void)
124{
125 return thermal_register_governor(&thermal_gov_bang_bang);
126}
127
128void thermal_gov_bang_bang_unregister(void)
129{
130 thermal_unregister_governor(&thermal_gov_bang_bang);
131}
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 2c516f2eebed..461bf3d033a0 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -19,6 +19,7 @@
19#include <linux/mfd/syscon.h> 19#include <linux/mfd/syscon.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_device.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -31,6 +32,11 @@
31 32
32#define MISC0 0x0150 33#define MISC0 0x0150
33#define MISC0_REFTOP_SELBIASOFF (1 << 3) 34#define MISC0_REFTOP_SELBIASOFF (1 << 3)
35#define MISC1 0x0160
36#define MISC1_IRQ_TEMPHIGH (1 << 29)
37/* Below LOW and PANIC bits are only for TEMPMON_IMX6SX */
38#define MISC1_IRQ_TEMPLOW (1 << 28)
39#define MISC1_IRQ_TEMPPANIC (1 << 27)
34 40
35#define TEMPSENSE0 0x0180 41#define TEMPSENSE0 0x0180
36#define TEMPSENSE0_ALARM_VALUE_SHIFT 20 42#define TEMPSENSE0_ALARM_VALUE_SHIFT 20
@@ -43,6 +49,12 @@
43 49
44#define TEMPSENSE1 0x0190 50#define TEMPSENSE1 0x0190
45#define TEMPSENSE1_MEASURE_FREQ 0xffff 51#define TEMPSENSE1_MEASURE_FREQ 0xffff
52/* Below TEMPSENSE2 is only for TEMPMON_IMX6SX */
53#define TEMPSENSE2 0x0290
54#define TEMPSENSE2_LOW_VALUE_SHIFT 0
55#define TEMPSENSE2_LOW_VALUE_MASK 0xfff
56#define TEMPSENSE2_PANIC_VALUE_SHIFT 16
57#define TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000
46 58
47#define OCOTP_ANA1 0x04e0 59#define OCOTP_ANA1 0x04e0
48 60
@@ -66,6 +78,21 @@ enum imx_thermal_trip {
66#define FACTOR1 15976 78#define FACTOR1 15976
67#define FACTOR2 4297157 79#define FACTOR2 4297157
68 80
81#define TEMPMON_IMX6Q 1
82#define TEMPMON_IMX6SX 2
83
84struct thermal_soc_data {
85 u32 version;
86};
87
88static struct thermal_soc_data thermal_imx6q_data = {
89 .version = TEMPMON_IMX6Q,
90};
91
92static struct thermal_soc_data thermal_imx6sx_data = {
93 .version = TEMPMON_IMX6SX,
94};
95
69struct imx_thermal_data { 96struct imx_thermal_data {
70 struct thermal_zone_device *tz; 97 struct thermal_zone_device *tz;
71 struct thermal_cooling_device *cdev; 98 struct thermal_cooling_device *cdev;
@@ -79,8 +106,21 @@ struct imx_thermal_data {
79 bool irq_enabled; 106 bool irq_enabled;
80 int irq; 107 int irq;
81 struct clk *thermal_clk; 108 struct clk *thermal_clk;
109 const struct thermal_soc_data *socdata;
82}; 110};
83 111
112static void imx_set_panic_temp(struct imx_thermal_data *data,
113 signed long panic_temp)
114{
115 struct regmap *map = data->tempmon;
116 int critical_value;
117
118 critical_value = (data->c2 - panic_temp) / data->c1;
119 regmap_write(map, TEMPSENSE2 + REG_CLR, TEMPSENSE2_PANIC_VALUE_MASK);
120 regmap_write(map, TEMPSENSE2 + REG_SET, critical_value <<
121 TEMPSENSE2_PANIC_VALUE_SHIFT);
122}
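/*
 * Note: imx_get_temp() computes temp = c2 - n_meas * c1, so the value
 * programmed above is simply that relation solved for the raw sensor count,
 * critical_value = (c2 - panic_temp) / c1, which is what the TEMPSENSE2
 * PANIC field holds.
 */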
123
84static void imx_set_alarm_temp(struct imx_thermal_data *data, 124static void imx_set_alarm_temp(struct imx_thermal_data *data,
85 signed long alarm_temp) 125 signed long alarm_temp)
86{ 126{
@@ -142,13 +182,17 @@ static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
142 /* See imx_get_sensor_data() for formula derivation */ 182 /* See imx_get_sensor_data() for formula derivation */
143 *temp = data->c2 - n_meas * data->c1; 183 *temp = data->c2 - n_meas * data->c1;
144 184
145 /* Update alarm value to next higher trip point */ 185 /* Update alarm value to next higher trip point for TEMPMON_IMX6Q */
146 if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive) 186 if (data->socdata->version == TEMPMON_IMX6Q) {
147 imx_set_alarm_temp(data, data->temp_critical); 187 if (data->alarm_temp == data->temp_passive &&
148 if (data->alarm_temp == data->temp_critical && *temp < data->temp_passive) { 188 *temp >= data->temp_passive)
149 imx_set_alarm_temp(data, data->temp_passive); 189 imx_set_alarm_temp(data, data->temp_critical);
150 dev_dbg(&tz->device, "thermal alarm off: T < %lu\n", 190 if (data->alarm_temp == data->temp_critical &&
151 data->alarm_temp / 1000); 191 *temp < data->temp_passive) {
192 imx_set_alarm_temp(data, data->temp_passive);
193 dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
194 data->alarm_temp / 1000);
195 }
152 } 196 }
153 197
154 if (*temp != data->last_temp) { 198 if (*temp != data->last_temp) {
@@ -398,8 +442,17 @@ static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev)
398 return IRQ_HANDLED; 442 return IRQ_HANDLED;
399} 443}
400 444
445static const struct of_device_id of_imx_thermal_match[] = {
446 { .compatible = "fsl,imx6q-tempmon", .data = &thermal_imx6q_data, },
447 { .compatible = "fsl,imx6sx-tempmon", .data = &thermal_imx6sx_data, },
448 { /* end */ }
449};
450MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
451
401static int imx_thermal_probe(struct platform_device *pdev) 452static int imx_thermal_probe(struct platform_device *pdev)
402{ 453{
454 const struct of_device_id *of_id =
455 of_match_device(of_imx_thermal_match, &pdev->dev);
403 struct imx_thermal_data *data; 456 struct imx_thermal_data *data;
404 struct cpumask clip_cpus; 457 struct cpumask clip_cpus;
405 struct regmap *map; 458 struct regmap *map;
@@ -418,6 +471,20 @@ static int imx_thermal_probe(struct platform_device *pdev)
418 } 471 }
419 data->tempmon = map; 472 data->tempmon = map;
420 473
474 data->socdata = of_id->data;
475
476 /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */
477 if (data->socdata->version == TEMPMON_IMX6SX) {
478 regmap_write(map, MISC1 + REG_CLR, MISC1_IRQ_TEMPHIGH |
479 MISC1_IRQ_TEMPLOW | MISC1_IRQ_TEMPPANIC);
480 /*
481 * reset value of LOW ALARM is incorrect, set it to lowest
482 * value to avoid false trigger of low alarm.
483 */
484 regmap_write(map, TEMPSENSE2 + REG_SET,
485 TEMPSENSE2_LOW_VALUE_MASK);
486 }
487
421 data->irq = platform_get_irq(pdev, 0); 488 data->irq = platform_get_irq(pdev, 0);
422 if (data->irq < 0) 489 if (data->irq < 0)
423 return data->irq; 490 return data->irq;
@@ -489,6 +556,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
489 measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ 556 measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
490 regmap_write(map, TEMPSENSE1 + REG_SET, measure_freq); 557 regmap_write(map, TEMPSENSE1 + REG_SET, measure_freq);
491 imx_set_alarm_temp(data, data->temp_passive); 558 imx_set_alarm_temp(data, data->temp_passive);
559
560 if (data->socdata->version == TEMPMON_IMX6SX)
561 imx_set_panic_temp(data, data->temp_critical);
562
492 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); 563 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
493 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); 564 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
494 565
@@ -550,12 +621,6 @@ static int imx_thermal_resume(struct device *dev)
550static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops, 621static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
551 imx_thermal_suspend, imx_thermal_resume); 622 imx_thermal_suspend, imx_thermal_resume);
552 623
553static const struct of_device_id of_imx_thermal_match[] = {
554 { .compatible = "fsl,imx6q-tempmon", },
555 { /* end */ }
556};
557MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
558
559static struct platform_driver imx_thermal = { 624static struct platform_driver imx_thermal = {
560 .driver = { 625 .driver = {
561 .name = "imx_thermal", 626 .name = "imx_thermal",
diff --git a/drivers/thermal/int3403_thermal.c b/drivers/thermal/int3403_thermal.c
deleted file mode 100644
index 17554eeb3953..000000000000
--- a/drivers/thermal/int3403_thermal.c
+++ /dev/null
@@ -1,296 +0,0 @@
1/*
2 * ACPI INT3403 thermal driver
3 * Copyright (c) 2013, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/acpi.h>
20#include <linux/thermal.h>
21
22#define INT3403_TYPE_SENSOR 0x03
23#define INT3403_PERF_CHANGED_EVENT 0x80
24#define INT3403_THERMAL_EVENT 0x90
25
26#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
27#define KELVIN_OFFSET 2732
28#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
29
30#define ACPI_INT3403_CLASS "int3403"
31#define ACPI_INT3403_FILE_STATE "state"
32
33struct int3403_sensor {
34 struct thermal_zone_device *tzone;
35 unsigned long *thresholds;
36 unsigned long crit_temp;
37 int crit_trip_id;
38 unsigned long psv_temp;
39 int psv_trip_id;
40};
41
42static int sys_get_curr_temp(struct thermal_zone_device *tzone,
43 unsigned long *temp)
44{
45 struct acpi_device *device = tzone->devdata;
46 unsigned long long tmp;
47 acpi_status status;
48
49 status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
50 if (ACPI_FAILURE(status))
51 return -EIO;
52
53 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
54
55 return 0;
56}
57
58static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
59 int trip, unsigned long *temp)
60{
61 struct acpi_device *device = tzone->devdata;
62 unsigned long long hyst;
63 acpi_status status;
64
65 status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
66 if (ACPI_FAILURE(status))
67 return -EIO;
68
69 /*
70 * Thermal hysteresis represents a temperature difference.
71 * Kelvin and Celsius have same degree size. So the
72 * conversion here between tenths of degree Kelvin unit
73 * and Milli-Celsius unit is just to multiply 100.
74 */
75 *temp = hyst * 100;
76
77 return 0;
78}
79
80static int sys_get_trip_temp(struct thermal_zone_device *tzone,
81 int trip, unsigned long *temp)
82{
83 struct acpi_device *device = tzone->devdata;
84 struct int3403_sensor *obj = acpi_driver_data(device);
85
86 if (trip == obj->crit_trip_id)
87 *temp = obj->crit_temp;
88 else if (trip == obj->psv_trip_id)
89 *temp = obj->psv_temp;
90 else {
91 /*
92 * get_trip_temp is a mandatory callback but
93 * PATx method doesn't return any value, so return
94 * cached value, which was last set from user space.
95 */
96 *temp = obj->thresholds[trip];
97 }
98
99 return 0;
100}
101
102static int sys_get_trip_type(struct thermal_zone_device *thermal,
103 int trip, enum thermal_trip_type *type)
104{
105 struct acpi_device *device = thermal->devdata;
106 struct int3403_sensor *obj = acpi_driver_data(device);
107
108 /* Mandatory callback, may not mean much here */
109 if (trip == obj->crit_trip_id)
110 *type = THERMAL_TRIP_CRITICAL;
111 else
112 *type = THERMAL_TRIP_PASSIVE;
113
114 return 0;
115}
116
117int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
118 unsigned long temp)
119{
120 struct acpi_device *device = tzone->devdata;
121 acpi_status status;
122 char name[10];
123 int ret = 0;
124 struct int3403_sensor *obj = acpi_driver_data(device);
125
126 snprintf(name, sizeof(name), "PAT%d", trip);
127 if (acpi_has_method(device->handle, name)) {
128 status = acpi_execute_simple_method(device->handle, name,
129 MILLI_CELSIUS_TO_DECI_KELVIN(temp,
130 KELVIN_OFFSET));
131 if (ACPI_FAILURE(status))
132 ret = -EIO;
133 else
134 obj->thresholds[trip] = temp;
135 } else {
136 ret = -EIO;
137 dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
138 }
139
140 return ret;
141}
142
143static struct thermal_zone_device_ops tzone_ops = {
144 .get_temp = sys_get_curr_temp,
145 .get_trip_temp = sys_get_trip_temp,
146 .get_trip_type = sys_get_trip_type,
147 .set_trip_temp = sys_set_trip_temp,
148 .get_trip_hyst = sys_get_trip_hyst,
149};
150
151static void acpi_thermal_notify(struct acpi_device *device, u32 event)
152{
153 struct int3403_sensor *obj;
154
155 if (!device)
156 return;
157
158 obj = acpi_driver_data(device);
159 if (!obj)
160 return;
161
162 switch (event) {
163 case INT3403_PERF_CHANGED_EVENT:
164 break;
165 case INT3403_THERMAL_EVENT:
166 thermal_zone_device_update(obj->tzone);
167 break;
168 default:
169 dev_err(&device->dev, "Unsupported event [0x%x]\n", event);
170 break;
171 }
172}
173
174static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
175{
176 unsigned long long crt;
177 acpi_status status;
178
179 status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
180 if (ACPI_FAILURE(status))
181 return -EIO;
182
183 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
184
185 return 0;
186}
187
188static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
189{
190 unsigned long long psv;
191 acpi_status status;
192
193 status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
194 if (ACPI_FAILURE(status))
195 return -EIO;
196
197 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
198
199 return 0;
200}
201
202static int acpi_int3403_add(struct acpi_device *device)
203{
204 int result = 0;
205 unsigned long long ptyp;
206 acpi_status status;
207 struct int3403_sensor *obj;
208 unsigned long long trip_cnt;
209 int trip_mask = 0;
210
211 if (!device)
212 return -EINVAL;
213
214 status = acpi_evaluate_integer(device->handle, "PTYP", NULL, &ptyp);
215 if (ACPI_FAILURE(status))
216 return -EINVAL;
217
218 if (ptyp != INT3403_TYPE_SENSOR)
219 return -EINVAL;
220
221 obj = devm_kzalloc(&device->dev, sizeof(*obj), GFP_KERNEL);
222 if (!obj)
223 return -ENOMEM;
224
225 device->driver_data = obj;
226
227 status = acpi_evaluate_integer(device->handle, "PATC", NULL,
228 &trip_cnt);
229 if (ACPI_FAILURE(status))
230 trip_cnt = 0;
231
232 if (trip_cnt) {
233 /* We have to cache, thresholds can't be readback */
234 obj->thresholds = devm_kzalloc(&device->dev,
235 sizeof(*obj->thresholds) * trip_cnt,
236 GFP_KERNEL);
237 if (!obj->thresholds)
238 return -ENOMEM;
239 trip_mask = BIT(trip_cnt) - 1;
240 }
241
242 obj->psv_trip_id = -1;
243 if (!sys_get_trip_psv(device, &obj->psv_temp))
244 obj->psv_trip_id = trip_cnt++;
245
246 obj->crit_trip_id = -1;
247 if (!sys_get_trip_crt(device, &obj->crit_temp))
248 obj->crit_trip_id = trip_cnt++;
249
250 obj->tzone = thermal_zone_device_register(acpi_device_bid(device),
251 trip_cnt, trip_mask, device, &tzone_ops,
252 NULL, 0, 0);
253 if (IS_ERR(obj->tzone)) {
254 result = PTR_ERR(obj->tzone);
255 return result;
256 }
257
258 strcpy(acpi_device_name(device), "INT3403");
259 strcpy(acpi_device_class(device), ACPI_INT3403_CLASS);
260
261 return 0;
262}
263
264static int acpi_int3403_remove(struct acpi_device *device)
265{
266 struct int3403_sensor *obj;
267
268 obj = acpi_driver_data(device);
269 thermal_zone_device_unregister(obj->tzone);
270
271 return 0;
272}
273
274ACPI_MODULE_NAME("int3403");
275static const struct acpi_device_id int3403_device_ids[] = {
276 {"INT3403", 0},
277 {"", 0},
278};
279MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
280
281static struct acpi_driver acpi_int3403_driver = {
282 .name = "INT3403",
283 .class = ACPI_INT3403_CLASS,
284 .ids = int3403_device_ids,
285 .ops = {
286 .add = acpi_int3403_add,
287 .remove = acpi_int3403_remove,
288 .notify = acpi_thermal_notify,
289 },
290};
291
292module_acpi_driver(acpi_int3403_driver);
293
294MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
295MODULE_LICENSE("GPL v2");
296MODULE_DESCRIPTION("ACPI INT3403 thermal driver");
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
new file mode 100644
index 000000000000..ffe40bffaf1a
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
2obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
3obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
4obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
new file mode 100644
index 000000000000..0d8db808f0ae
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -0,0 +1,400 @@
1/* acpi_thermal_rel.c driver for exporting ACPI thermal relationship
2 *
3 * Copyright (c) 2014 Intel Corp
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 */
10
11/*
12 * Two functionalities included:
13 * 1. Export _TRT, _ART, via misc device interface to the userspace.
14 * 2. Provide parsing result to kernel drivers
15 *
16 */
17#include <linux/init.h>
18#include <linux/export.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/platform_device.h>
22#include <linux/io.h>
23#include <linux/acpi.h>
24#include <linux/uaccess.h>
25#include <linux/miscdevice.h>
26#include "acpi_thermal_rel.h"
27
28static acpi_handle acpi_thermal_rel_handle;
29static DEFINE_SPINLOCK(acpi_thermal_rel_chrdev_lock);
30static int acpi_thermal_rel_chrdev_count; /* #times opened */
31static int acpi_thermal_rel_chrdev_exclu; /* already open exclusive? */
32
33static int acpi_thermal_rel_open(struct inode *inode, struct file *file)
34{
35 spin_lock(&acpi_thermal_rel_chrdev_lock);
36 if (acpi_thermal_rel_chrdev_exclu ||
37 (acpi_thermal_rel_chrdev_count && (file->f_flags & O_EXCL))) {
38 spin_unlock(&acpi_thermal_rel_chrdev_lock);
39 return -EBUSY;
40 }
41
42 if (file->f_flags & O_EXCL)
43 acpi_thermal_rel_chrdev_exclu = 1;
44 acpi_thermal_rel_chrdev_count++;
45
46 spin_unlock(&acpi_thermal_rel_chrdev_lock);
47
48 return nonseekable_open(inode, file);
49}
50
51static int acpi_thermal_rel_release(struct inode *inode, struct file *file)
52{
53 spin_lock(&acpi_thermal_rel_chrdev_lock);
54 acpi_thermal_rel_chrdev_count--;
55 acpi_thermal_rel_chrdev_exclu = 0;
56 spin_unlock(&acpi_thermal_rel_chrdev_lock);
57
58 return 0;
59}
60
61/**
 62 * acpi_parse_trt - Parse the Thermal Relationship Table _TRT for passive cooling
 63 *
 64 * @handle: ACPI handle of the device that contains _TRT
 65 * @trt_count: the number of valid entries resulting from parsing _TRT
 66 * @trtp: pointer to pointer of array of trt entries in parsing result
67 * @create_dev: whether to create platform devices for target and source
68 *
69 */
70int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
71 bool create_dev)
72{
73 acpi_status status;
74 int result = 0;
75 int i;
76 int nr_bad_entries = 0;
77 struct trt *trts;
78 struct acpi_device *adev;
79 union acpi_object *p;
80 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
81 struct acpi_buffer element = { 0, NULL };
82 struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
83
84 if (!acpi_has_method(handle, "_TRT"))
85 return 0;
86
87 status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
88 if (ACPI_FAILURE(status))
89 return -ENODEV;
90
91 p = buffer.pointer;
92 if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
93 pr_err("Invalid _TRT data\n");
94 result = -EFAULT;
95 goto end;
96 }
97
98 *trt_count = p->package.count;
99 trts = kzalloc(*trt_count * sizeof(struct trt), GFP_KERNEL);
100 if (!trts) {
101 result = -ENOMEM;
102 goto end;
103 }
104
105 for (i = 0; i < *trt_count; i++) {
106 struct trt *trt = &trts[i - nr_bad_entries];
107
108 element.length = sizeof(struct trt);
109 element.pointer = trt;
110
111 status = acpi_extract_package(&(p->package.elements[i]),
112 &trt_format, &element);
113 if (ACPI_FAILURE(status)) {
114 nr_bad_entries++;
115 pr_warn("_TRT package %d is invalid, ignored\n", i);
116 continue;
117 }
118 if (!create_dev)
119 continue;
120
121 result = acpi_bus_get_device(trt->source, &adev);
122 if (!result)
123 acpi_create_platform_device(adev);
124 else
125 pr_warn("Failed to get source ACPI device\n");
126
127 result = acpi_bus_get_device(trt->target, &adev);
128 if (!result)
129 acpi_create_platform_device(adev);
130 else
131 pr_warn("Failed to get target ACPI device\n");
132 }
133
134 *trtp = trts;
135 /* don't count bad entries */
136 *trt_count -= nr_bad_entries;
137end:
138 kfree(buffer.pointer);
139 return result;
140}
141EXPORT_SYMBOL(acpi_parse_trt);
142
143/**
144 * acpi_parse_art - Parse Active Relationship Table _ART
145 *
 146 * @handle: ACPI handle of the device that contains _ART
 147 * @art_count: the number of valid entries resulting from parsing _ART
 148 * @artp: pointer to pointer of array of art entries in parsing result
149 * @create_dev: whether to create platform devices for target and source
150 *
151 */
152int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
153 bool create_dev)
154{
155 acpi_status status;
156 int result = 0;
157 int i;
158 int nr_bad_entries = 0;
159 struct art *arts;
160 struct acpi_device *adev;
161 union acpi_object *p;
162 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
163 struct acpi_buffer element = { 0, NULL };
164 struct acpi_buffer art_format = {
165 sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
166
167 if (!acpi_has_method(handle, "_ART"))
168 return 0;
169
170 status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
171 if (ACPI_FAILURE(status))
172 return -ENODEV;
173
174 p = buffer.pointer;
175 if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
176 pr_err("Invalid _ART data\n");
177 result = -EFAULT;
178 goto end;
179 }
180
181 /* ignore p->package.elements[0], as this is _ART Revision field */
182 *art_count = p->package.count - 1;
183 arts = kzalloc(*art_count * sizeof(struct art), GFP_KERNEL);
184 if (!arts) {
185 result = -ENOMEM;
186 goto end;
187 }
188
189 for (i = 0; i < *art_count; i++) {
190 struct art *art = &arts[i - nr_bad_entries];
191
192 element.length = sizeof(struct art);
193 element.pointer = art;
194
195 status = acpi_extract_package(&(p->package.elements[i + 1]),
196 &art_format, &element);
197 if (ACPI_FAILURE(status)) {
 198			pr_warn("_ART package %d is invalid, ignored\n", i);
199 nr_bad_entries++;
200 continue;
201 }
202 if (!create_dev)
203 continue;
204
205 if (art->source) {
206 result = acpi_bus_get_device(art->source, &adev);
207 if (!result)
208 acpi_create_platform_device(adev);
209 else
210 pr_warn("Failed to get source ACPI device\n");
211 }
212 if (art->target) {
213 result = acpi_bus_get_device(art->target, &adev);
214 if (!result)
215 acpi_create_platform_device(adev);
216 else
 217				pr_warn("Failed to get target ACPI device\n");
218 }
219 }
220
221 *artp = arts;
222 /* don't count bad entries */
223 *art_count -= nr_bad_entries;
224end:
225 kfree(buffer.pointer);
226 return result;
227}
228EXPORT_SYMBOL(acpi_parse_art);
229
230
231/* get device name from acpi handle */
232static void get_single_name(acpi_handle handle, char *name)
233{
234 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
235
236 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)))
 237		pr_warn("Failed to get name from handle\n");
238 else {
239 memcpy(name, buffer.pointer, ACPI_NAME_SIZE);
240 kfree(buffer.pointer);
241 }
242}
243
244static int fill_art(char __user *ubuf)
245{
246 int i;
247 int ret;
248 int count;
249 int art_len;
250 struct art *arts = NULL;
251 union art_object *art_user;
252
253 ret = acpi_parse_art(acpi_thermal_rel_handle, &count, &arts, false);
254 if (ret)
255 goto free_art;
256 art_len = count * sizeof(union art_object);
257 art_user = kzalloc(art_len, GFP_KERNEL);
258 if (!art_user) {
259 ret = -ENOMEM;
260 goto free_art;
261 }
262 /* now fill in user art data */
263 for (i = 0; i < count; i++) {
264 /* userspace art needs device name instead of acpi reference */
265 get_single_name(arts[i].source, art_user[i].source_device);
266 get_single_name(arts[i].target, art_user[i].target_device);
 267		/* copy the remaining integer fields, everything after source and target */
268 memcpy(&art_user[i].weight, &arts[i].weight,
269 sizeof(u64) * (ACPI_NR_ART_ELEMENTS - 2));
270 }
271
272 if (copy_to_user(ubuf, art_user, art_len))
273 ret = -EFAULT;
274 kfree(art_user);
275free_art:
276 kfree(arts);
277 return ret;
278}
279
280static int fill_trt(char __user *ubuf)
281{
282 int i;
283 int ret;
284 int count;
285 int trt_len;
286 struct trt *trts = NULL;
287 union trt_object *trt_user;
288
289 ret = acpi_parse_trt(acpi_thermal_rel_handle, &count, &trts, false);
290 if (ret)
291 goto free_trt;
292 trt_len = count * sizeof(union trt_object);
293 trt_user = kzalloc(trt_len, GFP_KERNEL);
294 if (!trt_user) {
295 ret = -ENOMEM;
296 goto free_trt;
297 }
298 /* now fill in user trt data */
299 for (i = 0; i < count; i++) {
300 /* userspace trt needs device name instead of acpi reference */
301 get_single_name(trts[i].source, trt_user[i].source_device);
302 get_single_name(trts[i].target, trt_user[i].target_device);
303 trt_user[i].sample_period = trts[i].sample_period;
304 trt_user[i].influence = trts[i].influence;
305 }
306
307 if (copy_to_user(ubuf, trt_user, trt_len))
308 ret = -EFAULT;
309 kfree(trt_user);
310free_trt:
311 kfree(trts);
312 return ret;
313}
314
315static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
316 unsigned long __arg)
317{
318 int ret = 0;
319 unsigned long length = 0;
320 unsigned long count = 0;
321 char __user *arg = (void __user *)__arg;
322 struct trt *trts;
323 struct art *arts;
324
325 switch (cmd) {
326 case ACPI_THERMAL_GET_TRT_COUNT:
327 ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count,
328 &trts, false);
329 kfree(trts);
330 if (!ret)
331 return put_user(count, (unsigned long __user *)__arg);
332 return ret;
333 case ACPI_THERMAL_GET_TRT_LEN:
334 ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count,
335 &trts, false);
336 kfree(trts);
337 length = count * sizeof(union trt_object);
338 if (!ret)
339 return put_user(length, (unsigned long __user *)__arg);
340 return ret;
341 case ACPI_THERMAL_GET_TRT:
342 return fill_trt(arg);
343 case ACPI_THERMAL_GET_ART_COUNT:
344 ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count,
345 &arts, false);
346 kfree(arts);
347 if (!ret)
348 return put_user(count, (unsigned long __user *)__arg);
349 return ret;
350 case ACPI_THERMAL_GET_ART_LEN:
351 ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count,
352 &arts, false);
353 kfree(arts);
354 length = count * sizeof(union art_object);
355 if (!ret)
356 return put_user(length, (unsigned long __user *)__arg);
357 return ret;
358
359 case ACPI_THERMAL_GET_ART:
360 return fill_art(arg);
361
362 default:
363 return -ENOTTY;
364 }
365}
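/*
 * Sketch of the expected userspace sequence (illustrative only; the device
 * node name follows from the miscdevice registered below):
 *
 *     int fd = open("/dev/acpi_thermal_rel", O_RDONLY);
 *     unsigned long count, len;
 *
 *     ioctl(fd, ACPI_THERMAL_GET_TRT_COUNT, &count);
 *     ioctl(fd, ACPI_THERMAL_GET_TRT_LEN, &len);
 *     union trt_object *trt = malloc(len);
 *     ioctl(fd, ACPI_THERMAL_GET_TRT, trt);    -- fills 'count' records
 *
 * The active table is read the same way via the ACPI_THERMAL_GET_ART*
 * ioctls and union art_object.
 */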
366
367static const struct file_operations acpi_thermal_rel_fops = {
368 .owner = THIS_MODULE,
369 .open = acpi_thermal_rel_open,
370 .release = acpi_thermal_rel_release,
371 .unlocked_ioctl = acpi_thermal_rel_ioctl,
372 .llseek = no_llseek,
373};
374
375static struct miscdevice acpi_thermal_rel_misc_device = {
376 .minor = MISC_DYNAMIC_MINOR,
377 "acpi_thermal_rel",
378 &acpi_thermal_rel_fops
379};
380
381int acpi_thermal_rel_misc_device_add(acpi_handle handle)
382{
383 acpi_thermal_rel_handle = handle;
384
385 return misc_register(&acpi_thermal_rel_misc_device);
386}
387EXPORT_SYMBOL(acpi_thermal_rel_misc_device_add);
388
389int acpi_thermal_rel_misc_device_remove(acpi_handle handle)
390{
391 misc_deregister(&acpi_thermal_rel_misc_device);
392
393 return 0;
394}
395EXPORT_SYMBOL(acpi_thermal_rel_misc_device_remove);
396
397MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
 398MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
399MODULE_DESCRIPTION("Intel acpi thermal rel misc dev driver");
400MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h
new file mode 100644
index 000000000000..f00700bc9d79
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h
@@ -0,0 +1,84 @@
1#ifndef __ACPI_ACPI_THERMAL_H
2#define __ACPI_ACPI_THERMAL_H
3
4#include <asm/ioctl.h>
5
6#define ACPI_THERMAL_MAGIC 's'
7
8#define ACPI_THERMAL_GET_TRT_LEN _IOR(ACPI_THERMAL_MAGIC, 1, unsigned long)
9#define ACPI_THERMAL_GET_ART_LEN _IOR(ACPI_THERMAL_MAGIC, 2, unsigned long)
10#define ACPI_THERMAL_GET_TRT_COUNT _IOR(ACPI_THERMAL_MAGIC, 3, unsigned long)
11#define ACPI_THERMAL_GET_ART_COUNT _IOR(ACPI_THERMAL_MAGIC, 4, unsigned long)
12
13#define ACPI_THERMAL_GET_TRT _IOR(ACPI_THERMAL_MAGIC, 5, unsigned long)
14#define ACPI_THERMAL_GET_ART _IOR(ACPI_THERMAL_MAGIC, 6, unsigned long)
15
16struct art {
17 acpi_handle source;
18 acpi_handle target;
19 u64 weight;
20 u64 ac0_max;
21 u64 ac1_max;
22 u64 ac2_max;
23 u64 ac3_max;
24 u64 ac4_max;
25 u64 ac5_max;
26 u64 ac6_max;
27 u64 ac7_max;
28 u64 ac8_max;
29 u64 ac9_max;
30} __packed;
31
32struct trt {
33 acpi_handle source;
34 acpi_handle target;
35 u64 influence;
36 u64 sample_period;
 37	u64 reserved1;
 38	u64 reserved2;
 39	u64 reserved3;
 40	u64 reserved4;
41} __packed;
42
43#define ACPI_NR_ART_ELEMENTS 13
 44/* for userspace */
45union art_object {
46 struct {
47 char source_device[8]; /* ACPI single name */
48 char target_device[8]; /* ACPI single name */
49 u64 weight;
50 u64 ac0_max_level;
51 u64 ac1_max_level;
52 u64 ac2_max_level;
53 u64 ac3_max_level;
54 u64 ac4_max_level;
55 u64 ac5_max_level;
56 u64 ac6_max_level;
57 u64 ac7_max_level;
58 u64 ac8_max_level;
59 u64 ac9_max_level;
60 };
61 u64 __data[ACPI_NR_ART_ELEMENTS];
62};
63
64union trt_object {
65 struct {
66 char source_device[8]; /* ACPI single name */
67 char target_device[8]; /* ACPI single name */
68 u64 influence;
69 u64 sample_period;
70 u64 reserved[4];
71 };
72 u64 __data[8];
73};
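/*
 * Layout note: each union above pairs the named-field struct with a flat
 * __data[] array of the same size (two 8-byte ACPI name strings followed by
 * the u64 values), so a record can be treated as a fixed-size binary blob
 * when it is copied to userspace by fill_art()/fill_trt().
 */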
74
75#ifdef __KERNEL__
76int acpi_thermal_rel_misc_device_add(acpi_handle handle);
77int acpi_thermal_rel_misc_device_remove(acpi_handle handle);
78int acpi_parse_art(acpi_handle handle, int *art_count, struct art **arts,
79 bool create_dev);
80int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trts,
81 bool create_dev);
82#endif
83
84#endif /* __ACPI_ACPI_THERMAL_H */
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
new file mode 100644
index 000000000000..edc1cce117ba
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -0,0 +1,271 @@
1/*
2 * INT3400 thermal driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Zhang Rui <rui.zhang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/acpi.h>
16#include <linux/thermal.h>
17#include "acpi_thermal_rel.h"
18
19enum int3400_thermal_uuid {
20 INT3400_THERMAL_PASSIVE_1,
21 INT3400_THERMAL_PASSIVE_2,
22 INT3400_THERMAL_ACTIVE,
23 INT3400_THERMAL_CRITICAL,
24 INT3400_THERMAL_COOLING_MODE,
25 INT3400_THERMAL_MAXIMUM_UUID,
26};
27
28static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
29 "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
30 "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
31 "3A95C389-E4B8-4629-A526-C52C88626BAE",
32 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
33 "16CAF1B7-DD38-40ed-B1C1-1B8A1913D531",
34};
35
36struct int3400_thermal_priv {
37 struct acpi_device *adev;
38 struct thermal_zone_device *thermal;
39 int mode;
40 int art_count;
41 struct art *arts;
42 int trt_count;
43 struct trt *trts;
44 u8 uuid_bitmap;
45 int rel_misc_dev_res;
46};
47
48static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
49{
50 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL};
51 union acpi_object *obja, *objb;
52 int i, j;
53 int result = 0;
54 acpi_status status;
55
56 status = acpi_evaluate_object(priv->adev->handle, "IDSP", NULL, &buf);
57 if (ACPI_FAILURE(status))
58 return -ENODEV;
59
60 obja = (union acpi_object *)buf.pointer;
61 if (obja->type != ACPI_TYPE_PACKAGE) {
62 result = -EINVAL;
63 goto end;
64 }
65
66 for (i = 0; i < obja->package.count; i++) {
67 objb = &obja->package.elements[i];
68 if (objb->type != ACPI_TYPE_BUFFER) {
69 result = -EINVAL;
70 goto end;
71 }
72
73 /* UUID must be 16 bytes */
74 if (objb->buffer.length != 16) {
75 result = -EINVAL;
76 goto end;
77 }
78
79 for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) {
80 u8 uuid[16];
81
82 acpi_str_to_uuid(int3400_thermal_uuids[j], uuid);
83 if (!strncmp(uuid, objb->buffer.pointer, 16)) {
84 priv->uuid_bitmap |= (1 << j);
85 break;
86 }
87 }
88 }
89
90end:
91 kfree(buf.pointer);
92 return result;
93}
94
95static int int3400_thermal_run_osc(acpi_handle handle,
96 enum int3400_thermal_uuid uuid, bool enable)
97{
98 u32 ret, buf[2];
99 acpi_status status;
100 int result = 0;
101 struct acpi_osc_context context = {
102 .uuid_str = int3400_thermal_uuids[uuid],
103 .rev = 1,
104 .cap.length = 8,
105 };
106
107 buf[OSC_QUERY_DWORD] = 0;
108 buf[OSC_SUPPORT_DWORD] = enable;
109
110 context.cap.pointer = buf;
111
112 status = acpi_run_osc(handle, &context);
113 if (ACPI_SUCCESS(status)) {
114 ret = *((u32 *)(context.ret.pointer + 4));
115 if (ret != enable)
116 result = -EPERM;
117 } else
118 result = -EPERM;
119
120 kfree(context.ret.pointer);
121 return result;
122}
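/*
 * Note on the _OSC call above: buf[OSC_QUERY_DWORD] stays 0 (a set request,
 * not a query) and buf[OSC_SUPPORT_DWORD] carries the requested enable
 * state. The returned capability buffer is read back at byte offset 4 and
 * must echo that state, otherwise the request is treated as rejected and
 * -EPERM is returned.
 */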
123
124static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
125 unsigned long *temp)
126{
127 *temp = 20 * 1000; /* faked temp sensor with 20C */
128 return 0;
129}
130
131static int int3400_thermal_get_mode(struct thermal_zone_device *thermal,
132 enum thermal_device_mode *mode)
133{
134 struct int3400_thermal_priv *priv = thermal->devdata;
135
136 if (!priv)
137 return -EINVAL;
138
139 *mode = priv->mode;
140
141 return 0;
142}
143
144static int int3400_thermal_set_mode(struct thermal_zone_device *thermal,
145 enum thermal_device_mode mode)
146{
147 struct int3400_thermal_priv *priv = thermal->devdata;
148 bool enable;
149 int result = 0;
150
151 if (!priv)
152 return -EINVAL;
153
154 if (mode == THERMAL_DEVICE_ENABLED)
155 enable = true;
156 else if (mode == THERMAL_DEVICE_DISABLED)
157 enable = false;
158 else
159 return -EINVAL;
160
161 if (enable != priv->mode) {
162 priv->mode = enable;
163 /* currently, only PASSIVE COOLING is supported */
164 result = int3400_thermal_run_osc(priv->adev->handle,
165 INT3400_THERMAL_PASSIVE_1, enable);
166 }
167 return result;
168}
169
170static struct thermal_zone_device_ops int3400_thermal_ops = {
171 .get_temp = int3400_thermal_get_temp,
172};
173
174static struct thermal_zone_params int3400_thermal_params = {
175 .governor_name = "user_space",
176 .no_hwmon = true,
177};
178
179static int int3400_thermal_probe(struct platform_device *pdev)
180{
181 struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
182 struct int3400_thermal_priv *priv;
183 int result;
184
185 if (!adev)
186 return -ENODEV;
187
188 priv = kzalloc(sizeof(struct int3400_thermal_priv), GFP_KERNEL);
189 if (!priv)
190 return -ENOMEM;
191
192 priv->adev = adev;
193
194 result = int3400_thermal_get_uuids(priv);
195 if (result)
196 goto free_priv;
197
198 result = acpi_parse_art(priv->adev->handle, &priv->art_count,
199 &priv->arts, true);
200 if (result)
201 goto free_priv;
202
203
204 result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
205 &priv->trts, true);
206 if (result)
207 goto free_art;
208
209 platform_set_drvdata(pdev, priv);
210
211 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
212 int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
213 int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
214 }
215 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
216 priv, &int3400_thermal_ops,
217 &int3400_thermal_params, 0, 0);
218 if (IS_ERR(priv->thermal)) {
219 result = PTR_ERR(priv->thermal);
220 goto free_trt;
221 }
222
223 priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
224 priv->adev->handle);
225
226 return 0;
227free_trt:
228 kfree(priv->trts);
229free_art:
230 kfree(priv->arts);
231free_priv:
232 kfree(priv);
233 return result;
234}
235
236static int int3400_thermal_remove(struct platform_device *pdev)
237{
238 struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
239
240 if (!priv->rel_misc_dev_res)
241 acpi_thermal_rel_misc_device_remove(priv->adev->handle);
242
243 thermal_zone_device_unregister(priv->thermal);
244 kfree(priv->trts);
245 kfree(priv->arts);
246 kfree(priv);
247 return 0;
248}
249
250static const struct acpi_device_id int3400_thermal_match[] = {
251 {"INT3400", 0},
252 {}
253};
254
255MODULE_DEVICE_TABLE(acpi, int3400_thermal_match);
256
257static struct platform_driver int3400_thermal_driver = {
258 .probe = int3400_thermal_probe,
259 .remove = int3400_thermal_remove,
260 .driver = {
261 .name = "int3400 thermal",
262 .owner = THIS_MODULE,
263 .acpi_match_table = ACPI_PTR(int3400_thermal_match),
264 },
265};
266
267module_platform_driver(int3400_thermal_driver);
268
269MODULE_DESCRIPTION("INT3400 Thermal driver");
270MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
271MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
new file mode 100644
index 000000000000..a5d08c14ba24
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -0,0 +1,242 @@
1/*
2 * INT3402 thermal driver for memory temperature reporting
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Aaron Lu <aaron.lu@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/acpi.h>
16#include <linux/thermal.h>
17
18#define ACPI_ACTIVE_COOLING_MAX_NR 10
19
20struct active_trip {
21 unsigned long temp;
22 int id;
23 bool valid;
24};
25
26struct int3402_thermal_data {
27 unsigned long *aux_trips;
28 int aux_trip_nr;
29 unsigned long psv_temp;
30 int psv_trip_id;
31 unsigned long crt_temp;
32 int crt_trip_id;
33 unsigned long hot_temp;
34 int hot_trip_id;
35 struct active_trip act_trips[ACPI_ACTIVE_COOLING_MAX_NR];
36 acpi_handle *handle;
37};
38
39static int int3402_thermal_get_zone_temp(struct thermal_zone_device *zone,
40 unsigned long *temp)
41{
42 struct int3402_thermal_data *d = zone->devdata;
43 unsigned long long tmp;
44 acpi_status status;
45
46 status = acpi_evaluate_integer(d->handle, "_TMP", NULL, &tmp);
47 if (ACPI_FAILURE(status))
48 return -ENODEV;
49
50 /* _TMP returns the temperature in tenths of degrees Kelvin */
51 *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
52
53 return 0;
54}
55
56static int int3402_thermal_get_trip_temp(struct thermal_zone_device *zone,
57 int trip, unsigned long *temp)
58{
59 struct int3402_thermal_data *d = zone->devdata;
60 int i;
61
62 if (trip < d->aux_trip_nr)
63 *temp = d->aux_trips[trip];
64 else if (trip == d->crt_trip_id)
65 *temp = d->crt_temp;
66 else if (trip == d->psv_trip_id)
67 *temp = d->psv_temp;
68 else if (trip == d->hot_trip_id)
69 *temp = d->hot_temp;
70 else {
71 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
72 if (d->act_trips[i].valid &&
73 d->act_trips[i].id == trip) {
74 *temp = d->act_trips[i].temp;
75 break;
76 }
77 }
78 if (i == ACPI_ACTIVE_COOLING_MAX_NR)
79 return -EINVAL;
80 }
81 return 0;
82}
83
84static int int3402_thermal_get_trip_type(struct thermal_zone_device *zone,
85 int trip, enum thermal_trip_type *type)
86{
87 struct int3402_thermal_data *d = zone->devdata;
88 int i;
89
90 if (trip < d->aux_trip_nr)
91 *type = THERMAL_TRIP_PASSIVE;
92 else if (trip == d->crt_trip_id)
93 *type = THERMAL_TRIP_CRITICAL;
94 else if (trip == d->hot_trip_id)
95 *type = THERMAL_TRIP_HOT;
96 else if (trip == d->psv_trip_id)
97 *type = THERMAL_TRIP_PASSIVE;
98 else {
99 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
100 if (d->act_trips[i].valid &&
101 d->act_trips[i].id == trip) {
102 *type = THERMAL_TRIP_ACTIVE;
103 break;
104 }
105 }
106 if (i == ACPI_ACTIVE_COOLING_MAX_NR)
107 return -EINVAL;
108 }
109 return 0;
110}
111
112static int int3402_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip,
113 unsigned long temp)
114{
115 struct int3402_thermal_data *d = zone->devdata;
116 acpi_status status;
117 char name[10];
118
119 snprintf(name, sizeof(name), "PAT%d", trip);
120 status = acpi_execute_simple_method(d->handle, name,
121 MILLICELSIUS_TO_DECI_KELVIN(temp));
122 if (ACPI_FAILURE(status))
123 return -EIO;
124
125 d->aux_trips[trip] = temp;
126 return 0;
127}
128
129static struct thermal_zone_device_ops int3402_thermal_zone_ops = {
130 .get_temp = int3402_thermal_get_zone_temp,
131 .get_trip_temp = int3402_thermal_get_trip_temp,
132 .get_trip_type = int3402_thermal_get_trip_type,
133 .set_trip_temp = int3402_thermal_set_trip_temp,
134};
135
136static struct thermal_zone_params int3402_thermal_params = {
137 .governor_name = "user_space",
138 .no_hwmon = true,
139};
140
141static int int3402_thermal_get_temp(acpi_handle handle, char *name,
142 unsigned long *temp)
143{
144 unsigned long long r;
145 acpi_status status;
146
147 status = acpi_evaluate_integer(handle, name, NULL, &r);
148 if (ACPI_FAILURE(status))
149 return -EIO;
150
151 *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
152 return 0;
153}
154
155static int int3402_thermal_probe(struct platform_device *pdev)
156{
157 struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
158 struct int3402_thermal_data *d;
159 struct thermal_zone_device *zone;
160 acpi_status status;
161 unsigned long long trip_cnt;
162 int trip_mask = 0, i;
163
164 if (!acpi_has_method(adev->handle, "_TMP"))
165 return -ENODEV;
166
167 d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
168 if (!d)
169 return -ENOMEM;
170
171 status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
172 if (ACPI_FAILURE(status))
173 trip_cnt = 0;
174 else {
175 d->aux_trips = devm_kzalloc(&pdev->dev,
176 sizeof(*d->aux_trips) * trip_cnt, GFP_KERNEL);
177 if (!d->aux_trips)
178 return -ENOMEM;
179 trip_mask = trip_cnt - 1;
180 d->handle = adev->handle;
181 d->aux_trip_nr = trip_cnt;
182 }
183
184 d->crt_trip_id = -1;
185 if (!int3402_thermal_get_temp(adev->handle, "_CRT", &d->crt_temp))
186 d->crt_trip_id = trip_cnt++;
187 d->hot_trip_id = -1;
188 if (!int3402_thermal_get_temp(adev->handle, "_HOT", &d->hot_temp))
189 d->hot_trip_id = trip_cnt++;
190 d->psv_trip_id = -1;
191 if (!int3402_thermal_get_temp(adev->handle, "_PSV", &d->psv_temp))
192 d->psv_trip_id = trip_cnt++;
193 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
194 char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
195 if (int3402_thermal_get_temp(adev->handle, name,
196 &d->act_trips[i].temp))
197 break;
198 d->act_trips[i].id = trip_cnt++;
199 d->act_trips[i].valid = true;
200 }
201
202 zone = thermal_zone_device_register(acpi_device_bid(adev), trip_cnt,
203 trip_mask, d,
204 &int3402_thermal_zone_ops,
205 &int3402_thermal_params,
206 0, 0);
207 if (IS_ERR(zone))
208 return PTR_ERR(zone);
209 platform_set_drvdata(pdev, zone);
210
211 return 0;
212}
213
214static int int3402_thermal_remove(struct platform_device *pdev)
215{
216 struct thermal_zone_device *zone = platform_get_drvdata(pdev);
217
218 thermal_zone_device_unregister(zone);
219 return 0;
220}
221
222static const struct acpi_device_id int3402_thermal_match[] = {
223 {"INT3402", 0},
224 {}
225};
226
227MODULE_DEVICE_TABLE(acpi, int3402_thermal_match);
228
229static struct platform_driver int3402_thermal_driver = {
230 .probe = int3402_thermal_probe,
231 .remove = int3402_thermal_remove,
232 .driver = {
233 .name = "int3402 thermal",
234 .owner = THIS_MODULE,
235 .acpi_match_table = int3402_thermal_match,
236 },
237};
238
239module_platform_driver(int3402_thermal_driver);
240
241MODULE_DESCRIPTION("INT3402 Thermal driver");
242MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
new file mode 100644
index 000000000000..d20dba986f0f
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -0,0 +1,477 @@
1/*
2 * ACPI INT3403 thermal driver
3 * Copyright (c) 2013, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/acpi.h>
20#include <linux/thermal.h>
21#include <linux/platform_device.h>
22
23#define INT3403_TYPE_SENSOR 0x03
24#define INT3403_TYPE_CHARGER 0x0B
25#define INT3403_TYPE_BATTERY 0x0C
26#define INT3403_PERF_CHANGED_EVENT 0x80
27#define INT3403_THERMAL_EVENT 0x90
28
29#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
30#define KELVIN_OFFSET 2732
31#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
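/*
 * For example, with KELVIN_OFFSET == 2732 a firmware reading of, say,
 * 3032 deci-Kelvin converts as:
 *   DECI_KELVIN_TO_MILLI_CELSIUS(3032, 2732) = (3032 - 2732) * 100 = 30000
 * i.e. 30000 millidegrees Celsius (30.0 C), and back:
 *   MILLI_CELSIUS_TO_DECI_KELVIN(30000, 2732) = (30000 / 100) + 2732 = 3032
 */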
32
33struct int3403_sensor {
34 struct thermal_zone_device *tzone;
35 unsigned long *thresholds;
36 unsigned long crit_temp;
37 int crit_trip_id;
38 unsigned long psv_temp;
39 int psv_trip_id;
40
41};
42
43struct int3403_performance_state {
44 u64 performance;
45 u64 power;
46 u64 latency;
47 u64 linear;
48 u64 control;
49	u64 raw_performance;
50 char *raw_unit;
51 int reserved;
52};
53
54struct int3403_cdev {
55 struct thermal_cooling_device *cdev;
56 unsigned long max_state;
57};
58
59struct int3403_priv {
60 struct platform_device *pdev;
61 struct acpi_device *adev;
62 unsigned long long type;
63 void *priv;
64};
65
66static int sys_get_curr_temp(struct thermal_zone_device *tzone,
67 unsigned long *temp)
68{
69 struct int3403_priv *priv = tzone->devdata;
70 struct acpi_device *device = priv->adev;
71 unsigned long long tmp;
72 acpi_status status;
73
74 status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
75 if (ACPI_FAILURE(status))
76 return -EIO;
77
78 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
79
80 return 0;
81}
82
83static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
84 int trip, unsigned long *temp)
85{
86 struct int3403_priv *priv = tzone->devdata;
87 struct acpi_device *device = priv->adev;
88 unsigned long long hyst;
89 acpi_status status;
90
91 status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
92 if (ACPI_FAILURE(status))
93 return -EIO;
94
95 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
96
97 return 0;
98}
99
100static int sys_get_trip_temp(struct thermal_zone_device *tzone,
101 int trip, unsigned long *temp)
102{
103 struct int3403_priv *priv = tzone->devdata;
104 struct int3403_sensor *obj = priv->priv;
105
106 if (priv->type != INT3403_TYPE_SENSOR || !obj)
107 return -EINVAL;
108
109 if (trip == obj->crit_trip_id)
110 *temp = obj->crit_temp;
111 else if (trip == obj->psv_trip_id)
112 *temp = obj->psv_temp;
113 else {
114 /*
115		 * get_trip_temp is a mandatory callback, but the
116		 * PATx method doesn't return any value, so return the
117		 * cached value, which was last set from user space.
118 */
119 *temp = obj->thresholds[trip];
120 }
121
122 return 0;
123}
124
125static int sys_get_trip_type(struct thermal_zone_device *thermal,
126 int trip, enum thermal_trip_type *type)
127{
128 struct int3403_priv *priv = thermal->devdata;
129 struct int3403_sensor *obj = priv->priv;
130
131 /* Mandatory callback, may not mean much here */
132 if (trip == obj->crit_trip_id)
133 *type = THERMAL_TRIP_CRITICAL;
134 else
135 *type = THERMAL_TRIP_PASSIVE;
136
137 return 0;
138}
139
140static int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
141 unsigned long temp)
142{
143 struct int3403_priv *priv = tzone->devdata;
144 struct acpi_device *device = priv->adev;
145 struct int3403_sensor *obj = priv->priv;
146 acpi_status status;
147 char name[10];
148 int ret = 0;
149
150 snprintf(name, sizeof(name), "PAT%d", trip);
151 if (acpi_has_method(device->handle, name)) {
152 status = acpi_execute_simple_method(device->handle, name,
153 MILLI_CELSIUS_TO_DECI_KELVIN(temp,
154 KELVIN_OFFSET));
155 if (ACPI_FAILURE(status))
156 ret = -EIO;
157 else
158 obj->thresholds[trip] = temp;
159 } else {
160 ret = -EIO;
161 dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
162 }
163
164 return ret;
165}
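/*
 * A minimal sketch of how these writable aux trips are exercised from
 * user space via the thermal sysfs interface (the zone and trip indexes
 * below are illustrative, not taken from the patch):
 *
 *   echo 45000 > /sys/class/thermal/thermal_zone0/trip_point_2_temp
 *
 * The thermal core routes the write to sys_set_trip_temp(), which
 * evaluates the matching PATx ACPI method and caches the value so that
 * sys_get_trip_temp() can report it back.
 */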
166
167static struct thermal_zone_device_ops tzone_ops = {
168 .get_temp = sys_get_curr_temp,
169 .get_trip_temp = sys_get_trip_temp,
170 .get_trip_type = sys_get_trip_type,
171 .set_trip_temp = sys_set_trip_temp,
172 .get_trip_hyst = sys_get_trip_hyst,
173};
174
175static struct thermal_zone_params int3403_thermal_params = {
176 .governor_name = "user_space",
177 .no_hwmon = true,
178};
179
180static void int3403_notify(acpi_handle handle,
181 u32 event, void *data)
182{
183 struct int3403_priv *priv = data;
184 struct int3403_sensor *obj;
185
186 if (!priv)
187 return;
188
189 obj = priv->priv;
190 if (priv->type != INT3403_TYPE_SENSOR || !obj)
191 return;
192
193 switch (event) {
194 case INT3403_PERF_CHANGED_EVENT:
195 break;
196 case INT3403_THERMAL_EVENT:
197 thermal_zone_device_update(obj->tzone);
198 break;
199 default:
200 dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
201 break;
202 }
203}
204
205static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
206{
207 unsigned long long crt;
208 acpi_status status;
209
210 status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
211 if (ACPI_FAILURE(status))
212 return -EIO;
213
214 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
215
216 return 0;
217}
218
219static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
220{
221 unsigned long long psv;
222 acpi_status status;
223
224 status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
225 if (ACPI_FAILURE(status))
226 return -EIO;
227
228 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
229
230 return 0;
231}
232
233static int int3403_sensor_add(struct int3403_priv *priv)
234{
235 int result = 0;
236 acpi_status status;
237 struct int3403_sensor *obj;
238 unsigned long long trip_cnt;
239 int trip_mask = 0;
240
241 obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
242 if (!obj)
243 return -ENOMEM;
244
245 priv->priv = obj;
246
247 status = acpi_evaluate_integer(priv->adev->handle, "PATC", NULL,
248 &trip_cnt);
249 if (ACPI_FAILURE(status))
250 trip_cnt = 0;
251
252 if (trip_cnt) {
253		/* We have to cache the thresholds, as they can't be read back */
254 obj->thresholds = devm_kzalloc(&priv->pdev->dev,
255 sizeof(*obj->thresholds) * trip_cnt,
256 GFP_KERNEL);
257 if (!obj->thresholds) {
258 result = -ENOMEM;
259 goto err_free_obj;
260 }
261 trip_mask = BIT(trip_cnt) - 1;
262 }
263
264 obj->psv_trip_id = -1;
265 if (!sys_get_trip_psv(priv->adev, &obj->psv_temp))
266 obj->psv_trip_id = trip_cnt++;
267
268 obj->crit_trip_id = -1;
269 if (!sys_get_trip_crt(priv->adev, &obj->crit_temp))
270 obj->crit_trip_id = trip_cnt++;
271
272 obj->tzone = thermal_zone_device_register(acpi_device_bid(priv->adev),
273 trip_cnt, trip_mask, priv, &tzone_ops,
274 &int3403_thermal_params, 0, 0);
275 if (IS_ERR(obj->tzone)) {
276 result = PTR_ERR(obj->tzone);
277 obj->tzone = NULL;
278 goto err_free_obj;
279 }
280
281	status = acpi_install_notify_handler(priv->adev->handle,
282			ACPI_DEVICE_NOTIFY, int3403_notify,
283			(void *)priv);
284	if (ACPI_FAILURE(status)) {
285		result = -ENODEV;
		goto err_free_obj;
	}
286
287 return 0;
288
289 err_free_obj:
290 if (obj->tzone)
291 thermal_zone_device_unregister(obj->tzone);
292 return result;
293}
294
295static int int3403_sensor_remove(struct int3403_priv *priv)
296{
297 struct int3403_sensor *obj = priv->priv;
298
299 thermal_zone_device_unregister(obj->tzone);
300 return 0;
301}
302
303/* INT3403 Cooling devices */
304static int int3403_get_max_state(struct thermal_cooling_device *cdev,
305 unsigned long *state)
306{
307 struct int3403_priv *priv = cdev->devdata;
308 struct int3403_cdev *obj = priv->priv;
309
310 *state = obj->max_state;
311 return 0;
312}
313
314static int int3403_get_cur_state(struct thermal_cooling_device *cdev,
315 unsigned long *state)
316{
317 struct int3403_priv *priv = cdev->devdata;
318 unsigned long long level;
319 acpi_status status;
320
321 status = acpi_evaluate_integer(priv->adev->handle, "PPPC", NULL, &level);
322 if (ACPI_SUCCESS(status)) {
323 *state = level;
324 return 0;
325 } else
326 return -EINVAL;
327}
328
329static int
330int3403_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
331{
332 struct int3403_priv *priv = cdev->devdata;
333 acpi_status status;
334
335 status = acpi_execute_simple_method(priv->adev->handle, "SPPC", state);
336 if (ACPI_SUCCESS(status))
337 return 0;
338 else
339 return -EINVAL;
340}
341
342static const struct thermal_cooling_device_ops int3403_cooling_ops = {
343 .get_max_state = int3403_get_max_state,
344 .get_cur_state = int3403_get_cur_state,
345 .set_cur_state = int3403_set_cur_state,
346};
347
348static int int3403_cdev_add(struct int3403_priv *priv)
349{
350 int result = 0;
351 acpi_status status;
352 struct int3403_cdev *obj;
353 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
354 union acpi_object *p;
355
356 obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
357 if (!obj)
358 return -ENOMEM;
359
360 status = acpi_evaluate_object(priv->adev->handle, "PPSS", NULL, &buf);
361 if (ACPI_FAILURE(status))
362 return -ENODEV;
363
364 p = buf.pointer;
365 if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
366 printk(KERN_WARNING "Invalid PPSS data\n");
367 return -EFAULT;
368 }
369
370 obj->max_state = p->package.count - 1;
371 obj->cdev =
372 thermal_cooling_device_register(acpi_device_bid(priv->adev),
373 priv, &int3403_cooling_ops);
374 if (IS_ERR(obj->cdev))
375 result = PTR_ERR(obj->cdev);
376
377 priv->priv = obj;
378
379 /* TODO: add ACPI notification support */
380
381 return result;
382}
383
384static int int3403_cdev_remove(struct int3403_priv *priv)
385{
386 struct int3403_cdev *obj = priv->priv;
387
388 thermal_cooling_device_unregister(obj->cdev);
389 return 0;
390}
391
392static int int3403_add(struct platform_device *pdev)
393{
394 struct int3403_priv *priv;
395 int result = 0;
396 acpi_status status;
397
398 priv = devm_kzalloc(&pdev->dev, sizeof(struct int3403_priv),
399 GFP_KERNEL);
400 if (!priv)
401 return -ENOMEM;
402
403 priv->pdev = pdev;
404 priv->adev = ACPI_COMPANION(&(pdev->dev));
405 if (!priv->adev) {
406 result = -EINVAL;
407 goto err;
408 }
409
410 status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
411 NULL, &priv->type);
412 if (ACPI_FAILURE(status)) {
413 result = -EINVAL;
414 goto err;
415 }
416
417 platform_set_drvdata(pdev, priv);
418 switch (priv->type) {
419 case INT3403_TYPE_SENSOR:
420 result = int3403_sensor_add(priv);
421 break;
422 case INT3403_TYPE_CHARGER:
423 case INT3403_TYPE_BATTERY:
424 result = int3403_cdev_add(priv);
425 break;
426 default:
427 result = -EINVAL;
428 }
429
430 if (result)
431 goto err;
432 return result;
433
434err:
435 return result;
436}
437
438static int int3403_remove(struct platform_device *pdev)
439{
440 struct int3403_priv *priv = platform_get_drvdata(pdev);
441
442 switch (priv->type) {
443 case INT3403_TYPE_SENSOR:
444 int3403_sensor_remove(priv);
445 break;
446 case INT3403_TYPE_CHARGER:
447 case INT3403_TYPE_BATTERY:
448 int3403_cdev_remove(priv);
449 break;
450 default:
451 break;
452 }
453
454 return 0;
455}
456
457static const struct acpi_device_id int3403_device_ids[] = {
458 {"INT3403", 0},
459 {"", 0},
460};
461MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
462
463static struct platform_driver int3403_driver = {
464 .probe = int3403_add,
465 .remove = int3403_remove,
466 .driver = {
467 .name = "int3403 thermal",
468 .owner = THIS_MODULE,
469 .acpi_match_table = int3403_device_ids,
470 },
471};
472
473module_platform_driver(int3403_driver);
474
475MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
476MODULE_LICENSE("GPL v2");
477MODULE_DESCRIPTION("ACPI INT3403 thermal driver");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4b2b999b7611..f8eb625b8400 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -401,6 +401,10 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
401 struct of_phandle_args sensor_specs; 401 struct of_phandle_args sensor_specs;
402 int ret, id; 402 int ret, id;
403 403
404 /* Check whether child is enabled or not */
405 if (!of_device_is_available(child))
406 continue;
407
404 /* For now, thermal framework supports only 1 sensor per zone */ 408 /* For now, thermal framework supports only 1 sensor per zone */
405 ret = of_parse_phandle_with_args(child, "thermal-sensors", 409 ret = of_parse_phandle_with_args(child, "thermal-sensors",
406 "#thermal-sensor-cells", 410 "#thermal-sensor-cells",
@@ -771,6 +775,10 @@ int __init of_parse_thermal_zones(void)
771 struct thermal_zone_device *zone; 775 struct thermal_zone_device *zone;
772 struct thermal_zone_params *tzp; 776 struct thermal_zone_params *tzp;
773 777
778 /* Check whether child is enabled or not */
779 if (!of_device_is_available(child))
780 continue;
781
774 tz = thermal_of_build_thermal_zone(child); 782 tz = thermal_of_build_thermal_zone(child);
775 if (IS_ERR(tz)) { 783 if (IS_ERR(tz)) {
776 pr_err("failed to build thermal zone %s: %ld\n", 784 pr_err("failed to build thermal zone %s: %ld\n",
@@ -838,6 +846,10 @@ void of_thermal_destroy_zones(void)
838 for_each_child_of_node(np, child) { 846 for_each_child_of_node(np, child) {
839 struct thermal_zone_device *zone; 847 struct thermal_zone_device *zone;
840 848
849 /* Check whether child is enabled or not */
850 if (!of_device_is_available(child))
851 continue;
852
841 zone = thermal_zone_get_zone_by_name(child->name); 853 zone = thermal_zone_get_zone_by_name(child->name);
842 if (IS_ERR(zone)) 854 if (IS_ERR(zone))
843 continue; 855 continue;
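/*
 * The of_device_is_available() checks added above make of-thermal skip
 * thermal-zone nodes whose "status" property is not "okay"/"ok". A
 * minimal device-tree sketch of a zone that would now be ignored (node
 * and sensor names are illustrative, not from the patch):
 *
 *	cpu-thermal {
 *		status = "disabled";
 *		polling-delay-passive = <250>;
 *		polling-delay = <1000>;
 *		thermal-sensors = <&cpu_temp_sensor 0>;
 *	};
 */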
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index f251521baaa2..fdd1f523a1ed 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/thermal.h> 25#include <linux/thermal.h>
26#include <trace/events/thermal.h>
26 27
27#include "thermal_core.h" 28#include "thermal_core.h"
28 29
@@ -76,7 +77,7 @@ static unsigned long get_target_state(struct thermal_instance *instance,
76 next_target = instance->upper; 77 next_target = instance->upper;
77 break; 78 break;
78 case THERMAL_TREND_DROPPING: 79 case THERMAL_TREND_DROPPING:
79 if (cur_state == instance->lower) { 80 if (cur_state <= instance->lower) {
80 if (!throttle) 81 if (!throttle)
81 next_target = THERMAL_NO_TARGET; 82 next_target = THERMAL_NO_TARGET;
82 } else { 83 } else {
@@ -129,8 +130,10 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
129 130
130 trend = get_tz_trend(tz, trip); 131 trend = get_tz_trend(tz, trip);
131 132
132 if (tz->temperature >= trip_temp) 133 if (tz->temperature >= trip_temp) {
133 throttle = true; 134 throttle = true;
135 trace_thermal_zone_trip(tz, trip, trip_type);
136 }
134 137
135 dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n", 138 dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n",
136 trip, trip_type, trip_temp, trend, throttle); 139 trip, trip_type, trip_temp, trend, throttle);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 1e23f4f8d2c2..9bf10aa6069b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -38,6 +38,9 @@
38#include <net/netlink.h> 38#include <net/netlink.h>
39#include <net/genetlink.h> 39#include <net/genetlink.h>
40 40
41#define CREATE_TRACE_POINTS
42#include <trace/events/thermal.h>
43
41#include "thermal_core.h" 44#include "thermal_core.h"
42#include "thermal_hwmon.h" 45#include "thermal_hwmon.h"
43 46
@@ -368,6 +371,8 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
368 if (tz->temperature < trip_temp) 371 if (tz->temperature < trip_temp)
369 return; 372 return;
370 373
374 trace_thermal_zone_trip(tz, trip, trip_type);
375
371 if (tz->ops->notify) 376 if (tz->ops->notify)
372 tz->ops->notify(tz, trip, trip_type); 377 tz->ops->notify(tz, trip, trip_type);
373 378
@@ -463,6 +468,7 @@ static void update_temperature(struct thermal_zone_device *tz)
463 tz->temperature = temp; 468 tz->temperature = temp;
464 mutex_unlock(&tz->lock); 469 mutex_unlock(&tz->lock);
465 470
471 trace_thermal_temperature(tz);
466 dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n", 472 dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
467 tz->last_temperature, tz->temperature); 473 tz->last_temperature, tz->temperature);
468} 474}
@@ -1287,6 +1293,7 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
1287 mutex_unlock(&cdev->lock); 1293 mutex_unlock(&cdev->lock);
1288 cdev->ops->set_cur_state(cdev, target); 1294 cdev->ops->set_cur_state(cdev, target);
1289 cdev->updated = true; 1295 cdev->updated = true;
1296 trace_cdev_update(cdev, target);
1290 dev_dbg(&cdev->device, "set to state %lu\n", target); 1297 dev_dbg(&cdev->device, "set to state %lu\n", target);
1291} 1298}
1292EXPORT_SYMBOL(thermal_cdev_update); 1299EXPORT_SYMBOL(thermal_cdev_update);
@@ -1790,6 +1797,10 @@ static int __init thermal_register_governors(void)
1790 if (result) 1797 if (result)
1791 return result; 1798 return result;
1792 1799
1800 result = thermal_gov_bang_bang_register();
1801 if (result)
1802 return result;
1803
1793 return thermal_gov_user_space_register(); 1804 return thermal_gov_user_space_register();
1794} 1805}
1795 1806
@@ -1797,6 +1808,7 @@ static void thermal_unregister_governors(void)
1797{ 1808{
1798 thermal_gov_step_wise_unregister(); 1809 thermal_gov_step_wise_unregister();
1799 thermal_gov_fair_share_unregister(); 1810 thermal_gov_fair_share_unregister();
1811 thermal_gov_bang_bang_unregister();
1800 thermal_gov_user_space_unregister(); 1812 thermal_gov_user_space_unregister();
1801} 1813}
1802 1814
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 3db339fb636f..d15d243de27a 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -69,6 +69,14 @@ static inline int thermal_gov_fair_share_register(void) { return 0; }
69static inline void thermal_gov_fair_share_unregister(void) {} 69static inline void thermal_gov_fair_share_unregister(void) {}
70#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */ 70#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */
71 71
72#ifdef CONFIG_THERMAL_GOV_BANG_BANG
73int thermal_gov_bang_bang_register(void);
74void thermal_gov_bang_bang_unregister(void);
75#else
76static inline int thermal_gov_bang_bang_register(void) { return 0; }
77static inline void thermal_gov_bang_bang_unregister(void) {}
78#endif /* CONFIG_THERMAL_GOV_BANG_BANG */
79
72#ifdef CONFIG_THERMAL_GOV_USER_SPACE 80#ifdef CONFIG_THERMAL_GOV_USER_SPACE
73int thermal_gov_user_space_register(void); 81int thermal_gov_user_space_register(void);
74void thermal_gov_user_space_unregister(void); 82void thermal_gov_user_space_unregister(void);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a673e5b6a2e0..60fa6278fbce 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -28,18 +28,6 @@
28 28
29#define UIO_MAX_DEVICES (1U << MINORBITS) 29#define UIO_MAX_DEVICES (1U << MINORBITS)
30 30
31struct uio_device {
32 struct module *owner;
33 struct device *dev;
34 int minor;
35 atomic_t event;
36 struct fasync_struct *async_queue;
37 wait_queue_head_t wait;
38 struct uio_info *info;
39 struct kobject *map_dir;
40 struct kobject *portio_dir;
41};
42
43static int uio_major; 31static int uio_major;
44static struct cdev *uio_cdev; 32static struct cdev *uio_cdev;
45static DEFINE_IDR(uio_idr); 33static DEFINE_IDR(uio_idr);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 57b1d44acbfe..eb976ee3a02f 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -448,8 +448,10 @@ static int __init fb_console_setup(char *this_opt)
448 return 1; 448 return 1;
449 449
450 while ((options = strsep(&this_opt, ",")) != NULL) { 450 while ((options = strsep(&this_opt, ",")) != NULL) {
451 if (!strncmp(options, "font:", 5)) 451 if (!strncmp(options, "font:", 5)) {
452 strlcpy(fontname, options + 5, sizeof(fontname)); 452 strlcpy(fontname, options + 5, sizeof(fontname));
453 continue;
454 }
453 455
454 if (!strncmp(options, "scrollback:", 11)) { 456 if (!strncmp(options, "scrollback:", 11)) {
455 options += 11; 457 options += 11;
@@ -457,13 +459,9 @@ static int __init fb_console_setup(char *this_opt)
457 fbcon_softback_size = simple_strtoul(options, &options, 0); 459 fbcon_softback_size = simple_strtoul(options, &options, 0);
458 if (*options == 'k' || *options == 'K') { 460 if (*options == 'k' || *options == 'K') {
459 fbcon_softback_size *= 1024; 461 fbcon_softback_size *= 1024;
460 options++;
461 } 462 }
462 if (*options != ',') 463 }
463 return 1; 464 continue;
464 options++;
465 } else
466 return 1;
467 } 465 }
468 466
469 if (!strncmp(options, "map:", 4)) { 467 if (!strncmp(options, "map:", 4)) {
@@ -478,8 +476,7 @@ static int __init fb_console_setup(char *this_opt)
478 476
479 fbcon_map_override(); 477 fbcon_map_override();
480 } 478 }
481 479 continue;
482 return 1;
483 } 480 }
484 481
485 if (!strncmp(options, "vc:", 3)) { 482 if (!strncmp(options, "vc:", 3)) {
@@ -491,7 +488,8 @@ static int __init fb_console_setup(char *this_opt)
491 if (*options++ == '-') 488 if (*options++ == '-')
492 last_fb_vc = simple_strtoul(options, &options, 10) - 1; 489 last_fb_vc = simple_strtoul(options, &options, 10) - 1;
493 fbcon_is_default = 0; 490 fbcon_is_default = 0;
494 } 491 continue;
492 }
495 493
496 if (!strncmp(options, "rotate:", 7)) { 494 if (!strncmp(options, "rotate:", 7)) {
497 options += 7; 495 options += 7;
@@ -499,6 +497,7 @@ static int __init fb_console_setup(char *this_opt)
499 initial_rotation = simple_strtoul(options, &options, 0); 497 initial_rotation = simple_strtoul(options, &options, 0);
500 if (initial_rotation > 3) 498 if (initial_rotation > 3)
501 initial_rotation = 0; 499 initial_rotation = 0;
500 continue;
502 } 501 }
503 } 502 }
504 return 1; 503 return 1;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 6e6aa704fe84..517f565b65d7 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -56,7 +56,7 @@ static int cursor_size_lastfrom;
56static int cursor_size_lastto; 56static int cursor_size_lastto;
57static u32 vgacon_xres; 57static u32 vgacon_xres;
58static u32 vgacon_yres; 58static u32 vgacon_yres;
59static struct vgastate state; 59static struct vgastate vgastate;
60 60
61#define BLANK 0x0020 61#define BLANK 0x0020
62 62
@@ -400,7 +400,7 @@ static const char *vgacon_startup(void)
400 400
401 vga_video_num_lines = screen_info.orig_video_lines; 401 vga_video_num_lines = screen_info.orig_video_lines;
402 vga_video_num_columns = screen_info.orig_video_cols; 402 vga_video_num_columns = screen_info.orig_video_cols;
403 state.vgabase = NULL; 403 vgastate.vgabase = NULL;
404 404
405 if (screen_info.orig_video_mode == 7) { 405 if (screen_info.orig_video_mode == 7) {
406 /* Monochrome display */ 406 /* Monochrome display */
@@ -851,12 +851,12 @@ static void vga_set_palette(struct vc_data *vc, unsigned char *table)
851{ 851{
852 int i, j; 852 int i, j;
853 853
854 vga_w(state.vgabase, VGA_PEL_MSK, 0xff); 854 vga_w(vgastate.vgabase, VGA_PEL_MSK, 0xff);
855 for (i = j = 0; i < 16; i++) { 855 for (i = j = 0; i < 16; i++) {
856 vga_w(state.vgabase, VGA_PEL_IW, table[i]); 856 vga_w(vgastate.vgabase, VGA_PEL_IW, table[i]);
857 vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); 857 vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
858 vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); 858 vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
859 vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); 859 vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
860 } 860 }
861} 861}
862 862
@@ -1008,7 +1008,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
1008 switch (blank) { 1008 switch (blank) {
1009 case 0: /* Unblank */ 1009 case 0: /* Unblank */
1010 if (vga_vesa_blanked) { 1010 if (vga_vesa_blanked) {
1011 vga_vesa_unblank(&state); 1011 vga_vesa_unblank(&vgastate);
1012 vga_vesa_blanked = 0; 1012 vga_vesa_blanked = 0;
1013 } 1013 }
1014 if (vga_palette_blanked) { 1014 if (vga_palette_blanked) {
@@ -1022,7 +1022,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
1022 case 1: /* Normal blanking */ 1022 case 1: /* Normal blanking */
1023 case -1: /* Obsolete */ 1023 case -1: /* Obsolete */
1024 if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) { 1024 if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) {
1025 vga_pal_blank(&state); 1025 vga_pal_blank(&vgastate);
1026 vga_palette_blanked = 1; 1026 vga_palette_blanked = 1;
1027 return 0; 1027 return 0;
1028 } 1028 }
@@ -1034,7 +1034,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
1034 return 1; 1034 return 1;
1035 default: /* VESA blanking */ 1035 default: /* VESA blanking */
1036 if (vga_video_type == VIDEO_TYPE_VGAC) { 1036 if (vga_video_type == VIDEO_TYPE_VGAC) {
1037 vga_vesa_blank(&state, blank - 1); 1037 vga_vesa_blank(&vgastate, blank - 1);
1038 vga_vesa_blanked = blank; 1038 vga_vesa_blanked = blank;
1039 } 1039 }
1040 return 0; 1040 return 0;
@@ -1280,7 +1280,7 @@ static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigne
1280 (charcount != 256 && charcount != 512)) 1280 (charcount != 256 && charcount != 512))
1281 return -EINVAL; 1281 return -EINVAL;
1282 1282
1283 rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512); 1283 rc = vgacon_do_font_op(&vgastate, font->data, 1, charcount == 512);
1284 if (rc) 1284 if (rc)
1285 return rc; 1285 return rc;
1286 1286
@@ -1299,7 +1299,7 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
1299 font->charcount = vga_512_chars ? 512 : 256; 1299 font->charcount = vga_512_chars ? 512 : 256;
1300 if (!font->data) 1300 if (!font->data)
1301 return 0; 1301 return 0;
1302 return vgacon_do_font_op(&state, font->data, 0, vga_512_chars); 1302 return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars);
1303} 1303}
1304 1304
1305#else 1305#else
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 3bf403150a2d..9ec81d46fc57 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -27,7 +27,6 @@
27#include <linux/regulator/consumer.h> 27#include <linux/regulator/consumer.h>
28#include <video/videomode.h> 28#include <video/videomode.h>
29 29
30#include <mach/cpu.h>
31#include <asm/gpio.h> 30#include <asm/gpio.h>
32 31
33#include <video/atmel_lcdc.h> 32#include <video/atmel_lcdc.h>
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
index 5ee3b5505f7f..91921665b98b 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
@@ -301,6 +301,8 @@ static const struct of_device_id tvc_of_match[] = {
301 {}, 301 {},
302}; 302};
303 303
304MODULE_DEVICE_TABLE(of, tvc_of_match);
305
304static struct platform_driver tvc_connector_driver = { 306static struct platform_driver tvc_connector_driver = {
305 .probe = tvc_probe, 307 .probe = tvc_probe,
306 .remove = __exit_p(tvc_remove), 308 .remove = __exit_p(tvc_remove),
@@ -308,6 +310,7 @@ static struct platform_driver tvc_connector_driver = {
308 .name = "connector-analog-tv", 310 .name = "connector-analog-tv",
309 .owner = THIS_MODULE, 311 .owner = THIS_MODULE,
310 .of_match_table = tvc_of_match, 312 .of_match_table = tvc_of_match,
313 .suppress_bind_attrs = true,
311 }, 314 },
312}; 315};
313 316
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index 74de2bc50c4f..2dfb6e5ff0cc 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
@@ -391,6 +391,7 @@ static struct platform_driver dvi_connector_driver = {
391 .name = "connector-dvi", 391 .name = "connector-dvi",
392 .owner = THIS_MODULE, 392 .owner = THIS_MODULE,
393 .of_match_table = dvic_of_match, 393 .of_match_table = dvic_of_match,
394 .suppress_bind_attrs = true,
394 }, 395 },
395}; 396};
396 397
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
index 131c6e260898..7b25967a91eb 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
@@ -437,6 +437,7 @@ static struct platform_driver hdmi_connector_driver = {
437 .name = "connector-hdmi", 437 .name = "connector-hdmi",
438 .owner = THIS_MODULE, 438 .owner = THIS_MODULE,
439 .of_match_table = hdmic_of_match, 439 .of_match_table = hdmic_of_match,
440 .suppress_bind_attrs = true,
440 }, 441 },
441}; 442};
442 443
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
index b4e9a42a79e6..47ee7cdee1c5 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
@@ -298,6 +298,7 @@ static struct platform_driver tfp410_driver = {
298 .name = "tfp410", 298 .name = "tfp410",
299 .owner = THIS_MODULE, 299 .owner = THIS_MODULE,
300 .of_match_table = tfp410_of_match, 300 .of_match_table = tfp410_of_match,
301 .suppress_bind_attrs = true,
301 }, 302 },
302}; 303};
303 304
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
index c891d8f84cb2..c4abd56dd846 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
@@ -461,6 +461,7 @@ static struct platform_driver tpd_driver = {
461 .name = "tpd12s015", 461 .name = "tpd12s015",
462 .owner = THIS_MODULE, 462 .owner = THIS_MODULE,
463 .of_match_table = tpd_of_match, 463 .of_match_table = tpd_of_match,
464 .suppress_bind_attrs = true,
464 }, 465 },
465}; 466};
466 467
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-dpi.c b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
index 3636b61dc9b4..a9c3dcf0f6b5 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
@@ -327,6 +327,7 @@ static struct platform_driver panel_dpi_driver = {
327 .name = "panel-dpi", 327 .name = "panel-dpi",
328 .owner = THIS_MODULE, 328 .owner = THIS_MODULE,
329 .of_match_table = panel_dpi_of_match, 329 .of_match_table = panel_dpi_of_match,
330 .suppress_bind_attrs = true,
330 }, 331 },
331}; 332};
332 333
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
index d6f14e8717e8..899cb1ab523d 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
@@ -1378,6 +1378,7 @@ static struct platform_driver dsicm_driver = {
1378 .name = "panel-dsi-cm", 1378 .name = "panel-dsi-cm",
1379 .owner = THIS_MODULE, 1379 .owner = THIS_MODULE,
1380 .of_match_table = dsicm_of_match, 1380 .of_match_table = dsicm_of_match,
1381 .suppress_bind_attrs = true,
1381 }, 1382 },
1382}; 1383};
1383 1384
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
index cc5b5124e0b4..27d4fcfa1824 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
@@ -394,6 +394,7 @@ static struct spi_driver lb035q02_spi_driver = {
394 .name = "panel_lgphilips_lb035q02", 394 .name = "panel_lgphilips_lb035q02",
395 .owner = THIS_MODULE, 395 .owner = THIS_MODULE,
396 .of_match_table = lb035q02_of_match, 396 .of_match_table = lb035q02_of_match,
397 .suppress_bind_attrs = true,
397 }, 398 },
398}; 399};
399 400
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
index 3595f111aa35..ccf3f4f3c703 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
@@ -424,6 +424,7 @@ static struct spi_driver nec_8048_driver = {
424 .owner = THIS_MODULE, 424 .owner = THIS_MODULE,
425 .pm = NEC_8048_PM_OPS, 425 .pm = NEC_8048_PM_OPS,
426 .of_match_table = nec_8048_of_match, 426 .of_match_table = nec_8048_of_match,
427 .suppress_bind_attrs = true,
427 }, 428 },
428 .probe = nec_8048_probe, 429 .probe = nec_8048_probe,
429 .remove = nec_8048_remove, 430 .remove = nec_8048_remove,
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
index f1f72ce50a17..234142cc3764 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
@@ -410,6 +410,7 @@ static struct platform_driver sharp_ls_driver = {
410 .name = "panel-sharp-ls037v7dw01", 410 .name = "panel-sharp-ls037v7dw01",
411 .owner = THIS_MODULE, 411 .owner = THIS_MODULE,
412 .of_match_table = sharp_ls_of_match, 412 .of_match_table = sharp_ls_of_match,
413 .suppress_bind_attrs = true,
413 }, 414 },
414}; 415};
415 416
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index 617f8d2f5127..337ccc5c0f5e 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
@@ -904,6 +904,7 @@ static struct spi_driver acx565akm_driver = {
904 .name = "acx565akm", 904 .name = "acx565akm",
905 .owner = THIS_MODULE, 905 .owner = THIS_MODULE,
906 .of_match_table = acx565akm_of_match, 906 .of_match_table = acx565akm_of_match,
907 .suppress_bind_attrs = true,
907 }, 908 },
908 .probe = acx565akm_probe, 909 .probe = acx565akm_probe,
909 .remove = acx565akm_remove, 910 .remove = acx565akm_remove,
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
index 728808bcceeb..fbba0b8ca871 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
@@ -500,6 +500,7 @@ static struct spi_driver td028ttec1_spi_driver = {
500 .name = "panel-tpo-td028ttec1", 500 .name = "panel-tpo-td028ttec1",
501 .owner = THIS_MODULE, 501 .owner = THIS_MODULE,
502 .of_match_table = td028ttec1_of_match, 502 .of_match_table = td028ttec1_of_match,
503 .suppress_bind_attrs = true,
503 }, 504 },
504}; 505};
505 506
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
index de78ab0caaa8..5aba76bca25a 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
@@ -673,6 +673,7 @@ static struct spi_driver tpo_td043_spi_driver = {
673 .owner = THIS_MODULE, 673 .owner = THIS_MODULE,
674 .pm = &tpo_td043_spi_pm, 674 .pm = &tpo_td043_spi_pm,
675 .of_match_table = tpo_td043_of_match, 675 .of_match_table = tpo_td043_of_match,
676 .suppress_bind_attrs = true,
676 }, 677 },
677 .probe = tpo_td043_probe, 678 .probe = tpo_td043_probe,
678 .remove = tpo_td043_remove, 679 .remove = tpo_td043_remove,
diff --git a/drivers/video/fbdev/omap2/dss/apply.c b/drivers/video/fbdev/omap2/dss/apply.c
index 0a0b084ce65d..663ccc3bf4e5 100644
--- a/drivers/video/fbdev/omap2/dss/apply.c
+++ b/drivers/video/fbdev/omap2/dss/apply.c
@@ -1132,6 +1132,8 @@ static void dss_mgr_disable_compat(struct omap_overlay_manager *mgr)
1132 if (!mp->enabled) 1132 if (!mp->enabled)
1133 goto out; 1133 goto out;
1134 1134
1135 wait_pending_extra_info_updates();
1136
1135 if (!mgr_manual_update(mgr)) 1137 if (!mgr_manual_update(mgr))
1136 dispc_mgr_disable_sync(mgr->id); 1138 dispc_mgr_disable_sync(mgr->id);
1137 1139
diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c
index be053aa80880..0e9a74bb9fc2 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/dss/dispc.c
@@ -3290,8 +3290,11 @@ static void dispc_dump_regs(struct seq_file *s)
3290 DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS); 3290 DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
3291 DUMPREG(i, DISPC_OVL_ROW_INC); 3291 DUMPREG(i, DISPC_OVL_ROW_INC);
3292 DUMPREG(i, DISPC_OVL_PIXEL_INC); 3292 DUMPREG(i, DISPC_OVL_PIXEL_INC);
3293
3293 if (dss_has_feature(FEAT_PRELOAD)) 3294 if (dss_has_feature(FEAT_PRELOAD))
3294 DUMPREG(i, DISPC_OVL_PRELOAD); 3295 DUMPREG(i, DISPC_OVL_PRELOAD);
3296 if (dss_has_feature(FEAT_MFLAG))
3297 DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
3295 3298
3296 if (i == OMAP_DSS_GFX) { 3299 if (i == OMAP_DSS_GFX) {
3297 DUMPREG(i, DISPC_OVL_WINDOW_SKIP); 3300 DUMPREG(i, DISPC_OVL_WINDOW_SKIP);
@@ -3312,10 +3315,6 @@ static void dispc_dump_regs(struct seq_file *s)
3312 } 3315 }
3313 if (dss_has_feature(FEAT_ATTR2)) 3316 if (dss_has_feature(FEAT_ATTR2))
3314 DUMPREG(i, DISPC_OVL_ATTRIBUTES2); 3317 DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
3315 if (dss_has_feature(FEAT_PRELOAD))
3316 DUMPREG(i, DISPC_OVL_PRELOAD);
3317 if (dss_has_feature(FEAT_MFLAG))
3318 DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
3319 } 3318 }
3320 3319
3321#undef DISPC_REG 3320#undef DISPC_REG
@@ -3843,6 +3842,7 @@ static struct platform_driver omap_dispchw_driver = {
3843 .owner = THIS_MODULE, 3842 .owner = THIS_MODULE,
3844 .pm = &dispc_pm_ops, 3843 .pm = &dispc_pm_ops,
3845 .of_match_table = dispc_of_match, 3844 .of_match_table = dispc_of_match,
3845 .suppress_bind_attrs = true,
3846 }, 3846 },
3847}; 3847};
3848 3848
diff --git a/drivers/video/fbdev/omap2/dss/dispc.h b/drivers/video/fbdev/omap2/dss/dispc.h
index 78edb449c763..3043d6e0a5f9 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.h
+++ b/drivers/video/fbdev/omap2/dss/dispc.h
@@ -101,8 +101,7 @@
101 DISPC_FIR_COEF_V2_OFFSET(n, i)) 101 DISPC_FIR_COEF_V2_OFFSET(n, i))
102#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \ 102#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \
103 DISPC_PRELOAD_OFFSET(n)) 103 DISPC_PRELOAD_OFFSET(n))
104#define DISPC_OVL_MFLAG_THRESHOLD(n) (DISPC_OVL_BASE(n) + \ 104#define DISPC_OVL_MFLAG_THRESHOLD(n) DISPC_MFLAG_THRESHOLD_OFFSET(n)
105 DISPC_MFLAG_THRESHOLD_OFFSET(n))
106 105
107/* DISPC up/downsampling FIR filter coefficient structure */ 106/* DISPC up/downsampling FIR filter coefficient structure */
108struct dispc_coef { 107struct dispc_coef {
diff --git a/drivers/video/fbdev/omap2/dss/dpi.c b/drivers/video/fbdev/omap2/dss/dpi.c
index 9368972d6962..4a3363dae74a 100644
--- a/drivers/video/fbdev/omap2/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/dss/dpi.c
@@ -720,6 +720,7 @@ static struct platform_driver omap_dpi_driver = {
720 .driver = { 720 .driver = {
721 .name = "omapdss_dpi", 721 .name = "omapdss_dpi",
722 .owner = THIS_MODULE, 722 .owner = THIS_MODULE,
723 .suppress_bind_attrs = true,
723 }, 724 },
724}; 725};
725 726
diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
index b6f6ae1d4664..0793bc67a275 100644
--- a/drivers/video/fbdev/omap2/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -1603,7 +1603,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1603 } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) { 1603 } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) {
1604 f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4; 1604 f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;
1605 1605
1606 l = FLD_MOD(l, f, 4, 1); /* PLL_SELFREQDCO */ 1606 l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */
1607 } 1607 }
1608 1608
1609 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ 1609 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
@@ -5754,6 +5754,7 @@ static struct platform_driver omap_dsihw_driver = {
5754 .owner = THIS_MODULE, 5754 .owner = THIS_MODULE,
5755 .pm = &dsi_pm_ops, 5755 .pm = &dsi_pm_ops,
5756 .of_match_table = dsi_of_match, 5756 .of_match_table = dsi_of_match,
5757 .suppress_bind_attrs = true,
5757 }, 5758 },
5758}; 5759};
5759 5760
diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
index 6daeb7ed44c6..14bcd6c43f72 100644
--- a/drivers/video/fbdev/omap2/dss/dss.c
+++ b/drivers/video/fbdev/omap2/dss/dss.c
@@ -966,6 +966,7 @@ static struct platform_driver omap_dsshw_driver = {
966 .owner = THIS_MODULE, 966 .owner = THIS_MODULE,
967 .pm = &dss_pm_ops, 967 .pm = &dss_pm_ops,
968 .of_match_table = dss_of_match, 968 .of_match_table = dss_of_match,
969 .suppress_bind_attrs = true,
969 }, 970 },
970}; 971};
971 972
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4.c b/drivers/video/fbdev/omap2/dss/hdmi4.c
index 6a8550cf43e5..9a8713ca090c 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4.c
@@ -781,6 +781,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
781 .owner = THIS_MODULE, 781 .owner = THIS_MODULE,
782 .pm = &hdmi_pm_ops, 782 .pm = &hdmi_pm_ops,
783 .of_match_table = hdmi_of_match, 783 .of_match_table = hdmi_of_match,
784 .suppress_bind_attrs = true,
784 }, 785 },
785}; 786};
786 787
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5.c b/drivers/video/fbdev/omap2/dss/hdmi5.c
index 32d02ec34d23..169b764bb9d4 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5.c
@@ -806,6 +806,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
806 .owner = THIS_MODULE, 806 .owner = THIS_MODULE,
807 .pm = &hdmi_pm_ops, 807 .pm = &hdmi_pm_ops,
808 .of_match_table = hdmi_of_match, 808 .of_match_table = hdmi_of_match,
809 .suppress_bind_attrs = true,
809 }, 810 },
810}; 811};
811 812
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 54df12a8d744..6d92bb32fe51 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -124,16 +124,15 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll)
124 r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */ 124 r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
125 r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */ 125 r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
126 126
127 if (fmt->dcofreq) { 127 if (fmt->dcofreq)
128 /* divider programming for frequency beyond 1000Mhz */
129 REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
130 r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */ 128 r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
131 } else { 129 else
132 r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */ 130 r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
133 }
134 131
135 hdmi_write_reg(pll->base, PLLCTRL_CFG2, r); 132 hdmi_write_reg(pll->base, PLLCTRL_CFG2, r);
136 133
134 REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
135
137 r = hdmi_read_reg(pll->base, PLLCTRL_CFG4); 136 r = hdmi_read_reg(pll->base, PLLCTRL_CFG4);
138 r = FLD_MOD(r, fmt->regm2, 24, 18); 137 r = FLD_MOD(r, fmt->regm2, 24, 18);
139 r = FLD_MOD(r, fmt->regmf, 17, 0); 138 r = FLD_MOD(r, fmt->regmf, 17, 0);
@@ -144,8 +143,8 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll)
144 143
145 /* wait for bit change */ 144 /* wait for bit change */
146 if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO, 145 if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
147 0, 0, 1) != 1) { 146 0, 0, 0) != 0) {
148 DSSERR("PLL GO bit not set\n"); 147 DSSERR("PLL GO bit not clearing\n");
149 return -ETIMEDOUT; 148 return -ETIMEDOUT;
150 } 149 }
151 150
diff --git a/drivers/video/fbdev/omap2/dss/rfbi.c b/drivers/video/fbdev/omap2/dss/rfbi.c
index c8a81a2b879c..878273f58839 100644
--- a/drivers/video/fbdev/omap2/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/dss/rfbi.c
@@ -1044,6 +1044,7 @@ static struct platform_driver omap_rfbihw_driver = {
1044 .name = "omapdss_rfbi", 1044 .name = "omapdss_rfbi",
1045 .owner = THIS_MODULE, 1045 .owner = THIS_MODULE,
1046 .pm = &rfbi_pm_ops, 1046 .pm = &rfbi_pm_ops,
1047 .suppress_bind_attrs = true,
1047 }, 1048 },
1048}; 1049};
1049 1050
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index 911dcc9173a6..4c9c46d4ea60 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -377,6 +377,7 @@ static struct platform_driver omap_sdi_driver = {
377 .driver = { 377 .driver = {
378 .name = "omapdss_sdi", 378 .name = "omapdss_sdi",
379 .owner = THIS_MODULE, 379 .owner = THIS_MODULE,
380 .suppress_bind_attrs = true,
380 }, 381 },
381}; 382};
382 383
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index 21d81113962b..d077d8a75ddc 100644
--- a/drivers/video/fbdev/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
@@ -966,6 +966,7 @@ static struct platform_driver omap_venchw_driver = {
966 .owner = THIS_MODULE, 966 .owner = THIS_MODULE,
967 .pm = &venc_pm_ops, 967 .pm = &venc_pm_ops,
968 .of_match_table = venc_of_match, 968 .of_match_table = venc_of_match,
969 .suppress_bind_attrs = true,
969 }, 970 },
970}; 971};
971 972
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 15872433e0c6..ce8a70570756 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1833,14 +1833,13 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
1833 if (fbdev == NULL) 1833 if (fbdev == NULL)
1834 return; 1834 return;
1835 1835
1836 for (i = 0; i < fbdev->num_fbs; i++) { 1836 for (i = 0; i < fbdev->num_overlays; i++) {
1837 struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); 1837 struct omap_overlay *ovl = fbdev->overlays[i];
1838 int j;
1839 1838
1840 for (j = 0; j < ofbi->num_overlays; j++) { 1839 ovl->disable(ovl);
1841 struct omap_overlay *ovl = ofbi->overlays[j]; 1840
1842 ovl->disable(ovl); 1841 if (ovl->manager)
1843 } 1842 ovl->unset_manager(ovl);
1844 } 1843 }
1845 1844
1846 for (i = 0; i < fbdev->num_fbs; i++) 1845 for (i = 0; i < fbdev->num_fbs; i++)
@@ -2619,7 +2618,7 @@ err0:
2619 return r; 2618 return r;
2620} 2619}
2621 2620
2622static int __exit omapfb_remove(struct platform_device *pdev) 2621static int omapfb_remove(struct platform_device *pdev)
2623{ 2622{
2624 struct omapfb2_device *fbdev = platform_get_drvdata(pdev); 2623 struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
2625 2624
@@ -2636,7 +2635,7 @@ static int __exit omapfb_remove(struct platform_device *pdev)
2636 2635
2637static struct platform_driver omapfb_driver = { 2636static struct platform_driver omapfb_driver = {
2638 .probe = omapfb_probe, 2637 .probe = omapfb_probe,
2639 .remove = __exit_p(omapfb_remove), 2638 .remove = omapfb_remove,
2640 .driver = { 2639 .driver = {
2641 .name = "omapfb", 2640 .name = "omapfb",
2642 .owner = THIS_MODULE, 2641 .owner = THIS_MODULE,
@@ -2651,6 +2650,7 @@ module_param_named(mirror, def_mirror, bool, 0);
2651 2650
2652module_platform_driver(omapfb_driver); 2651module_platform_driver(omapfb_driver);
2653 2652
2653MODULE_ALIAS("platform:omapfb");
2654MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>"); 2654MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
2655MODULE_DESCRIPTION("OMAP2/3 Framebuffer"); 2655MODULE_DESCRIPTION("OMAP2/3 Framebuffer");
2656MODULE_LICENSE("GPL v2"); 2656MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index e3d5bf0a5021..d0107d424ee4 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -87,6 +87,15 @@ config DA9055_WATCHDOG
87 This driver can also be built as a module. If so, the module 87 This driver can also be built as a module. If so, the module
88 will be called da9055_wdt. 88 will be called da9055_wdt.
89 89
90config DA9063_WATCHDOG
91 tristate "Dialog DA9063 Watchdog"
92 depends on MFD_DA9063
93 select WATCHDOG_CORE
94 help
95 Support for the watchdog in the DA9063 PMIC.
96
97 This driver can be built as a module. The module name is da9063_wdt.
98
90config GPIO_WATCHDOG 99config GPIO_WATCHDOG
91 tristate "Watchdog device controlled through GPIO-line" 100 tristate "Watchdog device controlled through GPIO-line"
92 depends on OF_GPIO 101 depends on OF_GPIO
@@ -123,6 +132,7 @@ config WM8350_WATCHDOG
123 132
124config XILINX_WATCHDOG 133config XILINX_WATCHDOG
125 tristate "Xilinx Watchdog timer" 134 tristate "Xilinx Watchdog timer"
135 depends on HAS_IOMEM
126 select WATCHDOG_CORE 136 select WATCHDOG_CORE
127 help 137 help
128 Watchdog driver for the xps_timebase_wdt ip core. 138 Watchdog driver for the xps_timebase_wdt ip core.
@@ -157,6 +167,14 @@ config AT91SAM9X_WATCHDOG
157 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will 167 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
158 reboot your system when the timeout is reached. 168 reboot your system when the timeout is reached.
159 169
170config CADENCE_WATCHDOG
171 tristate "Cadence Watchdog Timer"
172 depends on ARM
173 select WATCHDOG_CORE
174 help
175 Say Y here if you want to include support for the watchdog
176 timer in the Xilinx Zynq.
177
160config 21285_WATCHDOG 178config 21285_WATCHDOG
161 tristate "DC21285 watchdog" 179 tristate "DC21285 watchdog"
162 depends on FOOTBRIDGE 180 depends on FOOTBRIDGE
@@ -319,6 +337,17 @@ config ORION_WATCHDOG
319 To compile this driver as a module, choose M here: the 337 To compile this driver as a module, choose M here: the
320 module will be called orion_wdt. 338 module will be called orion_wdt.
321 339
340config RN5T618_WATCHDOG
341 tristate "Ricoh RN5T618 watchdog"
342 depends on MFD_RN5T618
343 select WATCHDOG_CORE
344 help
345 If you say yes here you get support for watchdog on the Ricoh
346 RN5T618 PMIC.
347
348 This driver can also be built as a module. If so, the module
349 will be called rn5t618_wdt.
350
322config SUNXI_WATCHDOG 351config SUNXI_WATCHDOG
323 tristate "Allwinner SoCs watchdog support" 352 tristate "Allwinner SoCs watchdog support"
324 depends on ARCH_SUNXI 353 depends on ARCH_SUNXI
@@ -444,7 +473,7 @@ config SIRFSOC_WATCHDOG
444 473
445config TEGRA_WATCHDOG 474config TEGRA_WATCHDOG
446 tristate "Tegra watchdog" 475 tristate "Tegra watchdog"
447 depends on ARCH_TEGRA || COMPILE_TEST 476 depends on (ARCH_TEGRA || COMPILE_TEST) && HAS_IOMEM
448 select WATCHDOG_CORE 477 select WATCHDOG_CORE
449 help 478 help
450 Say Y here to include support for the watchdog timer 479 Say Y here to include support for the watchdog timer
@@ -453,6 +482,29 @@ config TEGRA_WATCHDOG
453 To compile this driver as a module, choose M here: the 482 To compile this driver as a module, choose M here: the
454 module will be called tegra_wdt. 483 module will be called tegra_wdt.
455 484
485config QCOM_WDT
486 tristate "QCOM watchdog"
487 depends on HAS_IOMEM
488 depends on ARCH_QCOM
489 select WATCHDOG_CORE
490 help
491 Say Y here to include Watchdog timer support for the watchdog found
492 on QCOM chipsets. Currently supported targets are the MSM8960,
493 APQ8064, and IPQ8064.
494
495 To compile this driver as a module, choose M here: the
496 module will be called qcom_wdt.
497
498config MESON_WATCHDOG
499 tristate "Amlogic Meson SoCs watchdog support"
500 depends on ARCH_MESON
501 select WATCHDOG_CORE
502 help
503 Say Y here to include support for the watchdog timer
504 in Amlogic Meson SoCs.
505 To compile this driver as a module, choose M here: the
506 module will be called meson_wdt.
507
456# AVR32 Architecture 508# AVR32 Architecture
457 509
458config AT32AP700X_WDT 510config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index de1701470c14..c569ec8f8a76 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
32obj-$(CONFIG_ARM_SP805_WATCHDOG) += sp805_wdt.o 32obj-$(CONFIG_ARM_SP805_WATCHDOG) += sp805_wdt.o
33obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o 33obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o
34obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o 34obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o
35obj-$(CONFIG_CADENCE_WATCHDOG) += cadence_wdt.o
35obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o 36obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o
36obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o 37obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o
37obj-$(CONFIG_21285_WATCHDOG) += wdt285.o 38obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
47obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o 48obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
48obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o 49obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
49obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o 50obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
51obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
50obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o 52obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
51obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o 53obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
52obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o 54obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
@@ -57,8 +59,10 @@ obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
57obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o 59obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
58obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o 60obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
59obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o 61obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
62obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o
60obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o 63obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
61obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o 64obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
65obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
62 66
63# AVR32 Architecture 67# AVR32 Architecture
64obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 68obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -173,6 +177,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
173# Architecture Independent 177# Architecture Independent
174obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o 178obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
175obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o 179obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
180obj-$(CONFIG_DA9063_WATCHDOG) += da9063_wdt.o
176obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o 181obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o
177obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o 182obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
178obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o 183obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 08a785398eac..e96b09b135c8 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -30,8 +30,6 @@
  * occur, and the final time the board will reset.
  */
 
-u32 booke_wdt_enabled;
-u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
 #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
@@ -41,27 +39,10 @@ u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
 #define WDTP_MASK (TCR_WP_MASK)
 #endif
 
-/* Checks wdt=x and wdt_period=xx command-line option */
-notrace int __init early_parse_wdt(char *p)
-{
-	if (p && strncmp(p, "0", 1) != 0)
-		booke_wdt_enabled = 1;
-
-	return 0;
-}
-early_param("wdt", early_parse_wdt);
-
-int __init early_parse_wdt_period(char *p)
-{
-	unsigned long ret;
-	if (p) {
-		if (!kstrtol(p, 0, &ret))
-			booke_wdt_period = ret;
-	}
-
-	return 0;
-}
-early_param("wdt_period", early_parse_wdt_period);
+static bool booke_wdt_enabled;
+module_param(booke_wdt_enabled, bool, 0);
+static int booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
+module_param(booke_wdt_period, int, 0);
 
 #ifdef CONFIG_PPC_FSL_BOOK3E
 
@@ -259,5 +240,6 @@ static int __init booke_wdt_init(void)
 module_init(booke_wdt_init);
 module_exit(booke_wdt_exit);
 
+MODULE_ALIAS("booke_wdt");
 MODULE_DESCRIPTION("PowerPC Book-E watchdog driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
new file mode 100644
index 000000000000..5927c0a98a74
--- /dev/null
+++ b/drivers/watchdog/cadence_wdt.c
@@ -0,0 +1,516 @@
1/*
2 * Cadence WDT driver - Used by Xilinx Zynq
3 *
4 * Copyright (C) 2010 - 2014 Xilinx, Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/clk.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/irq.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/platform_device.h>
21#include <linux/reboot.h>
22#include <linux/watchdog.h>
23
24#define CDNS_WDT_DEFAULT_TIMEOUT 10
25/* Supports 1 - 516 sec */
26#define CDNS_WDT_MIN_TIMEOUT 1
27#define CDNS_WDT_MAX_TIMEOUT 516
28
29/* Restart key */
30#define CDNS_WDT_RESTART_KEY 0x00001999
31
32/* Counter register access key */
33#define CDNS_WDT_REGISTER_ACCESS_KEY 0x00920000
34
35/* Counter value divisor */
36#define CDNS_WDT_COUNTER_VALUE_DIVISOR 0x1000
37
38/* Clock prescaler value and selection */
39#define CDNS_WDT_PRESCALE_64 64
40#define CDNS_WDT_PRESCALE_512 512
41#define CDNS_WDT_PRESCALE_4096 4096
42#define CDNS_WDT_PRESCALE_SELECT_64 1
43#define CDNS_WDT_PRESCALE_SELECT_512 2
44#define CDNS_WDT_PRESCALE_SELECT_4096 3
45
46/* Input clock frequency */
47#define CDNS_WDT_CLK_10MHZ 10000000
48#define CDNS_WDT_CLK_75MHZ 75000000
49
50/* Counter maximum value */
51#define CDNS_WDT_COUNTER_MAX 0xFFF
52
53static int wdt_timeout = CDNS_WDT_DEFAULT_TIMEOUT;
54static int nowayout = WATCHDOG_NOWAYOUT;
55
56module_param(wdt_timeout, int, 0);
57MODULE_PARM_DESC(wdt_timeout,
58 "Watchdog time in seconds. (default="
59 __MODULE_STRING(CDNS_WDT_DEFAULT_TIMEOUT) ")");
60
61module_param(nowayout, int, 0);
62MODULE_PARM_DESC(nowayout,
63 "Watchdog cannot be stopped once started (default="
64 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
65
66/**
67 * struct cdns_wdt - Watchdog device structure
68 * @regs: base address of the device
69 * @rst: reset flag
70 * @clk: struct clk * of a clock source
71 * @prescaler: for saving prescaler value
72 * @ctrl_clksel: counter clock prescaler selection
73 * @io_lock: spinlock for IO register access
74 * @cdns_wdt_device: watchdog device structure
75 * @cdns_wdt_notifier: notifier structure
76 *
77 * Structure containing parameters specific to cadence watchdog.
78 */
79struct cdns_wdt {
80 void __iomem *regs;
81 bool rst;
82 struct clk *clk;
83 u32 prescaler;
84 u32 ctrl_clksel;
85 spinlock_t io_lock;
86 struct watchdog_device cdns_wdt_device;
87 struct notifier_block cdns_wdt_notifier;
88};
89
90/* Write access to Registers */
91static inline void cdns_wdt_writereg(struct cdns_wdt *wdt, u32 offset, u32 val)
92{
93 writel_relaxed(val, wdt->regs + offset);
94}
95
96/*************************Register Map**************************************/
97
98/* Register Offsets for the WDT */
99#define CDNS_WDT_ZMR_OFFSET 0x0 /* Zero Mode Register */
100#define CDNS_WDT_CCR_OFFSET 0x4 /* Counter Control Register */
101#define CDNS_WDT_RESTART_OFFSET 0x8 /* Restart Register */
102#define CDNS_WDT_SR_OFFSET 0xC /* Status Register */
103
104/*
105 * Zero Mode Register - This register controls how the time out is indicated
106 * and also contains the access code to allow writes to the register (0xABC).
107 */
108#define CDNS_WDT_ZMR_WDEN_MASK 0x00000001 /* Enable the WDT */
109#define CDNS_WDT_ZMR_RSTEN_MASK 0x00000002 /* Enable the reset output */
110#define CDNS_WDT_ZMR_IRQEN_MASK 0x00000004 /* Enable IRQ output */
111#define CDNS_WDT_ZMR_RSTLEN_16 0x00000030 /* Reset pulse of 16 pclk cycles */
112#define CDNS_WDT_ZMR_ZKEY_VAL 0x00ABC000 /* Access key, 0xABC << 12 */
113/*
114 * Counter Control register - This register controls how fast the timer runs
115 * and the reset value and also contains the access code to allow writes to
116 * the register.
117 */
118#define CDNS_WDT_CCR_CRV_MASK 0x00003FFC /* Counter reset value */
119
120/**
121 * cdns_wdt_stop - Stop the watchdog.
122 *
123 * @wdd: watchdog device
124 *
125 * Read the contents of the ZMR register, clear the WDEN bit
126 * in the register and set the access key for successful write.
127 *
128 * Return: always 0
129 */
130static int cdns_wdt_stop(struct watchdog_device *wdd)
131{
132 struct cdns_wdt *wdt = watchdog_get_drvdata(wdd);
133
134 spin_lock(&wdt->io_lock);
135 cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET,
136 CDNS_WDT_ZMR_ZKEY_VAL & (~CDNS_WDT_ZMR_WDEN_MASK));
137 spin_unlock(&wdt->io_lock);
138
139 return 0;
140}
141
142/**
143 * cdns_wdt_reload - Reload the watchdog timer (i.e. pat the watchdog).
144 *
145 * @wdd: watchdog device
146 *
147 * Write the restart key value (0x00001999) to the restart register.
148 *
149 * Return: always 0
150 */
151static int cdns_wdt_reload(struct watchdog_device *wdd)
152{
153 struct cdns_wdt *wdt = watchdog_get_drvdata(wdd);
154
155 spin_lock(&wdt->io_lock);
156 cdns_wdt_writereg(wdt, CDNS_WDT_RESTART_OFFSET,
157 CDNS_WDT_RESTART_KEY);
158 spin_unlock(&wdt->io_lock);
159
160 return 0;
161}
162
163/**
164 * cdns_wdt_start - Enable and start the watchdog.
165 *
166 * @wdd: watchdog device
167 *
168 * The counter value is calculated according to the formula:
169 * count = (timeout * clock / prescaler) / 0x1000 + 1,
170 * which is the field value written to the counter control register.
171 * Clears the prescaler and counter reset value fields, then writes
172 * the selected prescaler, the calculated count and the access key
173 * to the CCR register.
174 * Enables the WDT (WDEN bit) together with either the reset output
175 * (RSTEN bit) or the interrupt output (IRQEN bit), selects the reset
176 * pulse length, and writes the result together with the access key
177 * to the ZMR register.
178 *
179 * Return: always 0
180 */
181static int cdns_wdt_start(struct watchdog_device *wdd)
182{
183 struct cdns_wdt *wdt = watchdog_get_drvdata(wdd);
184 unsigned int data = 0;
185 unsigned short count;
186 unsigned long clock_f = clk_get_rate(wdt->clk);
187
188 /*
189 * Counter value divisor to obtain the value of
190 * counter reset to be written to control register.
191 */
192 count = (wdd->timeout * (clock_f / wdt->prescaler)) /
193 CDNS_WDT_COUNTER_VALUE_DIVISOR + 1;
194
195 if (count > CDNS_WDT_COUNTER_MAX)
196 count = CDNS_WDT_COUNTER_MAX;
197
198 spin_lock(&wdt->io_lock);
199 cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET,
200 CDNS_WDT_ZMR_ZKEY_VAL);
201
202 count = (count << 2) & CDNS_WDT_CCR_CRV_MASK;
203
204	/* Write the counter access key first so the register can be written */
205 data = count | CDNS_WDT_REGISTER_ACCESS_KEY | wdt->ctrl_clksel;
206 cdns_wdt_writereg(wdt, CDNS_WDT_CCR_OFFSET, data);
207 data = CDNS_WDT_ZMR_WDEN_MASK | CDNS_WDT_ZMR_RSTLEN_16 |
208 CDNS_WDT_ZMR_ZKEY_VAL;
209
210 /* Reset on timeout if specified in device tree. */
211 if (wdt->rst) {
212 data |= CDNS_WDT_ZMR_RSTEN_MASK;
213 data &= ~CDNS_WDT_ZMR_IRQEN_MASK;
214 } else {
215 data &= ~CDNS_WDT_ZMR_RSTEN_MASK;
216 data |= CDNS_WDT_ZMR_IRQEN_MASK;
217 }
218 cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET, data);
219 cdns_wdt_writereg(wdt, CDNS_WDT_RESTART_OFFSET,
220 CDNS_WDT_RESTART_KEY);
221 spin_unlock(&wdt->io_lock);
222
223 return 0;
224}
225
226/**
227 * cdns_wdt_settimeout - Set a new timeout value for the watchdog device.
228 *
229 * @wdd: watchdog device
230 * @new_time: new timeout value that needs to be set
231 * Return: 0 on success
232 *
233 * Update the watchdog_device timeout with new value which is used when
234 * cdns_wdt_start is called.
235 */
236static int cdns_wdt_settimeout(struct watchdog_device *wdd,
237 unsigned int new_time)
238{
239 wdd->timeout = new_time;
240
241 return cdns_wdt_start(wdd);
242}
243
244/**
245 * cdns_wdt_irq_handler - Notifies of watchdog timeout.
246 *
247 * @irq: interrupt number
248 * @dev_id: pointer to a platform device structure
249 * Return: IRQ_HANDLED
250 *
251 * The handler is invoked when the watchdog times out and a
252 * reset on timeout has not been enabled.
253 */
254static irqreturn_t cdns_wdt_irq_handler(int irq, void *dev_id)
255{
256 struct platform_device *pdev = dev_id;
257
258 dev_info(&pdev->dev,
259 "Watchdog timed out. Internal reset not enabled\n");
260
261 return IRQ_HANDLED;
262}
263
264/*
265 * Info structure used to indicate the features supported by the device
266 * to the upper layers. This is defined in watchdog.h header file.
267 */
268static struct watchdog_info cdns_wdt_info = {
269 .identity = "cdns_wdt watchdog",
270 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
271 WDIOF_MAGICCLOSE,
272};
273
274/* Watchdog Core Ops */
275static struct watchdog_ops cdns_wdt_ops = {
276 .owner = THIS_MODULE,
277 .start = cdns_wdt_start,
278 .stop = cdns_wdt_stop,
279 .ping = cdns_wdt_reload,
280 .set_timeout = cdns_wdt_settimeout,
281};
282
283/**
284 * cdns_wdt_notify_sys - Notifier for reboot or shutdown.
285 *
286 * @this: handle to notifier block
287 * @code: turn off indicator
288 * @unused: unused
289 * Return: NOTIFY_DONE
290 *
291 * This notifier is invoked on system reboot or shutdown; the WDT must be
292 * disabled before the system goes down, as it might otherwise reset the
293 * board on the next boot.
294 */
295static int cdns_wdt_notify_sys(struct notifier_block *this, unsigned long code,
296 void *unused)
297{
298 struct cdns_wdt *wdt = container_of(this, struct cdns_wdt,
299 cdns_wdt_notifier);
300 if (code == SYS_DOWN || code == SYS_HALT)
301 cdns_wdt_stop(&wdt->cdns_wdt_device);
302
303 return NOTIFY_DONE;
304}
305
306/************************Platform Operations*****************************/
307/**
308 * cdns_wdt_probe - Probe call for the device.
309 *
310 * @pdev: handle to the platform device structure.
311 * Return: 0 on success, negative error otherwise.
312 *
313 * It does all the memory allocation and registration for the device.
314 */
315static int cdns_wdt_probe(struct platform_device *pdev)
316{
317 struct resource *res;
318 int ret, irq;
319 unsigned long clock_f;
320 struct cdns_wdt *wdt;
321 struct watchdog_device *cdns_wdt_device;
322
323 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
324 if (!wdt)
325 return -ENOMEM;
326
327 cdns_wdt_device = &wdt->cdns_wdt_device;
328 cdns_wdt_device->info = &cdns_wdt_info;
329 cdns_wdt_device->ops = &cdns_wdt_ops;
330 cdns_wdt_device->timeout = CDNS_WDT_DEFAULT_TIMEOUT;
331 cdns_wdt_device->min_timeout = CDNS_WDT_MIN_TIMEOUT;
332 cdns_wdt_device->max_timeout = CDNS_WDT_MAX_TIMEOUT;
333
334 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
335 wdt->regs = devm_ioremap_resource(&pdev->dev, res);
336 if (IS_ERR(wdt->regs))
337 return PTR_ERR(wdt->regs);
338
339 /* Register the interrupt */
340 wdt->rst = of_property_read_bool(pdev->dev.of_node, "reset-on-timeout");
341 irq = platform_get_irq(pdev, 0);
342 if (!wdt->rst && irq >= 0) {
343 ret = devm_request_irq(&pdev->dev, irq, cdns_wdt_irq_handler, 0,
344 pdev->name, pdev);
345 if (ret) {
346 dev_err(&pdev->dev,
347 "cannot register interrupt handler err=%d\n",
348 ret);
349 return ret;
350 }
351 }
352
353 /* Initialize the members of cdns_wdt structure */
354 cdns_wdt_device->parent = &pdev->dev;
355
356 ret = watchdog_init_timeout(cdns_wdt_device, wdt_timeout, &pdev->dev);
357 if (ret) {
358 dev_err(&pdev->dev, "unable to set timeout value\n");
359 return ret;
360 }
361
362 watchdog_set_nowayout(cdns_wdt_device, nowayout);
363 watchdog_set_drvdata(cdns_wdt_device, wdt);
364
365 wdt->clk = devm_clk_get(&pdev->dev, NULL);
366 if (IS_ERR(wdt->clk)) {
367 dev_err(&pdev->dev, "input clock not found\n");
368 ret = PTR_ERR(wdt->clk);
369 return ret;
370 }
371
372 ret = clk_prepare_enable(wdt->clk);
373 if (ret) {
374 dev_err(&pdev->dev, "unable to enable clock\n");
375 return ret;
376 }
377
378 clock_f = clk_get_rate(wdt->clk);
379 if (clock_f <= CDNS_WDT_CLK_75MHZ) {
380 wdt->prescaler = CDNS_WDT_PRESCALE_512;
381 wdt->ctrl_clksel = CDNS_WDT_PRESCALE_SELECT_512;
382 } else {
383 wdt->prescaler = CDNS_WDT_PRESCALE_4096;
384 wdt->ctrl_clksel = CDNS_WDT_PRESCALE_SELECT_4096;
385 }
386
387 spin_lock_init(&wdt->io_lock);
388
389 wdt->cdns_wdt_notifier.notifier_call = &cdns_wdt_notify_sys;
390 ret = register_reboot_notifier(&wdt->cdns_wdt_notifier);
391 if (ret != 0) {
392		dev_err(&pdev->dev, "cannot register reboot notifier (err=%d)\n",
393 ret);
394 goto err_clk_disable;
395 }
396
397 ret = watchdog_register_device(cdns_wdt_device);
398 if (ret) {
399 dev_err(&pdev->dev, "Failed to register wdt device\n");
400 goto err_clk_disable;
401 }
402 platform_set_drvdata(pdev, wdt);
403
404 dev_dbg(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n",
405 wdt->regs, cdns_wdt_device->timeout,
406 nowayout ? ", nowayout" : "");
407
408 return 0;
409
410err_clk_disable:
411 clk_disable_unprepare(wdt->clk);
412
413 return ret;
414}
415
416/**
417 * cdns_wdt_remove - Remove call for the device.
418 *
419 * @pdev: handle to the platform device structure.
420 * Return: 0 on success, otherwise negative error.
421 *
422 * Unregister the device after releasing the resources.
423 */
424static int cdns_wdt_remove(struct platform_device *pdev)
425{
426 struct cdns_wdt *wdt = platform_get_drvdata(pdev);
427
428 cdns_wdt_stop(&wdt->cdns_wdt_device);
429 watchdog_unregister_device(&wdt->cdns_wdt_device);
430 unregister_reboot_notifier(&wdt->cdns_wdt_notifier);
431 clk_disable_unprepare(wdt->clk);
432
433 return 0;
434}
435
436/**
437 * cdns_wdt_shutdown - Stop the device.
438 *
439 * @pdev: handle to the platform structure.
440 *
441 */
442static void cdns_wdt_shutdown(struct platform_device *pdev)
443{
444 struct cdns_wdt *wdt = platform_get_drvdata(pdev);
445
446 cdns_wdt_stop(&wdt->cdns_wdt_device);
447 clk_disable_unprepare(wdt->clk);
448}
449
450/**
451 * cdns_wdt_suspend - Stop the device.
452 *
453 * @dev: handle to the device structure.
454 * Return: 0 always.
455 */
456static int __maybe_unused cdns_wdt_suspend(struct device *dev)
457{
458 struct platform_device *pdev = container_of(dev,
459 struct platform_device, dev);
460 struct cdns_wdt *wdt = platform_get_drvdata(pdev);
461
462 cdns_wdt_stop(&wdt->cdns_wdt_device);
463 clk_disable_unprepare(wdt->clk);
464
465 return 0;
466}
467
468/**
469 * cdns_wdt_resume - Resume the device.
470 *
471 * @dev: handle to the device structure.
472 * Return: 0 on success, errno otherwise.
473 */
474static int __maybe_unused cdns_wdt_resume(struct device *dev)
475{
476 int ret;
477 struct platform_device *pdev = container_of(dev,
478 struct platform_device, dev);
479 struct cdns_wdt *wdt = platform_get_drvdata(pdev);
480
481 ret = clk_prepare_enable(wdt->clk);
482 if (ret) {
483 dev_err(dev, "unable to enable clock\n");
484 return ret;
485 }
486 cdns_wdt_start(&wdt->cdns_wdt_device);
487
488 return 0;
489}
490
491static SIMPLE_DEV_PM_OPS(cdns_wdt_pm_ops, cdns_wdt_suspend, cdns_wdt_resume);
492
493static struct of_device_id cdns_wdt_of_match[] = {
494 { .compatible = "cdns,wdt-r1p2", },
495 { /* end of table */ }
496};
497MODULE_DEVICE_TABLE(of, cdns_wdt_of_match);
498
499/* Driver Structure */
500static struct platform_driver cdns_wdt_driver = {
501 .probe = cdns_wdt_probe,
502 .remove = cdns_wdt_remove,
503 .shutdown = cdns_wdt_shutdown,
504 .driver = {
505 .name = "cdns-wdt",
506 .owner = THIS_MODULE,
507 .of_match_table = cdns_wdt_of_match,
508 .pm = &cdns_wdt_pm_ops,
509 },
510};
511
512module_platform_driver(cdns_wdt_driver);
513
514MODULE_AUTHOR("Xilinx, Inc.");
515MODULE_DESCRIPTION("Watchdog driver for Cadence WDT");
516MODULE_LICENSE("GPL");
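
For readers checking the timeout arithmetic in cdns_wdt_start() above, the following standalone snippet (not part of the patch) mirrors the same calculation; the 100 MHz pclk and 10 s timeout are only assumed example values.

/*
 * Standalone sketch: reproduces the counter-reset-value arithmetic used by
 * cdns_wdt_start(). The clock rate and timeout below are example assumptions.
 */
#include <stdio.h>

#define CDNS_WDT_COUNTER_VALUE_DIVISOR	0x1000
#define CDNS_WDT_COUNTER_MAX		0xFFF
#define CDNS_WDT_CCR_CRV_MASK		0x00003FFC

int main(void)
{
	unsigned long clock_f = 100000000UL;	/* assumed pclk: 100 MHz */
	unsigned int prescaler = 4096;		/* selected for clocks above 75 MHz */
	unsigned int timeout = 10;		/* seconds */
	unsigned int count;

	/* count = (timeout * clock / prescaler) / 0x1000 + 1 */
	count = (timeout * (clock_f / prescaler)) /
		CDNS_WDT_COUNTER_VALUE_DIVISOR + 1;
	if (count > CDNS_WDT_COUNTER_MAX)
		count = CDNS_WDT_COUNTER_MAX;

	/* the field is shifted into bits 2..13 of CCR, as in the driver */
	printf("count = %u, CCR field = 0x%x\n",
	       count, (count << 2) & CDNS_WDT_CCR_CRV_MASK);
	return 0;
}
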
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
new file mode 100644
index 000000000000..2cd6b2c2dd2a
--- /dev/null
+++ b/drivers/watchdog/da9063_wdt.c
@@ -0,0 +1,191 @@
1/*
2 * Watchdog driver for DA9063 PMICs.
3 *
4 * Copyright(c) 2012 Dialog Semiconductor Ltd.
5 *
6 * Author: Mariusz Wojtasik <mariusz.wojtasik@diasemi.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/watchdog.h>
17#include <linux/platform_device.h>
18#include <linux/uaccess.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/mfd/da9063/registers.h>
22#include <linux/mfd/da9063/core.h>
23#include <linux/regmap.h>
24
25/*
26 * Watchdog selector to timeout in seconds.
27 * 0: WDT disabled;
28 * others: timeout = 2048 ms * 2^(TWDSCALE-1).
29 */
30static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
31#define DA9063_TWDSCALE_DISABLE 0
32#define DA9063_TWDSCALE_MIN 1
33#define DA9063_TWDSCALE_MAX (ARRAY_SIZE(wdt_timeout) - 1)
34#define DA9063_WDT_MIN_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MIN]
35#define DA9063_WDT_MAX_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MAX]
36#define DA9063_WDG_TIMEOUT wdt_timeout[3]
37
38struct da9063_watchdog {
39 struct da9063 *da9063;
40 struct watchdog_device wdtdev;
41};
42
43static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs)
44{
45 unsigned int i;
46
47 for (i = DA9063_TWDSCALE_MIN; i <= DA9063_TWDSCALE_MAX; i++) {
48 if (wdt_timeout[i] >= secs)
49 return i;
50 }
51
52 return DA9063_TWDSCALE_MAX;
53}
54
55static int _da9063_wdt_set_timeout(struct da9063 *da9063, unsigned int regval)
56{
57 return regmap_update_bits(da9063->regmap, DA9063_REG_CONTROL_D,
58 DA9063_TWDSCALE_MASK, regval);
59}
60
61static int da9063_wdt_start(struct watchdog_device *wdd)
62{
63 struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
64 unsigned int selector;
65 int ret;
66
67 selector = da9063_wdt_timeout_to_sel(wdt->wdtdev.timeout);
68 ret = _da9063_wdt_set_timeout(wdt->da9063, selector);
69 if (ret)
70 dev_err(wdt->da9063->dev, "Watchdog failed to start (err = %d)\n",
71 ret);
72
73 return ret;
74}
75
76static int da9063_wdt_stop(struct watchdog_device *wdd)
77{
78 struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
79 int ret;
80
81 ret = regmap_update_bits(wdt->da9063->regmap, DA9063_REG_CONTROL_D,
82 DA9063_TWDSCALE_MASK, DA9063_TWDSCALE_DISABLE);
83 if (ret)
84 dev_alert(wdt->da9063->dev, "Watchdog failed to stop (err = %d)\n",
85 ret);
86
87 return ret;
88}
89
90static int da9063_wdt_ping(struct watchdog_device *wdd)
91{
92 struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
93 int ret;
94
95 ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F,
96 DA9063_WATCHDOG);
97 if (ret)
98 dev_alert(wdt->da9063->dev, "Failed to ping the watchdog (err = %d)\n",
99 ret);
100
101 return ret;
102}
103
104static int da9063_wdt_set_timeout(struct watchdog_device *wdd,
105 unsigned int timeout)
106{
107 struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
108 unsigned int selector;
109 int ret;
110
111 selector = da9063_wdt_timeout_to_sel(timeout);
112 ret = _da9063_wdt_set_timeout(wdt->da9063, selector);
113 if (ret)
114 dev_err(wdt->da9063->dev, "Failed to set watchdog timeout (err = %d)\n",
115 ret);
116 else
117 wdd->timeout = wdt_timeout[selector];
118
119 return ret;
120}
121
122static const struct watchdog_info da9063_watchdog_info = {
123 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
124 .identity = "DA9063 Watchdog",
125};
126
127static const struct watchdog_ops da9063_watchdog_ops = {
128 .owner = THIS_MODULE,
129 .start = da9063_wdt_start,
130 .stop = da9063_wdt_stop,
131 .ping = da9063_wdt_ping,
132 .set_timeout = da9063_wdt_set_timeout,
133};
134
135static int da9063_wdt_probe(struct platform_device *pdev)
136{
137 int ret;
138 struct da9063 *da9063;
139 struct da9063_watchdog *wdt;
140
141 if (!pdev->dev.parent)
142 return -EINVAL;
143
144 da9063 = dev_get_drvdata(pdev->dev.parent);
145 if (!da9063)
146 return -EINVAL;
147
148 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
149 if (!wdt)
150 return -ENOMEM;
151
152 wdt->da9063 = da9063;
153
154 wdt->wdtdev.info = &da9063_watchdog_info;
155 wdt->wdtdev.ops = &da9063_watchdog_ops;
156 wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT;
157 wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT;
158 wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT;
159
160 wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
161
162 watchdog_set_drvdata(&wdt->wdtdev, wdt);
163 dev_set_drvdata(&pdev->dev, wdt);
164
165 ret = watchdog_register_device(&wdt->wdtdev);
166
167 return ret;
168}
169
170static int da9063_wdt_remove(struct platform_device *pdev)
171{
172 struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev);
173
174 watchdog_unregister_device(&wdt->wdtdev);
175
176 return 0;
177}
178
179static struct platform_driver da9063_wdt_driver = {
180 .probe = da9063_wdt_probe,
181 .remove = da9063_wdt_remove,
182 .driver = {
183 .name = DA9063_DRVNAME_WATCHDOG,
184 },
185};
186module_platform_driver(da9063_wdt_driver);
187
188MODULE_AUTHOR("Mariusz Wojtasik <mariusz.wojtasik@diasemi.com>");
189MODULE_DESCRIPTION("Watchdog driver for Dialog DA9063");
190MODULE_LICENSE("GPL");
191MODULE_ALIAS("platform:" DA9063_DRVNAME_WATCHDOG);
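
The TWDSCALE selection above (timeout = 2048 ms * 2^(TWDSCALE-1)) can be sanity-checked with the small standalone program below; it is not part of the patch and simply reproduces the da9063_wdt_timeout_to_sel() search for a few example requests.

/*
 * Standalone sketch: picks the smallest TWDSCALE whose timeout covers the
 * requested number of seconds, exactly as the driver does.
 */
#include <stdio.h>

static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
#define DA9063_TWDSCALE_MIN 1
#define DA9063_TWDSCALE_MAX 7

static unsigned int timeout_to_sel(unsigned int secs)
{
	unsigned int i;

	for (i = DA9063_TWDSCALE_MIN; i <= DA9063_TWDSCALE_MAX; i++)
		if (wdt_timeout[i] >= secs)
			return i;

	return DA9063_TWDSCALE_MAX;	/* clamp to the longest timeout */
}

int main(void)
{
	unsigned int secs[] = { 1, 10, 60, 200 };	/* example requests */
	unsigned int i, sel;

	for (i = 0; i < sizeof(secs) / sizeof(secs[0]); i++) {
		sel = timeout_to_sel(secs[i]);
		printf("%3u s -> TWDSCALE=%u (%u s effective)\n",
		       secs[i], sel, wdt_timeout[sel]);
	}
	return 0;
}
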
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 9f210299de24..9e577a64ec9e 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -21,6 +21,7 @@
 
 #include <linux/bitops.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/fs.h>
@@ -29,9 +30,11 @@
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/notifier.h>
 #include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
+#include <linux/reboot.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <linux/uaccess.h>
@@ -40,6 +43,7 @@
 #define WDOG_CONTROL_REG_OFFSET 0x00
 #define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
 #define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
+#define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4
 #define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
 #define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c
 #define WDOG_COUNTER_RESTART_KICK_VALUE 0x76
@@ -62,6 +66,7 @@ static struct {
 	unsigned long next_heartbeat;
 	struct timer_list timer;
 	int expect_close;
+	struct notifier_block restart_handler;
 } dw_wdt;
 
 static inline int dw_wdt_is_enabled(void)
@@ -106,7 +111,8 @@ static int dw_wdt_set_top(unsigned top_s)
 	}
 
 	/* Set the new value in the watchdog. */
-	writel(top_val, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+	writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
+	       dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
 
 	dw_wdt_set_next_heartbeat();
 
@@ -119,6 +125,26 @@ static void dw_wdt_keepalive(void)
 	       WDOG_COUNTER_RESTART_REG_OFFSET);
 }
 
+static int dw_wdt_restart_handle(struct notifier_block *this,
+				 unsigned long mode, void *cmd)
+{
+	u32 val;
+
+	writel(0, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+	val = readl(dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
+	if (val & WDOG_CONTROL_REG_WDT_EN_MASK)
+		writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
+		       WDOG_COUNTER_RESTART_REG_OFFSET);
+	else
+		writel(WDOG_CONTROL_REG_WDT_EN_MASK,
+		       dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
+
+	/* wait for reset to assert... */
+	mdelay(500);
+
+	return NOTIFY_DONE;
+}
+
 static void dw_wdt_ping(unsigned long data)
 {
 	if (time_before(jiffies, dw_wdt.next_heartbeat) ||
@@ -314,6 +340,12 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
 	if (ret)
 		goto out_disable_clk;
 
+	dw_wdt.restart_handler.notifier_call = dw_wdt_restart_handle;
+	dw_wdt.restart_handler.priority = 128;
+	ret = register_restart_handler(&dw_wdt.restart_handler);
+	if (ret)
+		pr_warn("cannot register restart handler\n");
+
 	dw_wdt_set_next_heartbeat();
 	setup_timer(&dw_wdt.timer, dw_wdt_ping, 0);
 	mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
@@ -328,6 +360,8 @@ out_disable_clk:
 
 static int dw_wdt_drv_remove(struct platform_device *pdev)
 {
+	unregister_restart_handler(&dw_wdt.restart_handler);
+
 	misc_deregister(&dw_wdt_miscdev);
 
 	clk_disable_unprepare(dw_wdt.clk);
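
The dw_wdt change above writes the timeout index into both halves of the TIMEOUT_RANGE register. The short standalone snippet below (not part of the patch) only illustrates the resulting register value; the 0x9 index is an arbitrary example and the bit layout is inferred from the TOPINIT shift used by the driver.

/*
 * Standalone sketch: the same "top" index lands in the TOP field and,
 * shifted by WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT, in the TOP_INIT field.
 */
#include <stdio.h>

#define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4

int main(void)
{
	unsigned int top_val = 0x9;	/* example timeout index */
	unsigned int reg = top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT;

	printf("TIMEOUT_RANGE = 0x%02x\n", reg);	/* prints 0x99 */
	return 0;
}
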
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 68c3d379ffa8..7e12f88bb4a6 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -22,14 +22,17 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/notifier.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/reboot.h>
 #include <linux/regmap.h>
 #include <linux/timer.h>
 #include <linux/watchdog.h>
@@ -59,6 +62,7 @@ struct imx2_wdt_device {
 	struct regmap *regmap;
 	struct timer_list timer;	/* Pings the watchdog when closed */
 	struct watchdog_device wdog;
+	struct notifier_block restart_handler;
 };
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -77,6 +81,31 @@ static const struct watchdog_info imx2_wdt_info = {
 	.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
 };
 
+static int imx2_restart_handler(struct notifier_block *this, unsigned long mode,
+				void *cmd)
+{
+	unsigned int wcr_enable = IMX2_WDT_WCR_WDE;
+	struct imx2_wdt_device *wdev = container_of(this,
+						    struct imx2_wdt_device,
+						    restart_handler);
+	/* Assert SRS signal */
+	regmap_write(wdev->regmap, 0, wcr_enable);
+	/*
+	 * Due to imx6q errata ERR004346 (WDOG: WDOG SRS bit requires to be
+	 * written twice), we add another two writes to ensure there must be at
+	 * least two writes happen in the same one 32kHz clock period. We save
+	 * the target check here, since the writes shouldn't be a huge burden
+	 * for other platforms.
+	 */
+	regmap_write(wdev->regmap, 0, wcr_enable);
+	regmap_write(wdev->regmap, 0, wcr_enable);
+
+	/* wait for reset to assert... */
+	mdelay(500);
+
+	return NOTIFY_DONE;
+}
+
 static inline void imx2_wdt_setup(struct watchdog_device *wdog)
 {
 	struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -191,12 +220,10 @@ static struct regmap_config imx2_wdt_regmap_config = {
 
 static int __init imx2_wdt_probe(struct platform_device *pdev)
 {
-	struct device_node *np = pdev->dev.of_node;
 	struct imx2_wdt_device *wdev;
 	struct watchdog_device *wdog;
 	struct resource *res;
 	void __iomem *base;
-	bool big_endian;
 	int ret;
 	u32 val;
 
@@ -204,10 +231,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
 	if (!wdev)
 		return -ENOMEM;
 
-	big_endian = of_property_read_bool(np, "big-endian");
-	if (big_endian)
-		imx2_wdt_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(base))
@@ -257,6 +280,12 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	wdev->restart_handler.notifier_call = imx2_restart_handler;
+	wdev->restart_handler.priority = 128;
+	ret = register_restart_handler(&wdev->restart_handler);
+	if (ret)
+		dev_err(&pdev->dev, "cannot register restart handler\n");
+
 	dev_info(&pdev->dev, "timeout %d sec (nowayout=%d)\n",
 		 wdog->timeout, nowayout);
 
@@ -268,6 +297,8 @@ static int __exit imx2_wdt_remove(struct platform_device *pdev)
 	struct watchdog_device *wdog = platform_get_drvdata(pdev);
 	struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
 
+	unregister_restart_handler(&wdev->restart_handler);
+
 	watchdog_unregister_device(wdog);
 
 	if (imx2_wdt_is_running(wdev)) {
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
new file mode 100644
index 000000000000..ef6a298e8c45
--- /dev/null
+++ b/drivers/watchdog/meson_wdt.c
@@ -0,0 +1,236 @@
1/*
2 * Meson Watchdog Driver
3 *
4 * Copyright (c) 2014 Carlo Caione
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/notifier.h>
21#include <linux/of.h>
22#include <linux/platform_device.h>
23#include <linux/reboot.h>
24#include <linux/types.h>
25#include <linux/watchdog.h>
26
27#define DRV_NAME "meson_wdt"
28
29#define MESON_WDT_TC 0x00
30#define MESON_WDT_TC_EN BIT(22)
31#define MESON_WDT_TC_TM_MASK 0x3fffff
32#define MESON_WDT_DC_RESET (3 << 24)
33
34#define MESON_WDT_RESET 0x04
35
36#define MESON_WDT_TIMEOUT 30
37#define MESON_WDT_MIN_TIMEOUT 1
38#define MESON_WDT_MAX_TIMEOUT (MESON_WDT_TC_TM_MASK / 100000)
39
40#define MESON_SEC_TO_TC(s) ((s) * 100000)
41
42static bool nowayout = WATCHDOG_NOWAYOUT;
43static unsigned int timeout = MESON_WDT_TIMEOUT;
44
45struct meson_wdt_dev {
46 struct watchdog_device wdt_dev;
47 void __iomem *wdt_base;
48 struct notifier_block restart_handler;
49};
50
51static int meson_restart_handle(struct notifier_block *this, unsigned long mode,
52 void *cmd)
53{
54 u32 tc_reboot = MESON_WDT_DC_RESET | MESON_WDT_TC_EN;
55 struct meson_wdt_dev *meson_wdt = container_of(this,
56 struct meson_wdt_dev,
57 restart_handler);
58
59 while (1) {
60 writel(tc_reboot, meson_wdt->wdt_base + MESON_WDT_TC);
61 mdelay(5);
62 }
63
64 return NOTIFY_DONE;
65}
66
67static int meson_wdt_ping(struct watchdog_device *wdt_dev)
68{
69 struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
70
71 writel(0, meson_wdt->wdt_base + MESON_WDT_RESET);
72
73 return 0;
74}
75
76static void meson_wdt_change_timeout(struct watchdog_device *wdt_dev,
77 unsigned int timeout)
78{
79 struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
80 u32 reg;
81
82 reg = readl(meson_wdt->wdt_base + MESON_WDT_TC);
83 reg &= ~MESON_WDT_TC_TM_MASK;
84 reg |= MESON_SEC_TO_TC(timeout);
85 writel(reg, meson_wdt->wdt_base + MESON_WDT_TC);
86}
87
88static int meson_wdt_set_timeout(struct watchdog_device *wdt_dev,
89 unsigned int timeout)
90{
91 wdt_dev->timeout = timeout;
92
93 meson_wdt_change_timeout(wdt_dev, timeout);
94 meson_wdt_ping(wdt_dev);
95
96 return 0;
97}
98
99static int meson_wdt_stop(struct watchdog_device *wdt_dev)
100{
101 struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
102 u32 reg;
103
104 reg = readl(meson_wdt->wdt_base + MESON_WDT_TC);
105 reg &= ~MESON_WDT_TC_EN;
106 writel(reg, meson_wdt->wdt_base + MESON_WDT_TC);
107
108 return 0;
109}
110
111static int meson_wdt_start(struct watchdog_device *wdt_dev)
112{
113 struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
114 u32 reg;
115
116 meson_wdt_change_timeout(wdt_dev, meson_wdt->wdt_dev.timeout);
117 meson_wdt_ping(wdt_dev);
118
119 reg = readl(meson_wdt->wdt_base + MESON_WDT_TC);
120 reg |= MESON_WDT_TC_EN;
121 writel(reg, meson_wdt->wdt_base + MESON_WDT_TC);
122
123 return 0;
124}
125
126static const struct watchdog_info meson_wdt_info = {
127 .identity = DRV_NAME,
128 .options = WDIOF_SETTIMEOUT |
129 WDIOF_KEEPALIVEPING |
130 WDIOF_MAGICCLOSE,
131};
132
133static const struct watchdog_ops meson_wdt_ops = {
134 .owner = THIS_MODULE,
135 .start = meson_wdt_start,
136 .stop = meson_wdt_stop,
137 .ping = meson_wdt_ping,
138 .set_timeout = meson_wdt_set_timeout,
139};
140
141static int meson_wdt_probe(struct platform_device *pdev)
142{
143 struct resource *res;
144 struct meson_wdt_dev *meson_wdt;
145 int err;
146
147 meson_wdt = devm_kzalloc(&pdev->dev, sizeof(*meson_wdt), GFP_KERNEL);
148 if (!meson_wdt)
149 return -ENOMEM;
150
151 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
152 meson_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
153 if (IS_ERR(meson_wdt->wdt_base))
154 return PTR_ERR(meson_wdt->wdt_base);
155
156 meson_wdt->wdt_dev.parent = &pdev->dev;
157 meson_wdt->wdt_dev.info = &meson_wdt_info;
158 meson_wdt->wdt_dev.ops = &meson_wdt_ops;
159 meson_wdt->wdt_dev.timeout = MESON_WDT_TIMEOUT;
160 meson_wdt->wdt_dev.max_timeout = MESON_WDT_MAX_TIMEOUT;
161 meson_wdt->wdt_dev.min_timeout = MESON_WDT_MIN_TIMEOUT;
162
163 watchdog_set_drvdata(&meson_wdt->wdt_dev, meson_wdt);
164
165 watchdog_init_timeout(&meson_wdt->wdt_dev, timeout, &pdev->dev);
166 watchdog_set_nowayout(&meson_wdt->wdt_dev, nowayout);
167
168 meson_wdt_stop(&meson_wdt->wdt_dev);
169
170 err = watchdog_register_device(&meson_wdt->wdt_dev);
171 if (err)
172 return err;
173
174 platform_set_drvdata(pdev, meson_wdt);
175
176 meson_wdt->restart_handler.notifier_call = meson_restart_handle;
177 meson_wdt->restart_handler.priority = 128;
178 err = register_restart_handler(&meson_wdt->restart_handler);
179 if (err)
180 dev_err(&pdev->dev,
181 "cannot register restart handler (err=%d)\n", err);
182
183 dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
184 meson_wdt->wdt_dev.timeout, nowayout);
185
186 return 0;
187}
188
189static int meson_wdt_remove(struct platform_device *pdev)
190{
191 struct meson_wdt_dev *meson_wdt = platform_get_drvdata(pdev);
192
193 unregister_restart_handler(&meson_wdt->restart_handler);
194
195 watchdog_unregister_device(&meson_wdt->wdt_dev);
196
197 return 0;
198}
199
200static void meson_wdt_shutdown(struct platform_device *pdev)
201{
202 struct meson_wdt_dev *meson_wdt = platform_get_drvdata(pdev);
203
204 meson_wdt_stop(&meson_wdt->wdt_dev);
205}
206
207static const struct of_device_id meson_wdt_dt_ids[] = {
208 { .compatible = "amlogic,meson6-wdt" },
209 { /* sentinel */ }
210};
211MODULE_DEVICE_TABLE(of, meson_wdt_dt_ids);
212
213static struct platform_driver meson_wdt_driver = {
214 .probe = meson_wdt_probe,
215 .remove = meson_wdt_remove,
216 .shutdown = meson_wdt_shutdown,
217 .driver = {
218 .owner = THIS_MODULE,
219 .name = DRV_NAME,
220 .of_match_table = meson_wdt_dt_ids,
221 },
222};
223
224module_platform_driver(meson_wdt_driver);
225
226module_param(timeout, uint, 0);
227MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds");
228
229module_param(nowayout, bool, 0);
230MODULE_PARM_DESC(nowayout,
231 "Watchdog cannot be stopped once started (default="
232 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
233
234MODULE_LICENSE("GPL");
235MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
236MODULE_DESCRIPTION("Meson Watchdog Timer Driver");
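
A quick way to see where meson_wdt's 30 s default and its maximum timeout come from is the standalone check below (not part of the patch); it only re-derives the numbers from MESON_WDT_TC_TM_MASK and MESON_SEC_TO_TC().

/*
 * Standalone sketch: the terminal-count field ticks 100000 times per second,
 * so the 22-bit MESON_WDT_TC_TM_MASK bounds the timeout at 0x3fffff / 100000
 * = 41 seconds.
 */
#include <stdio.h>

#define MESON_WDT_TC_TM_MASK	0x3fffff
#define MESON_SEC_TO_TC(s)	((s) * 100000)

int main(void)
{
	unsigned int timeout = 30;	/* driver default, in seconds */

	printf("max timeout = %u s\n",
	       (unsigned int)(MESON_WDT_TC_TM_MASK / 100000));
	printf("TC field for %u s = 0x%06x\n",
	       timeout, MESON_SEC_TO_TC(timeout));
	return 0;
}
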
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 1e6e28df5d7b..b2e1b4cbbdc1 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -236,7 +236,6 @@ static struct platform_driver xwdt_driver = {
 	.probe = xwdt_probe,
 	.remove = xwdt_remove,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = WATCHDOG_NAME,
 		.of_match_table = xwdt_of_match,
 	},
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
new file mode 100644
index 000000000000..aa85618c4d03
--- /dev/null
+++ b/drivers/watchdog/qcom-wdt.c
@@ -0,0 +1,224 @@
1/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/io.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/platform_device.h>
20#include <linux/reboot.h>
21#include <linux/watchdog.h>
22
23#define WDT_RST 0x0
24#define WDT_EN 0x8
25#define WDT_BITE_TIME 0x24
26
27struct qcom_wdt {
28 struct watchdog_device wdd;
29 struct clk *clk;
30 unsigned long rate;
31 struct notifier_block restart_nb;
32 void __iomem *base;
33};
34
35static inline
36struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
37{
38 return container_of(wdd, struct qcom_wdt, wdd);
39}
40
41static int qcom_wdt_start(struct watchdog_device *wdd)
42{
43 struct qcom_wdt *wdt = to_qcom_wdt(wdd);
44
45 writel(0, wdt->base + WDT_EN);
46 writel(1, wdt->base + WDT_RST);
47 writel(wdd->timeout * wdt->rate, wdt->base + WDT_BITE_TIME);
48 writel(1, wdt->base + WDT_EN);
49 return 0;
50}
51
52static int qcom_wdt_stop(struct watchdog_device *wdd)
53{
54 struct qcom_wdt *wdt = to_qcom_wdt(wdd);
55
56 writel(0, wdt->base + WDT_EN);
57 return 0;
58}
59
60static int qcom_wdt_ping(struct watchdog_device *wdd)
61{
62 struct qcom_wdt *wdt = to_qcom_wdt(wdd);
63
64 writel(1, wdt->base + WDT_RST);
65 return 0;
66}
67
68static int qcom_wdt_set_timeout(struct watchdog_device *wdd,
69 unsigned int timeout)
70{
71 wdd->timeout = timeout;
72 return qcom_wdt_start(wdd);
73}
74
75static const struct watchdog_ops qcom_wdt_ops = {
76 .start = qcom_wdt_start,
77 .stop = qcom_wdt_stop,
78 .ping = qcom_wdt_ping,
79 .set_timeout = qcom_wdt_set_timeout,
80 .owner = THIS_MODULE,
81};
82
83static const struct watchdog_info qcom_wdt_info = {
84 .options = WDIOF_KEEPALIVEPING
85 | WDIOF_MAGICCLOSE
86 | WDIOF_SETTIMEOUT,
87 .identity = KBUILD_MODNAME,
88};
89
90static int qcom_wdt_restart(struct notifier_block *nb, unsigned long action,
91 void *data)
92{
93 struct qcom_wdt *wdt = container_of(nb, struct qcom_wdt, restart_nb);
94 u32 timeout;
95
96 /*
97 * Trigger watchdog bite:
98 * Setup BITE_TIME to be 128ms, and enable WDT.
99 */
100 timeout = 128 * wdt->rate / 1000;
101
102 writel(0, wdt->base + WDT_EN);
103 writel(1, wdt->base + WDT_RST);
104 writel(timeout, wdt->base + WDT_BITE_TIME);
105 writel(1, wdt->base + WDT_EN);
106
107 /*
108 * Actually make sure the above sequence hits hardware before sleeping.
109 */
110 wmb();
111
112 msleep(150);
113 return NOTIFY_DONE;
114}
115
116static int qcom_wdt_probe(struct platform_device *pdev)
117{
118 struct qcom_wdt *wdt;
119 struct resource *res;
120 int ret;
121
122 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
123 if (!wdt)
124 return -ENOMEM;
125
126 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
127 wdt->base = devm_ioremap_resource(&pdev->dev, res);
128 if (IS_ERR(wdt->base))
129 return PTR_ERR(wdt->base);
130
131 wdt->clk = devm_clk_get(&pdev->dev, NULL);
132 if (IS_ERR(wdt->clk)) {
133 dev_err(&pdev->dev, "failed to get input clock\n");
134 return PTR_ERR(wdt->clk);
135 }
136
137 ret = clk_prepare_enable(wdt->clk);
138 if (ret) {
139 dev_err(&pdev->dev, "failed to setup clock\n");
140 return ret;
141 }
142
143 /*
144 * We use the clock rate to calculate the max timeout, so ensure it's
145 * not zero to avoid a divide-by-zero exception.
146 *
147	 * WATCHDOG_CORE assumes units of seconds; if the WDT is clocked such
148	 * that it would bite before a second elapses, its usefulness is
149	 * limited. Bail if this is the case.
150 */
151 wdt->rate = clk_get_rate(wdt->clk);
152 if (wdt->rate == 0 ||
153 wdt->rate > 0x10000000U) {
154 dev_err(&pdev->dev, "invalid clock rate\n");
155 ret = -EINVAL;
156 goto err_clk_unprepare;
157 }
158
159 wdt->wdd.dev = &pdev->dev;
160 wdt->wdd.info = &qcom_wdt_info;
161 wdt->wdd.ops = &qcom_wdt_ops;
162 wdt->wdd.min_timeout = 1;
163 wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
164
165 /*
166 * If 'timeout-sec' unspecified in devicetree, assume a 30 second
167 * default, unless the max timeout is less than 30 seconds, then use
168 * the max instead.
169 */
170 wdt->wdd.timeout = min(wdt->wdd.max_timeout, 30U);
171 watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
172
173 ret = watchdog_register_device(&wdt->wdd);
174 if (ret) {
175 dev_err(&pdev->dev, "failed to register watchdog\n");
176 goto err_clk_unprepare;
177 }
178
179 /*
180 * WDT restart notifier has priority 0 (use as a last resort)
181 */
182 wdt->restart_nb.notifier_call = qcom_wdt_restart;
183 ret = register_restart_handler(&wdt->restart_nb);
184 if (ret)
185 dev_err(&pdev->dev, "failed to setup restart handler\n");
186
187 platform_set_drvdata(pdev, wdt);
188 return 0;
189
190err_clk_unprepare:
191 clk_disable_unprepare(wdt->clk);
192 return ret;
193}
194
195static int qcom_wdt_remove(struct platform_device *pdev)
196{
197 struct qcom_wdt *wdt = platform_get_drvdata(pdev);
198
199 unregister_restart_handler(&wdt->restart_nb);
200 watchdog_unregister_device(&wdt->wdd);
201 clk_disable_unprepare(wdt->clk);
202 return 0;
203}
204
205static const struct of_device_id qcom_wdt_of_table[] = {
206 { .compatible = "qcom,kpss-wdt-msm8960", },
207 { .compatible = "qcom,kpss-wdt-apq8064", },
208 { .compatible = "qcom,kpss-wdt-ipq8064", },
209 { },
210};
211MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
212
213static struct platform_driver qcom_watchdog_driver = {
214 .probe = qcom_wdt_probe,
215 .remove = qcom_wdt_remove,
216 .driver = {
217 .name = KBUILD_MODNAME,
218 .of_match_table = qcom_wdt_of_table,
219 },
220};
221module_platform_driver(qcom_watchdog_driver);
222
223MODULE_DESCRIPTION("QCOM KPSS Watchdog Driver");
224MODULE_LICENSE("GPL v2");
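
To make the max_timeout bound in qcom_wdt_probe() concrete, the standalone snippet below (not part of the patch) evaluates it for an assumed 32768 Hz input clock; the actual rate comes from the clock framework at runtime.

/*
 * Standalone sketch: the driver caps the counter at 0x10000000 ticks, so the
 * maximum timeout in seconds is 0x10000000 / rate, and the restart path
 * programs a 128 ms bite time of 128 * rate / 1000 ticks.
 */
#include <stdio.h>

int main(void)
{
	unsigned long rate = 32768;	/* assumed WDT input clock, in Hz */

	printf("max timeout = %lu s\n", 0x10000000UL / rate);
	printf("restart BITE_TIME = %lu ticks\n", 128 * rate / 1000);
	return 0;
}
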
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
new file mode 100644
index 000000000000..d1c12278cb6a
--- /dev/null
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -0,0 +1,198 @@
1/*
2 * Watchdog driver for Ricoh RN5T618 PMIC
3 *
4 * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * You should have received a copy of the GNU General Public License
11 * along with this program. If not, see <http://www.gnu.org/licenses/>.
12 */
13
14#include <linux/device.h>
15#include <linux/mfd/rn5t618.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/watchdog.h>
19
20#define DRIVER_NAME "rn5t618-wdt"
21
22static bool nowayout = WATCHDOG_NOWAYOUT;
23static unsigned int timeout;
24
25module_param(timeout, uint, 0);
26MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds");
27
28module_param(nowayout, bool, 0);
29MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
30 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
31
32struct rn5t618_wdt {
33 struct watchdog_device wdt_dev;
34 struct rn5t618 *rn5t618;
35};
36
37/*
38 * This array encodes the values of WDOGTIM field for the supported
39 * watchdog expiration times. If the watchdog is not accessed before
40 * the timer expiration, the PMU generates an interrupt and if the CPU
41 * doesn't clear it within one second the system is restarted.
42 */
43static const struct {
44 u8 reg_val;
45 unsigned int time;
46} rn5t618_wdt_map[] = {
47 { 0, 1 },
48 { 1, 8 },
49 { 2, 32 },
50 { 3, 128 },
51};
52
53static int rn5t618_wdt_set_timeout(struct watchdog_device *wdt_dev,
54 unsigned int t)
55{
56 struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
57 int ret, i;
58
59 for (i = 0; i < ARRAY_SIZE(rn5t618_wdt_map); i++) {
60 if (rn5t618_wdt_map[i].time + 1 >= t)
61 break;
62 }
63
64 if (i == ARRAY_SIZE(rn5t618_wdt_map))
65 return -EINVAL;
66
67 ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
68 RN5T618_WATCHDOG_WDOGTIM_M,
69 rn5t618_wdt_map[i].reg_val);
70 if (!ret)
71 wdt_dev->timeout = rn5t618_wdt_map[i].time;
72
73 return ret;
74}
75
76static int rn5t618_wdt_start(struct watchdog_device *wdt_dev)
77{
78 struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
79 int ret;
80
81 ret = rn5t618_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
82 if (ret)
83 return ret;
84
85 /* enable repower-on */
86 ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_REPCNT,
87 RN5T618_REPCNT_REPWRON,
88 RN5T618_REPCNT_REPWRON);
89 if (ret)
90 return ret;
91
92 /* enable watchdog */
93 ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
94 RN5T618_WATCHDOG_WDOGEN,
95 RN5T618_WATCHDOG_WDOGEN);
96 if (ret)
97 return ret;
98
99 /* enable watchdog interrupt */
100 return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_PWRIREN,
101 RN5T618_PWRIRQ_IR_WDOG,
102 RN5T618_PWRIRQ_IR_WDOG);
103}
104
105static int rn5t618_wdt_stop(struct watchdog_device *wdt_dev)
106{
107 struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
108
109 return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
110 RN5T618_WATCHDOG_WDOGEN, 0);
111}
112
113static int rn5t618_wdt_ping(struct watchdog_device *wdt_dev)
114{
115 struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
116 unsigned int val;
117 int ret;
118
119	/* The counter is restarted after an R/W access to the watchdog register */
120 ret = regmap_read(wdt->rn5t618->regmap, RN5T618_WATCHDOG, &val);
121 if (ret)
122 return ret;
123
124 ret = regmap_write(wdt->rn5t618->regmap, RN5T618_WATCHDOG, val);
125 if (ret)
126 return ret;
127
128 /* Clear pending watchdog interrupt */
129 return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_PWRIRQ,
130 RN5T618_PWRIRQ_IR_WDOG, 0);
131}
132
133static struct watchdog_info rn5t618_wdt_info = {
134 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
135 WDIOF_KEEPALIVEPING,
136 .identity = DRIVER_NAME,
137};
138
139static struct watchdog_ops rn5t618_wdt_ops = {
140 .owner = THIS_MODULE,
141 .start = rn5t618_wdt_start,
142 .stop = rn5t618_wdt_stop,
143 .ping = rn5t618_wdt_ping,
144 .set_timeout = rn5t618_wdt_set_timeout,
145};
146
147static int rn5t618_wdt_probe(struct platform_device *pdev)
148{
149 struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
150 struct rn5t618_wdt *wdt;
151 int min_timeout, max_timeout;
152
153 wdt = devm_kzalloc(&pdev->dev, sizeof(struct rn5t618_wdt), GFP_KERNEL);
154 if (!wdt)
155 return -ENOMEM;
156
157 min_timeout = rn5t618_wdt_map[0].time;
158 max_timeout = rn5t618_wdt_map[ARRAY_SIZE(rn5t618_wdt_map) - 1].time;
159
160 wdt->rn5t618 = rn5t618;
161 wdt->wdt_dev.info = &rn5t618_wdt_info;
162 wdt->wdt_dev.ops = &rn5t618_wdt_ops;
163 wdt->wdt_dev.min_timeout = min_timeout;
164 wdt->wdt_dev.max_timeout = max_timeout;
165 wdt->wdt_dev.timeout = max_timeout;
166 wdt->wdt_dev.parent = &pdev->dev;
167
168 watchdog_set_drvdata(&wdt->wdt_dev, wdt);
169 watchdog_init_timeout(&wdt->wdt_dev, timeout, &pdev->dev);
170 watchdog_set_nowayout(&wdt->wdt_dev, nowayout);
171
172 platform_set_drvdata(pdev, wdt);
173
174 return watchdog_register_device(&wdt->wdt_dev);
175}
176
177static int rn5t618_wdt_remove(struct platform_device *pdev)
178{
179 struct rn5t618_wdt *wdt = platform_get_drvdata(pdev);
180
181 watchdog_unregister_device(&wdt->wdt_dev);
182
183 return 0;
184}
185
186static struct platform_driver rn5t618_wdt_driver = {
187 .probe = rn5t618_wdt_probe,
188 .remove = rn5t618_wdt_remove,
189 .driver = {
190 .name = DRIVER_NAME,
191 },
192};
193
194module_platform_driver(rn5t618_wdt_driver);
195
196MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
197MODULE_DESCRIPTION("RN5T618 watchdog driver");
198MODULE_LICENSE("GPL v2");
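
The WDOGTIM selection above rounds a requested timeout up to the next supported expiration time, with one second of slack. The standalone snippet below (not part of the patch) reproduces that selection for a few example requests.

/*
 * Standalone sketch: mirrors the lookup in rn5t618_wdt_set_timeout(), which
 * accepts an entry whose time is at most one second below the request.
 */
#include <stdio.h>

static const struct {
	unsigned int reg_val;
	unsigned int time;
} rn5t618_wdt_map[] = {
	{ 0, 1 },
	{ 1, 8 },
	{ 2, 32 },
	{ 3, 128 },
};

int main(void)
{
	unsigned int req[] = { 1, 9, 33, 128, 200 };	/* example requests */
	unsigned int i, j;
	unsigned int n = sizeof(rn5t618_wdt_map) / sizeof(rn5t618_wdt_map[0]);

	for (j = 0; j < sizeof(req) / sizeof(req[0]); j++) {
		for (i = 0; i < n; i++)
			if (rn5t618_wdt_map[i].time + 1 >= req[j])
				break;
		if (i == n)
			printf("%3u s -> -EINVAL (out of range)\n", req[j]);
		else
			printf("%3u s -> WDOGTIM=%u (%u s)\n", req[j],
			       rn5t618_wdt_map[i].reg_val,
			       rn5t618_wdt_map[i].time);
	}
	return 0;
}
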
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 7c6ccd071baf..8532c3e2aea7 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -41,6 +41,8 @@
 #include <linux/of.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
 
 #define S3C2410_WTCON 0x00
 #define S3C2410_WTDAT 0x04
@@ -128,6 +130,7 @@ struct s3c2410_wdt {
 	unsigned long wtdat_save;
 	struct watchdog_device wdt_device;
 	struct notifier_block freq_transition;
+	struct notifier_block restart_handler;
 	struct s3c2410_wdt_variant *drv_data;
 	struct regmap *pmureg;
 };
@@ -155,6 +158,15 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
 	.quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
 };
 
+static const struct s3c2410_wdt_variant drv_data_exynos7 = {
+	.disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
+	.mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
+	.mask_bit = 0,
+	.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+	.rst_stat_bit = 23,	/* A57 WDTRESET */
+	.quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
+};
+
 static const struct of_device_id s3c2410_wdt_match[] = {
 	{ .compatible = "samsung,s3c2410-wdt",
 	  .data = &drv_data_s3c2410 },
@@ -162,6 +174,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
 	  .data = &drv_data_exynos5250 },
 	{ .compatible = "samsung,exynos5420-wdt",
 	  .data = &drv_data_exynos5420 },
+	{ .compatible = "samsung,exynos7-wdt",
+	  .data = &drv_data_exynos7 },
 	{},
 };
 MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
@@ -438,6 +452,31 @@ static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
 }
 #endif
 
+static int s3c2410wdt_restart(struct notifier_block *this,
+			      unsigned long mode, void *cmd)
+{
+	struct s3c2410_wdt *wdt = container_of(this, struct s3c2410_wdt,
+					       restart_handler);
+	void __iomem *wdt_base = wdt->reg_base;
+
+	/* disable watchdog, to be safe */
+	writel(0, wdt_base + S3C2410_WTCON);
+
+	/* put initial values into count and data */
+	writel(0x80, wdt_base + S3C2410_WTCNT);
+	writel(0x80, wdt_base + S3C2410_WTDAT);
+
+	/* set the watchdog to go and reset... */
+	writel(S3C2410_WTCON_ENABLE | S3C2410_WTCON_DIV16 |
+	       S3C2410_WTCON_RSTEN | S3C2410_WTCON_PRESCALE(0x20),
+	       wdt_base + S3C2410_WTCON);
+
+	/* wait for reset to assert... */
+	mdelay(500);
+
+	return NOTIFY_DONE;
+}
+
 static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
 {
 	unsigned int rst_stat;
@@ -592,6 +631,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, wdt);
 
+	wdt->restart_handler.notifier_call = s3c2410wdt_restart;
+	wdt->restart_handler.priority = 128;
+	ret = register_restart_handler(&wdt->restart_handler);
+	if (ret)
+		pr_err("cannot register restart handler, %d\n", ret);
+
 	/* print out a statement of readiness */
 
 	wtcon = readl(wdt->reg_base + S3C2410_WTCON);
@@ -621,6 +666,8 @@ static int s3c2410wdt_remove(struct platform_device *dev)
 	int ret;
 	struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
 
+	unregister_restart_handler(&wdt->restart_handler);
+
 	ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index 3804d5e9baea..a62b1b6decf4 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -94,9 +94,33 @@ static int stmp3xxx_wdt_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static int __maybe_unused stmp3xxx_wdt_suspend(struct device *dev)
+{
+	struct watchdog_device *wdd = &stmp3xxx_wdd;
+
+	if (watchdog_active(wdd))
+		return wdt_stop(wdd);
+
+	return 0;
+}
+
+static int __maybe_unused stmp3xxx_wdt_resume(struct device *dev)
+{
+	struct watchdog_device *wdd = &stmp3xxx_wdd;
+
+	if (watchdog_active(wdd))
+		return wdt_start(wdd);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stmp3xxx_wdt_pm_ops,
+			 stmp3xxx_wdt_suspend, stmp3xxx_wdt_resume);
+
 static struct platform_driver stmp3xxx_wdt_driver = {
 	.driver = {
 		.name = "stmp3xxx_rtc_wdt",
+		.pm = &stmp3xxx_wdt_pm_ops,
 	},
 	.probe = stmp3xxx_wdt_probe,
 	.remove = stmp3xxx_wdt_remove,
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 480bb557f353..b62301e74e5f 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -23,6 +23,7 @@
 #include <linux/moduleparam.h>
 #include <linux/notifier.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
 #include <linux/types.h>
@@ -30,15 +31,11 @@
 
 #define WDT_MAX_TIMEOUT		16
 #define WDT_MIN_TIMEOUT		1
-#define WDT_MODE_TIMEOUT(n)	((n) << 3)
-#define WDT_TIMEOUT_MASK	WDT_MODE_TIMEOUT(0x0F)
+#define WDT_TIMEOUT_MASK	0x0F
 
-#define WDT_CTRL		0x00
 #define WDT_CTRL_RELOAD		((1 << 0) | (0x0a57 << 1))
 
-#define WDT_MODE		0x04
 #define WDT_MODE_EN		(1 << 0)
-#define WDT_MODE_RST_EN		(1 << 1)
 
 #define DRV_NAME		"sunxi-wdt"
 #define DRV_VERSION		"1.0"
@@ -46,15 +43,29 @@
 static bool nowayout = WATCHDOG_NOWAYOUT;
 static unsigned int timeout = WDT_MAX_TIMEOUT;
 
+/*
+ * This structure stores the register offsets for different variants
+ * of Allwinner's watchdog hardware.
+ */
+struct sunxi_wdt_reg {
+	u8 wdt_ctrl;
+	u8 wdt_cfg;
+	u8 wdt_mode;
+	u8 wdt_timeout_shift;
+	u8 wdt_reset_mask;
+	u8 wdt_reset_val;
+};
+
 struct sunxi_wdt_dev {
 	struct watchdog_device wdt_dev;
 	void __iomem *wdt_base;
+	const struct sunxi_wdt_reg *wdt_regs;
 	struct notifier_block restart_handler;
 };
 
 /*
  * wdt_timeout_map maps the watchdog timer interval value in seconds to
- * the value of the register WDT_MODE bit 3:6
+ * the value of the register WDT_MODE at bits .wdt_timeout_shift ~ +3
  *
  * [timeout seconds] = register value
  *
@@ -82,19 +93,32 @@ static int sunxi_restart_handle(struct notifier_block *this, unsigned long mode,
 					       struct sunxi_wdt_dev,
 					       restart_handler);
 	void __iomem *wdt_base = sunxi_wdt->wdt_base;
+	const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
+	u32 val;
+
+	/* Set system reset function */
+	val = readl(wdt_base + regs->wdt_cfg);
+	val &= ~(regs->wdt_reset_mask);
+	val |= regs->wdt_reset_val;
+	writel(val, wdt_base + regs->wdt_cfg);
 
-	/* Enable timer and set reset bit in the watchdog */
-	writel(WDT_MODE_EN | WDT_MODE_RST_EN, wdt_base + WDT_MODE);
+	/* Set lowest timeout and enable watchdog */
+	val = readl(wdt_base + regs->wdt_mode);
+	val &= ~(WDT_TIMEOUT_MASK << regs->wdt_timeout_shift);
+	val |= WDT_MODE_EN;
+	writel(val, wdt_base + regs->wdt_mode);
 
 	/*
 	 * Restart the watchdog. The default (and lowest) interval
 	 * value for the watchdog is 0.5s.
 	 */
-	writel(WDT_CTRL_RELOAD, wdt_base + WDT_CTRL);
+	writel(WDT_CTRL_RELOAD, wdt_base + regs->wdt_ctrl);
 
 	while (1) {
 		mdelay(5);
-		writel(WDT_MODE_EN | WDT_MODE_RST_EN, wdt_base + WDT_MODE);
+		val = readl(wdt_base + regs->wdt_mode);
+		val |= WDT_MODE_EN;
+		writel(val, wdt_base + regs->wdt_mode);
 	}
 	return NOTIFY_DONE;
 }
@@ -103,8 +127,9 @@ static int sunxi_wdt_ping(struct watchdog_device *wdt_dev)
 {
 	struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
 	void __iomem *wdt_base = sunxi_wdt->wdt_base;
+	const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
 
-	iowrite32(WDT_CTRL_RELOAD, wdt_base + WDT_CTRL);
+	writel(WDT_CTRL_RELOAD, wdt_base + regs->wdt_ctrl);
 
 	return 0;
 }
@@ -114,6 +139,7 @@ static int sunxi_wdt_set_timeout(struct watchdog_device *wdt_dev,
 {
 	struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
 	void __iomem *wdt_base = sunxi_wdt->wdt_base;
+	const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
 	u32 reg;
 
 	if (wdt_timeout_map[timeout] == 0)
@@ -121,10 +147,10 @@ static int sunxi_wdt_set_timeout(struct watchdog_device *wdt_dev,
 
 	sunxi_wdt->wdt_dev.timeout = timeout;
 
-	reg = ioread32(wdt_base + WDT_MODE);
-	reg &= ~WDT_TIMEOUT_MASK;
-	reg |= WDT_MODE_TIMEOUT(wdt_timeout_map[timeout]);
-	iowrite32(reg, wdt_base + WDT_MODE);
+	reg = readl(wdt_base + regs->wdt_mode);
+	reg &= ~(WDT_TIMEOUT_MASK << regs->wdt_timeout_shift);
+	reg |= wdt_timeout_map[timeout] << regs->wdt_timeout_shift;
+	writel(reg, wdt_base + regs->wdt_mode);
 
 	sunxi_wdt_ping(wdt_dev);
 
@@ -135,8 +161,9 @@ static int sunxi_wdt_stop(struct watchdog_device *wdt_dev)
 {
 	struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
 	void __iomem *wdt_base = sunxi_wdt->wdt_base;
+	const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
 
-	iowrite32(0, wdt_base + WDT_MODE);
+	writel(0, wdt_base + regs->wdt_mode);
 
 	return 0;
 }
@@ -146,6 +173,7 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
 	u32 reg;
 	struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
 	void __iomem *wdt_base = sunxi_wdt->wdt_base;
+	const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
 	int ret;
 
 	ret = sunxi_wdt_set_timeout(&sunxi_wdt->wdt_dev,
@@ -153,9 +181,16 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
 	if (ret < 0)
 		return ret;
 
-	reg = ioread32(wdt_base + WDT_MODE);
-	reg |= (WDT_MODE_RST_EN | WDT_MODE_EN);
-	iowrite32(reg, wdt_base + WDT_MODE);
+	/* Set system reset function */
+	reg = readl(wdt_base + regs->wdt_cfg);
+	reg &= ~(regs->wdt_reset_mask);
+	reg |= ~(regs->wdt_reset_val);
+	writel(reg, wdt_base + regs->wdt_cfg);
+
+	/* Enable watchdog */
+	reg = readl(wdt_base + regs->wdt_mode);
+	reg |= WDT_MODE_EN;
+	writel(reg, wdt_base + regs->wdt_mode);
 
 	return 0;
 }
@@ -175,9 +210,35 @@ static const struct watchdog_ops sunxi_wdt_ops = {
 	.set_timeout	= sunxi_wdt_set_timeout,
 };
 
+static const struct sunxi_wdt_reg sun4i_wdt_reg = {
+	.wdt_ctrl = 0x00,
+	.wdt_cfg = 0x04,
+	.wdt_mode = 0x04,
+	.wdt_timeout_shift = 3,
+	.wdt_reset_mask = 0x02,
+	.wdt_reset_val = 0x02,
+};
+
+static const struct sunxi_wdt_reg sun6i_wdt_reg = {
+	.wdt_ctrl = 0x10,
+	.wdt_cfg = 0x14,
+	.wdt_mode = 0x18,
+	.wdt_timeout_shift = 4,
+	.wdt_reset_mask = 0x03,
+	.wdt_reset_val = 0x01,
+};
+
+static const struct of_device_id sunxi_wdt_dt_ids[] = {
+	{ .compatible = "allwinner,sun4i-a10-wdt", .data = &sun4i_wdt_reg },
+	{ .compatible = "allwinner,sun6i-a31-wdt", .data = &sun6i_wdt_reg },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
+
 static int sunxi_wdt_probe(struct platform_device *pdev)
 {
 	struct sunxi_wdt_dev *sunxi_wdt;
+	const struct of_device_id *device;
 	struct resource *res;
 	int err;
 
@@ -187,6 +248,12 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, sunxi_wdt);
 
+	device = of_match_device(sunxi_wdt_dt_ids, &pdev->dev);
+	if (!device)
+		return -ENODEV;
+
+	sunxi_wdt->wdt_regs = device->data;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	sunxi_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(sunxi_wdt->wdt_base))
@@ -242,12 +309,6 @@ static void sunxi_wdt_shutdown(struct platform_device *pdev)
 	sunxi_wdt_stop(&sunxi_wdt->wdt_dev);
 }
 
-static const struct of_device_id sunxi_wdt_dt_ids[] = {
-	{ .compatible = "allwinner,sun4i-a10-wdt" },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
-
 static struct platform_driver sunxi_wdt_driver = {
 	.probe		= sunxi_wdt_probe,
 	.remove		= sunxi_wdt_remove,
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index afa9d6ef353a..dee9c6cbe6df 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -428,11 +428,7 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
 
 static int ts72xx_wdt_remove(struct platform_device *pdev)
 {
-	int error;
-
-	error = misc_deregister(&ts72xx_wdt_miscdev);
-
-	return error;
+	return misc_deregister(&ts72xx_wdt_miscdev);
 }
 
 static struct platform_driver ts72xx_wdt_driver = {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 1e0a317d3dcd..3860d02729dc 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -167,6 +167,9 @@ static struct page *balloon_next_page(struct page *page)
 
 static enum bp_state update_schedule(enum bp_state state)
 {
+	if (state == BP_ECANCELED)
+		return BP_ECANCELED;
+
 	if (state == BP_DONE) {
 		balloon_stats.schedule_delay = 1;
 		balloon_stats.retry_count = 1;
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index dd9c249ea311..95ee4302ffb8 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -41,24 +41,29 @@ static int xen_add_device(struct device *dev)
 #endif
 
 	if (pci_seg_supported) {
-		struct physdev_pci_device_add add = {
-			.seg = pci_domain_nr(pci_dev->bus),
-			.bus = pci_dev->bus->number,
-			.devfn = pci_dev->devfn
+		struct {
+			struct physdev_pci_device_add add;
+			uint32_t pxm;
+		} add_ext = {
+			.add.seg = pci_domain_nr(pci_dev->bus),
+			.add.bus = pci_dev->bus->number,
+			.add.devfn = pci_dev->devfn
 		};
+		struct physdev_pci_device_add *add = &add_ext.add;
+
 #ifdef CONFIG_ACPI
 		acpi_handle handle;
 #endif
 
 #ifdef CONFIG_PCI_IOV
 		if (pci_dev->is_virtfn) {
-			add.flags = XEN_PCI_DEV_VIRTFN;
-			add.physfn.bus = physfn->bus->number;
-			add.physfn.devfn = physfn->devfn;
+			add->flags = XEN_PCI_DEV_VIRTFN;
+			add->physfn.bus = physfn->bus->number;
+			add->physfn.devfn = physfn->devfn;
 		} else
 #endif
 		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
-			add.flags = XEN_PCI_DEV_EXTFN;
+			add->flags = XEN_PCI_DEV_EXTFN;
 
 #ifdef CONFIG_ACPI
 		handle = ACPI_HANDLE(&pci_dev->dev);
@@ -77,8 +82,8 @@ static int xen_add_device(struct device *dev)
 			status = acpi_evaluate_integer(handle, "_PXM",
 						       NULL, &pxm);
 			if (ACPI_SUCCESS(status)) {
-				add.optarr[0] = pxm;
-				add.flags |= XEN_PCI_DEV_PXM;
+				add->optarr[0] = pxm;
+				add->flags |= XEN_PCI_DEV_PXM;
 				break;
 			}
 			status = acpi_get_parent(handle, &handle);
@@ -86,7 +91,7 @@ static int xen_add_device(struct device *dev)
 		}
 #endif /* CONFIG_ACPI */
 
-		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);
+		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
 		if (r != -ENOSYS)
 			return r;
 		pci_seg_supported = false;