 CREDITS                                                   |    7
 Documentation/IPMI.txt                                    |    5
 Documentation/acpi/enumeration.txt                        |    2
 Documentation/acpi/gpio-properties.txt                    |    6
 Documentation/devicetree/bindings/arm/omap/l3-noc.txt     |    1
 Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt     |    2
 Documentation/devicetree/bindings/rtc/abracon,abx80x.txt  |   30
 Documentation/kasan.txt                                   |    8
 Documentation/powerpc/transactional_memory.txt            |   32
 MAINTAINERS                                               |   58
 Makefile                                                  |    2
 arch/arm/boot/dts/am437x-sk-evm.dts                       |    4
 arch/arm/boot/dts/am57xx-beagle-x15.dts                   |   11
 arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts          |    4
 arch/arm/boot/dts/dra7.dtsi                               |   10
 arch/arm/boot/dts/exynos4412-odroid-common.dtsi           |    3
 arch/arm/boot/dts/exynos5250-snow.dts                     |    1
 arch/arm/boot/dts/exynos5420-trip-points.dtsi             |    2
 arch/arm/boot/dts/exynos5420.dtsi                         |    1
 arch/arm/boot/dts/exynos5440-trip-points.dtsi             |    2
 arch/arm/boot/dts/imx23-olinuxino.dts                     |    4
 arch/arm/boot/dts/imx25.dtsi                              |    1
 arch/arm/boot/dts/imx28.dtsi                              |    2
 arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi              |    2
 arch/arm/boot/dts/imx6qdl-sabreauto.dtsi                  |    1
 arch/arm/boot/dts/omap3-n900.dts                          |    4
 arch/arm/boot/dts/omap3.dtsi                              |    2
 arch/arm/boot/dts/omap5.dtsi                              |    2
 arch/arm/boot/dts/r8a7791-koelsch.dts                     |    2
 arch/arm/boot/dts/ste-dbx5x0.dtsi                         |   17
 arch/arm/boot/dts/ste-href.dtsi                           |   15
 arch/arm/boot/dts/ste-snowball.dts                        |   13
 arch/arm/configs/multi_v7_defconfig                       |    3
 arch/arm/configs/omap2plus_defconfig                      |    2
 arch/arm/include/asm/dma-iommu.h                          |    2
 arch/arm/include/asm/xen/page.h                           |    1
 arch/arm/kernel/perf_event_cpu.c                          |    9
 arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c      |    2
 arch/arm/mach-omap2/prm-regbits-34xx.h                    |    1
 arch/arm/mach-omap2/prm-regbits-44xx.h                    |    1
 arch/arm/mach-omap2/vc.c                                  |   12
 arch/arm/mach-omap2/vc.h                                  |    2
 arch/arm/mach-omap2/vc3xxx_data.c                         |    1
 arch/arm/mach-omap2/vc44xx_data.c                         |    1
 arch/arm/mach-pxa/Kconfig                                 |    9
 arch/arm/mach-pxa/Makefile                                |    1
 arch/arm/mach-pxa/include/mach/lubbock.h                  |    7
 arch/arm/mach-pxa/include/mach/mainstone.h                |    6
 arch/arm/mach-pxa/lubbock.c                               |  108
 arch/arm/mach-pxa/mainstone.c                             |  115
 arch/arm/mach-pxa/pxa_cplds_irqs.c                        |  200
 arch/arm/mach-rockchip/pm.c                               |   33
 arch/arm/mach-rockchip/pm.h                               |    8
 arch/arm/mach-rockchip/rockchip.c                         |   19
 arch/arm/mm/dma-mapping.c                                 |   13
 arch/arm/mm/proc-arm1020.S                                |    2
 arch/arm/mm/proc-arm1020e.S                               |    2
 arch/arm/mm/proc-arm925.S                                 |    3
 arch/arm/mm/proc-feroceon.S                               |    1
 arch/arm/net/bpf_jit_32.c                                 |   42
 arch/arm/xen/mm.c                                         |   15
 arch/arm64/crypto/crc32-arm64.c                           |   22
 arch/arm64/crypto/sha1-ce-glue.c                          |    3
 arch/arm64/crypto/sha2-ce-glue.c                          |    3
 arch/m32r/kernel/smp.c                                    |    6
 arch/mips/include/asm/smp.h                               |    2
 arch/mips/kernel/elf.c                                    |   32
 arch/mips/kernel/smp.c                                    |    6
 arch/powerpc/include/uapi/asm/tm.h                        |    2
 arch/powerpc/kernel/eeh.c                                 |   11
 arch/powerpc/kernel/entry_64.S                            |   19
 arch/powerpc/kernel/idle_power7.S                         |    2
 arch/powerpc/kvm/book3s_xics.c                            |    2
 arch/powerpc/platforms/powernv/pci-ioda.c                 |    2
 arch/powerpc/platforms/pseries/dlpar.c                    |   10
 arch/x86/boot/compressed/eboot.c                          |    2
 arch/x86/include/asm/hypervisor.h                         |    2
 arch/x86/include/asm/spinlock.h                           |    2
 arch/x86/include/asm/xen/page.h                           |    5
 arch/x86/kernel/cpu/hypervisor.c                          |    4
 arch/x86/kernel/cpu/perf_event_intel.c                    |   66
 arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c         |   12
 arch/x86/kernel/process.c                                 |   14
 arch/x86/mm/ioremap.c                                     |   14
 arch/x86/net/bpf_jit_comp.c                               |   28
 arch/x86/pci/acpi.c                                       |   24
 arch/x86/xen/enlighten.c                                  |   27
 arch/x86/xen/suspend.c                                    |   10
 block/blk-core.c                                          |    2
 block/blk-mq.c                                            |   60
 block/blk-sysfs.c                                         |    2
 block/bounce.c                                            |    2
 block/elevator.c                                          |    6
 drivers/acpi/acpi_pnp.c                                   |    2
 drivers/acpi/resource.c                                   |    2
 drivers/acpi/sbshc.c                                      |   22
 drivers/ata/Kconfig                                       |   10
 drivers/ata/Makefile                                      |    1
 drivers/ata/ahci.c                                        |  103
 drivers/ata/ahci_st.c                                     |   49
 drivers/ata/libahci.c                                     |    3
 drivers/ata/libata-core.c                                 |   34
 drivers/ata/libata-eh.c                                   |    3
 drivers/ata/pata_scc.c                                    | 1110
 drivers/block/loop.c                                      |    2
 drivers/block/nvme-scsi.c                                 |    3
 drivers/block/xen-blkback/blkback.c                       |   35
 drivers/block/zram/zram_drv.c                             |   23
 drivers/bluetooth/bt3c_cs.c                               |    3
 drivers/bluetooth/btbcm.c                                 |  148
 drivers/bluetooth/btbcm.h                                 |    6
 drivers/bluetooth/btusb.c                                 |  403
 drivers/bluetooth/hci_ath.c                               |   98
 drivers/bus/arm-cci.c                                     |    2
 drivers/bus/omap_l3_noc.c                                 |    5
 drivers/bus/omap_l3_noc.h                                 |   54
 drivers/char/hw_random/bcm63xx-rng.c                      |   18
 drivers/char/ipmi/ipmi_msghandler.c                       |    4
 drivers/char/ipmi/ipmi_si_intf.c                          |   16
 drivers/char/ipmi/ipmi_ssif.c                             |  213
 drivers/dma/Kconfig                                       |    1
 drivers/dma/dmaengine.c                                   |    4
 drivers/dma/sh/usb-dmac.c                                 |    2
 drivers/firmware/efi/runtime-map.c                        |    6
 drivers/gpio/gpio-omap.c                                  |   48
 drivers/gpio/gpiolib-acpi.c                               |    2
 drivers/gpio/gpiolib-sysfs.c                              |   19
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c     |    7
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c                 |    4
 drivers/gpu/drm/drm_irq.c                                 |    9
 drivers/gpu/drm/i915/i915_reg.h                           |    2
 drivers/gpu/drm/i915/intel_display.c                      |    3
 drivers/gpu/drm/i915/intel_dp.c                           |    9
 drivers/gpu/drm/i915/intel_lvds.c                         |   26
 drivers/gpu/drm/i915/intel_uncore.c                       |    8
 drivers/gpu/drm/radeon/atombios_crtc.c                    |    3
 drivers/gpu/drm/radeon/atombios_encoders.c                |    6
 drivers/gpu/drm/radeon/dce6_afmt.c                        |   25
 drivers/gpu/drm/radeon/evergreen_hdmi.c                   |   53
 drivers/gpu/drm/radeon/r600_hdmi.c                        |    9
 drivers/gpu/drm/radeon/radeon.h                           |    1
 drivers/gpu/drm/radeon/radeon_asic.c                      |    2
 drivers/gpu/drm/radeon/radeon_asic.h                      |    4
 drivers/gpu/drm/radeon/radeon_audio.c                     |   34
 drivers/gpu/drm/radeon/radeon_connectors.c                |    8
 drivers/gpu/drm/radeon/radeon_cs.c                        |    4
 drivers/gpu/drm/radeon/radeon_mn.c                        |   13
 drivers/gpu/drm/radeon/radeon_ttm.c                       |    8
 drivers/gpu/drm/radeon/radeon_uvd.c                       |  144
 drivers/gpu/drm/radeon/radeon_vce.c                       |   65
 drivers/gpu/drm/radeon/radeon_vm.c                        |   36
 drivers/gpu/drm/radeon/rv770d.h                           |    3
 drivers/gpu/drm/radeon/si_dpm.c                           |    1
 drivers/gpu/drm/radeon/uvd_v1_0.c                         |   14
 drivers/gpu/drm/radeon/uvd_v2_2.c                         |   29
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c               |    9
 drivers/gpu/drm/tegra/drm.c                               |    1
 drivers/ide/Kconfig                                       |    9
 drivers/ide/Makefile                                      |    1
 drivers/ide/scc_pata.c                                    |  887
 drivers/infiniband/core/addr.c                            |   13
 drivers/infiniband/core/cm.c                              |   23
 drivers/infiniband/core/cm_msgs.h                         |    4
 drivers/infiniband/core/cma.c                             |   27
 drivers/infiniband/core/iwpm_msg.c                        |   75
 drivers/infiniband/core/iwpm_util.c                       |  208
 drivers/infiniband/core/iwpm_util.h                       |   15
 drivers/infiniband/core/umem_odp.c                        |   14
 drivers/infiniband/hw/cxgb4/cm.c                          |   87
 drivers/infiniband/hw/cxgb4/cq.c                          |   22
 drivers/infiniband/hw/cxgb4/device.c                      |   41
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h                    |    7
 drivers/infiniband/hw/cxgb4/mem.c                         |    6
 drivers/infiniband/hw/cxgb4/qp.c                          |   10
 drivers/infiniband/hw/cxgb4/t4.h                          |    7
 drivers/infiniband/hw/cxgb4/t4fw_ri_api.h                 |    4
 drivers/infiniband/hw/ehca/ehca_mcast.c                   |    4
 drivers/infiniband/hw/mlx4/main.c                         |    3
 drivers/infiniband/hw/mlx5/qp.c                           |    2
 drivers/infiniband/hw/nes/nes.c                           |    1
 drivers/infiniband/hw/nes/nes_cm.c                        |   65
 drivers/infiniband/hw/qib/qib.h                           |    3
 drivers/infiniband/hw/qib/qib_file_ops.c                  |    3
 drivers/infiniband/hw/qib/qib_iba6120.c                   |    8
 drivers/infiniband/hw/qib/qib_iba7220.c                   |    8
 drivers/infiniband/hw/qib/qib_iba7322.c                   |   41
 drivers/infiniband/hw/qib/qib_init.c                      |   26
 drivers/infiniband/hw/qib/qib_wc_x86_64.c                 |   32
 drivers/infiniband/ulp/ipoib/ipoib_cm.c                   |    4
 drivers/iommu/amd_iommu_v2.c                              |    1
 drivers/iommu/arm-smmu.c                                  |   30
 drivers/iommu/rockchip-iommu.c                            |    4
 drivers/irqchip/irq-gic.c                                 |   71
 drivers/md/dm-crypt.c                                     |   12
 drivers/md/md.c                                           |    4
 drivers/md/raid0.c                                        |    5
 drivers/md/raid5.c                                        |  123
 drivers/media/platform/marvell-ccic/mcam-core.c           |   14
 drivers/media/platform/marvell-ccic/mcam-core.h           |    8
 drivers/media/platform/soc_camera/rcar_vin.c              |    7
 drivers/mmc/card/block.c                                  |   12
 drivers/mmc/card/queue.c                                  |    2
 drivers/mmc/card/queue.h                                  |    2
 drivers/mmc/core/core.c                                   |    1
 drivers/mmc/host/dw_mmc.c                                 |    7
 drivers/mmc/host/sh_mmcif.c                               |    2
 drivers/net/can/xilinx_can.c                              |    7
 drivers/net/dsa/mv88e6xxx.c                               |    3
 drivers/net/ethernet/amd/Kconfig                          |    1
 drivers/net/ethernet/apm/xgene/Kconfig                    |    1
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c           |   10
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c          |    9
 drivers/net/ethernet/cadence/macb.c                       |   11
 drivers/net/ethernet/intel/e1000e/e1000.h                 |    1
 drivers/net/ethernet/intel/fm10k/fm10k_main.c             |    4
 drivers/net/ethernet/intel/igb/igb_main.c                 |    4
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c         |    2
 drivers/net/ethernet/mellanox/mlx4/en_port.c              |    2
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c     |   14
 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c      |    4
 drivers/net/ethernet/qualcomm/qca_spi.c                   |    4
 drivers/net/ethernet/realtek/r8169.c                      |    4
 drivers/net/ethernet/smsc/smc91x.c                        |   20
 drivers/net/ethernet/smsc/smsc911x.c                      |   12
 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c     |    1
 drivers/net/ethernet/xilinx/ll_temac_main.c               |    4
 drivers/net/hyperv/netvsc.c                               |    9
 drivers/net/ieee802154/at86rf230.c                        |  390
 drivers/net/macvlan.c                                     |   15
 drivers/net/phy/Kconfig                                   |    1
 drivers/net/phy/mdio-gpio.c                               |    5
 drivers/net/phy/micrel.c                                  |    3
 drivers/net/ppp/pppoe.c                                   |    4
 drivers/net/usb/r8152.c                                   |    1
 drivers/net/usb/usbnet.c                                  |    4
 drivers/net/wireless/ath/ath9k/xmit.c                     |   52
 drivers/net/wireless/iwlwifi/iwl-fw-file.h                |    2
 drivers/net/wireless/iwlwifi/iwl-trans.h                  |   41
 drivers/net/wireless/iwlwifi/mvm/d3.c                     |    2
 drivers/net/wireless/iwlwifi/mvm/fw-api-power.h           |   34
 drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h            |   44
 drivers/net/wireless/iwlwifi/mvm/fw-api.h                 |   13
 drivers/net/wireless/iwlwifi/mvm/fw.c                     |   54
 drivers/net/wireless/iwlwifi/mvm/mac80211.c               |   26
 drivers/net/wireless/iwlwifi/mvm/mvm.h                    |    1
 drivers/net/wireless/iwlwifi/mvm/ops.c                    |   10
 drivers/net/wireless/iwlwifi/mvm/rx.c                     |    5
 drivers/net/wireless/iwlwifi/mvm/scan.c                   |    2
 drivers/net/wireless/iwlwifi/pcie/trans.c                 |   17
 drivers/net/wireless/rtlwifi/usb.c                        |    2
 drivers/pinctrl/core.c                                    |   10
 drivers/pinctrl/core.h                                    |    2
 drivers/pinctrl/devicetree.c                              |    2
 drivers/pinctrl/mediatek/pinctrl-mtk-common.c             |    2
 drivers/pinctrl/mvebu/pinctrl-armada-370.c                |    2
 drivers/pinctrl/qcom/pinctrl-spmi-gpio.c                  |   14
 drivers/pinctrl/qcom/pinctrl-spmi-mpp.c                   |   10
 drivers/platform/x86/ideapad-laptop.c                     |    7
 drivers/platform/x86/thinkpad_acpi.c                      |    2
 drivers/power/axp288_fuel_gauge.c                         |    1
 drivers/power/bq27x00_battery.c                           |    8
 drivers/power/collie_battery.c                            |    2
 drivers/power/reset/Kconfig                               |    1
 drivers/power/reset/at91-reset.c                          |    4
 drivers/power/reset/ltc2952-poweroff.c                    |   18
 drivers/rtc/Kconfig                                       |   10
 drivers/rtc/Makefile                                      |    1
 drivers/rtc/rtc-abx80x.c                                  |  307
 drivers/rtc/rtc-armada38x.c                               |   24
 drivers/scsi/3w-9xxx.c                                    |   57
 drivers/scsi/3w-9xxx.h                                    |    5
 drivers/scsi/3w-sas.c                                     |   50
 drivers/scsi/3w-sas.h                                     |    4
 drivers/scsi/3w-xxxx.c                                    |   42
 drivers/scsi/3w-xxxx.h                                    |    5
 drivers/scsi/aha1542.c                                    |   23
 drivers/scsi/scsi_devinfo.c                               |    1
 drivers/scsi/scsi_scan.c                                  |    6
 drivers/spi/Kconfig                                       |    3
 drivers/spi/spi-bcm2835.c                                 |    5
 drivers/spi/spi-bitbang.c                                 |   17
 drivers/spi/spi-fsl-cpm.c                                 |   40
 drivers/spi/spi-fsl-espi.c                                |   45
 drivers/spi/spi-omap2-mcspi.c                             |   16
 drivers/spi/spi.c                                         |    9
 drivers/staging/media/omap4iss/Kconfig                    |    1
 drivers/staging/media/omap4iss/iss.c                      |   11
 drivers/staging/media/omap4iss/iss.h                      |    4
 drivers/staging/media/omap4iss/iss_csiphy.c               |   12
 drivers/tty/hvc/hvc_xen.c                                 |   18
 drivers/vfio/pci/vfio_pci.c                               |    8
 drivers/vfio/vfio.c                                       |   21
 drivers/xen/events/events_2l.c                            |   10
 drivers/xen/events/events_base.c                          |    7
 drivers/xen/gntdev.c                                      |   28
 drivers/xen/grant-table.c                                 |   28
 drivers/xen/manage.c                                      |    9
 drivers/xen/swiotlb-xen.c                                 |    2
 drivers/xen/xen-pciback/conf_space.c                      |    6
 drivers/xen/xen-pciback/conf_space.h                      |    2
 drivers/xen/xen-pciback/conf_space_header.c               |    2
 drivers/xen/xenbus/xenbus_probe.c                         |   29
 fs/btrfs/free-space-cache.c                               |    2
 fs/configfs/mount.c                                       |    2
 fs/efivarfs/super.c                                       |    2
 fs/ext4/Kconfig                                           |    9
 fs/ext4/crypto_fname.c                                    |  280
 fs/ext4/crypto_key.c                                      |    1
 fs/ext4/crypto_policy.c                                   |   14
 fs/ext4/dir.c                                             |    2
 fs/ext4/ext4.h                                            |   16
 fs/ext4/ext4_crypto.h                                     |   11
 fs/ext4/extents.c                                         |   15
 fs/ext4/extents_status.c                                  |    8
 fs/ext4/inode.c                                           |    2
 fs/ext4/namei.c                                           |   72
 fs/ext4/resize.c                                          |    7
 fs/ext4/symlink.c                                         |    2
 fs/f2fs/data.c                                            |    7
 fs/f2fs/f2fs.h                                            |    1
 fs/f2fs/namei.c                                           |    8
 fs/f2fs/super.c                                           |    1
 fs/namei.c                                                |   22
 fs/namespace.c                                            |    6
 fs/nfsd/blocklayout.c                                     |   11
 fs/nfsd/nfs4callback.c                                    |  119
 fs/nfsd/nfs4state.c                                       |  147
 fs/nfsd/state.h                                           |   19
 fs/nfsd/xdr4.h                                            |    1
 fs/nilfs2/btree.c                                         |    2
 fs/ocfs2/dlm/dlmmaster.c                                  |   13
 fs/splice.c                                               |   12
 include/linux/blk_types.h                                 |    2
 include/linux/compiler-gcc.h                              |   16
 include/linux/compiler-intel.h                            |    3
 include/linux/compiler.h                                  |    4
 include/linux/ftrace_event.h                              |    2
 include/linux/irqchip/arm-gic.h                           |    2
 include/linux/libata.h                                    |   10
 include/linux/netdevice.h                                 |    3
 include/linux/nilfs2_fs.h                                 |    2
 include/linux/pci_ids.h                                   |    4
 include/linux/util_macros.h                               |    2
 include/net/cfg802154.h                                   |    2
 include/net/codel.h                                       |   10
 include/net/mac80211.h                                    |    2
 include/net/mac802154.h                                   |   94
 include/rdma/ib_addr.h                                    |    3
 include/rdma/ib_cm.h                                      |    7
 include/rdma/iw_portmap.h                                 |   25
 include/scsi/scsi_devinfo.h                               |    1
 include/uapi/linux/mpls.h                                 |   10
 include/uapi/rdma/rdma_netlink.h                          |    1
 include/xen/grant_table.h                                 |    1
 include/xen/xen-ops.h                                     |    1
 init/do_mounts.c                                          |    5
 kernel/irq/dummychip.c                                    |    1
 kernel/rcu/tree.c                                         |   16
 kernel/time/clockevents.c                                 |    6
 kernel/trace/trace_output.c                               |    3
 lib/Kconfig.debug                                         |    1
 lib/Kconfig.kasan                                         |    8
 lib/find_last_bit.c                                       |   41
 lib/string.c                                              |    2
 mm/hwpoison-inject.c                                      |   13
 mm/memory-failure.c                                       |   16
 mm/page-writeback.c                                       |    6
 net/bluetooth/hci_core.c                                  |    3
 net/core/dev.c                                            |    2
 net/core/net_namespace.c                                  |    2
 net/ieee802154/Makefile                                   |    4
 net/ieee802154/nl-phy.c                                   |    5
 net/ieee802154/nl802154.c                                 |    2
 net/ieee802154/rdev-ops.h                                 |   85
 net/ieee802154/trace.c                                    |    7
 net/ieee802154/trace.h                                    |  247
 net/ipv6/ip6_output.c                                     |   39
 net/ipv6/route.c                                          |    5
 net/mac80211/iface.c                                      |   12
 net/mac80211/sta_info.c                                   |   19
 net/mac802154/cfg.c                                       |    9
 net/mac802154/ieee802154_i.h                              |    3
 net/mac802154/iface.c                                     |    5
 net/mac802154/llsec.c                                     |    4
 net/mac802154/main.c                                      |    7
 net/mpls/af_mpls.c                                        |   18
 net/mpls/internal.h                                       |   10
 net/netlink/af_netlink.c                                  |    1
 net/packet/af_packet.c                                    |    9
 net/rds/connection.c                                      |   17
 net/rds/ib_cm.c                                           |   13
 net/rds/tcp_connect.c                                     |    1
 net/rds/tcp_listen.c                                      |   46
 net/sched/cls_api.c                                       |    7
 net/sched/sch_codel.c                                     |    2
 net/sched/sch_fq_codel.c                                  |    2
 net/sched/sch_gred.c                                      |    4
 net/sunrpc/auth_gss/gss_rpc_xdr.c                         |   23
 tools/lib/api/Makefile                                    |    2
 tools/lib/traceevent/event-parse.c                        |    2
 tools/perf/bench/futex-requeue.c                          |   15
 tools/perf/bench/numa.c                                   |   12
 tools/perf/builtin-kmem.c                                 |   58
 tools/perf/builtin-report.c                               |    2
 tools/perf/builtin-top.c                                  |    2
 tools/perf/builtin-trace.c                                |   10
 tools/perf/util/probe-event.c                             |    2
 tools/perf/util/probe-finder.c                            |    4
 tools/testing/selftests/powerpc/pmu/Makefile              |    2
 tools/testing/selftests/powerpc/tm/Makefile               |    2
410 files changed, 5358 insertions, 4555 deletions
diff --git a/CREDITS b/CREDITS
index 40cc4bfb34db..ec7e6c7fdd1b 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3709,6 +3709,13 @@ N: Dirk Verworner
 D: Co-author of German book ``Linux-Kernel-Programmierung''
 D: Co-founder of Berlin Linux User Group
 
+N: Andrew Victor
+E: linux@maxim.org.za
+W: http://maxim.org.za/at91_26.html
+D: First maintainer of Atmel ARM-based SoC, aka AT91
+D: Introduced support for at91rm9200, the first chip of AT91 family
+S: South Africa
+
 N: Riku Voipio
 E: riku.voipio@iki.fi
 D: Author of PCA9532 LED and Fintek f75375s hwmon driver
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 653d5d739d7f..31d1d658827f 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -505,7 +505,10 @@ at module load time (for a module) with:
 
 The addresses are normal I2C addresses. The adapter is the string
 name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
-It is *NOT* i2c-<n> itself.
+It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
+spaces, so if the name is "This is an I2C chip" you can say
+adapter_name=ThisisanI2cchip. This is because it's hard to pass in
+spaces in kernel parameters.
 
 The debug flags are bit flags for each BMC found, they are:
 IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index 750401f91341..15dfce708ebf 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -253,7 +253,7 @@ input driver:
 GPIO support
 ~~~~~~~~~~~~
 ACPI 5 introduced two new resources to describe GPIO connections: GpioIo
-and GpioInt. These resources are used be used to pass GPIO numbers used by
+and GpioInt. These resources can be used to pass GPIO numbers used by
 the device to the driver. ACPI 5.1 extended this with _DSD (Device
 Specific Data) which made it possible to name the GPIOs among other things.
 
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt
index ae36fcf86dc7..f35dad11f0de 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/acpi/gpio-properties.txt
@@ -1,9 +1,9 @@
 _DSD Device Properties Related to GPIO
 --------------------------------------
 
-With the release of ACPI 5.1 and the _DSD configuration objecte names
-can finally be given to GPIOs (and other things as well) returned by
-_CRS. Previously, we were only able to use an integer index to find
+With the release of ACPI 5.1, the _DSD configuration object finally
+allows names to be given to GPIOs (and other things as well) returned
+by _CRS. Previously, we were only able to use an integer index to find
 the corresponding GPIO, which is pretty error prone (it depends on
 the _CRS output ordering, for example).
 
diff --git a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
index 974624ea68f6..161448da959d 100644
--- a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
+++ b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
@@ -6,6 +6,7 @@ provided by Arteris.
 Required properties:
 - compatible : Should be "ti,omap3-l3-smx" for OMAP3 family
                Should be "ti,omap4-l3-noc" for OMAP4 family
+               Should be "ti,omap5-l3-noc" for OMAP5 family
                Should be "ti,dra7-l3-noc" for DRA7 family
                Should be "ti,am4372-l3-noc" for AM43 family
 - reg: Contains L3 register address range for each noc domain.
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
index a4873e5e3e36..e30e184f50c7 100644
--- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
 			      80 81 68 69
 			      70 71 72 73
 			      74 75 76 77>;
-	interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+	interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
 			  "saif0", "saif1", "i2c0", "i2c1",
 			  "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
 			  "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
new file mode 100644
index 000000000000..be789685a1c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
@@ -0,0 +1,30 @@
+Abracon ABX80X I2C ultra low power RTC/Alarm chip
+
+The Abracon ABX80X family consist of the ab0801, ab0803, ab0804, ab0805, ab1801,
+ab1803, ab1804 and ab1805. The ab0805 is the superset of ab080x and the ab1805
+is the superset of ab180x.
+
+Required properties:
+
+ - "compatible": should one of:
+		"abracon,abx80x"
+		"abracon,ab0801"
+		"abracon,ab0803"
+		"abracon,ab0804"
+		"abracon,ab0805"
+		"abracon,ab1801"
+		"abracon,ab1803"
+		"abracon,ab1804"
+		"abracon,ab1805"
+	Using "abracon,abx80x" will enable chip autodetection.
+ - "reg": I2C bus address of the device
+
+Optional properties:
+
+The abx804 and abx805 have a trickle charger that is able to charge the
+connected battery or supercap. Both the following properties have to be defined
+and valid to enable charging:
+
+ - "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V)
+ - "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output
+   resistor, the other values are in ohm.
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
index 092fc10961fe..4692241789b1 100644
--- a/Documentation/kasan.txt
+++ b/Documentation/kasan.txt
@@ -9,7 +9,9 @@ a fast and comprehensive solution for finding use-after-free and out-of-bounds
 bugs.
 
 KASan uses compile-time instrumentation for checking every memory access,
-therefore you will need a certain version of GCC > 4.9.2
+therefore you will need a gcc version of 4.9.2 or later. KASan could detect out
+of bounds accesses to stack or global variables, but only if gcc 5.0 or later was
+used to built the kernel.
 
 Currently KASan is supported only for x86_64 architecture and requires that the
 kernel be built with the SLUB allocator.
@@ -23,8 +25,8 @@ To enable KASAN configure kernel with:
 
 and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline/inline
 is compiler instrumentation types. The former produces smaller binary the
-latter is 1.1 - 2 times faster. Inline instrumentation requires GCC 5.0 or
-latter.
+latter is 1.1 - 2 times faster. Inline instrumentation requires a gcc version
+of 5.0 or later.
 
 Currently KASAN works only with the SLUB memory allocator.
 For better bug detection and nicer report, enable CONFIG_STACKTRACE and put
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index ba0a2a4a54ba..ded69794a5c0 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -74,23 +74,22 @@ Causes of transaction aborts
 Syscalls
 ========
 
-Syscalls made from within an active transaction will not be performed and the
-transaction will be doomed by the kernel with the failure code TM_CAUSE_SYSCALL
-| TM_CAUSE_PERSISTENT.
+Performing syscalls from within transaction is not recommended, and can lead
+to unpredictable results.
 
-Syscalls made from within a suspended transaction are performed as normal and
-the transaction is not explicitly doomed by the kernel. However, what the
-kernel does to perform the syscall may result in the transaction being doomed
-by the hardware. The syscall is performed in suspended mode so any side
-effects will be persistent, independent of transaction success or failure. No
-guarantees are provided by the kernel about which syscalls will affect
-transaction success.
+Syscalls do not by design abort transactions, but beware: The kernel code will
+not be running in transactional state. The effect of syscalls will always
+remain visible, but depending on the call they may abort your transaction as a
+side-effect, read soon-to-be-aborted transactional data that should not remain
+invisible, etc. If you constantly retry a transaction that constantly aborts
+itself by calling a syscall, you'll have a livelock & make no progress.
 
-Care must be taken when relying on syscalls to abort during active transactions
-if the calls are made via a library. Libraries may cache values (which may
-give the appearance of success) or perform operations that cause transaction
-failure before entering the kernel (which may produce different failure codes).
-Examples are glibc's getpid() and lazy symbol resolution.
+Simple syscalls (e.g. sigprocmask()) "could" be OK. Even things like write()
+from, say, printf() should be OK as long as the kernel does not access any
+memory that was accessed transactionally.
+
+Consider any syscalls that happen to work as debug-only -- not recommended for
+production use. Best to queue them up till after the transaction is over.
 
 
 Signals
@@ -177,7 +176,8 @@ kernel aborted a transaction:
  TM_CAUSE_RESCHED       Thread was rescheduled.
  TM_CAUSE_TLBI          Software TLB invalid.
  TM_CAUSE_FAC_UNAV      FP/VEC/VSX unavailable trap.
- TM_CAUSE_SYSCALL       Syscall from active transaction.
+ TM_CAUSE_SYSCALL       Currently unused; future syscalls that must abort
+                        transactions for consistency will use this.
  TM_CAUSE_SIGNAL        Signal delivered.
  TM_CAUSE_MISC          Currently unused.
  TM_CAUSE_ALIGNMENT     Alignment fault.
diff --git a/MAINTAINERS b/MAINTAINERS
index 16227759dfa8..5d87ccbee19b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -892,11 +892,10 @@ S: Maintained
 F:	arch/arm/mach-alpine/
 
 ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
-M:	Andrew Victor <linux@maxim.org.za>
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M:	Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://maxim.org.za/at91_26.html
 W:	http://www.linux4sam.org
 S:	Supported
 F:	arch/arm/mach-at91/
@@ -990,6 +989,12 @@ F: drivers/clocksource/timer-prima2.c
 F:	drivers/clocksource/timer-atlas7.c
 N:	[^a-z]sirf
 
+ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
+M:	Baruch Siach <baruch@tkos.co.il>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+N:	digicolor
+
 ARM/EBSA110 MACHINE SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1439,9 +1444,10 @@ ARM/SOCFPGA ARCHITECTURE
 M:	Dinh Nguyen <dinguyen@opensource.altera.com>
 S:	Maintained
 F:	arch/arm/mach-socfpga/
+F:	arch/arm/boot/dts/socfpga*
+F:	arch/arm/configs/socfpga_defconfig
 W:	http://www.rocketboards.org
-T:	git://git.rocketboards.org/linux-socfpga.git
-T:	git://git.rocketboards.org/linux-socfpga-next.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
 M:	Dinh Nguyen <dinguyen@opensource.altera.com>
@@ -2116,8 +2122,9 @@ S: Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
-M:	Christian Daudt <bcm@fixthebug.org>
 M:	Florian Fainelli <f.fainelli@gmail.com>
+M:	Ray Jui <rjui@broadcom.com>
+M:	Scott Branden <sbranden@broadcom.com>
 L:	bcm-kernel-feedback-list@broadcom.com
 T:	git git://github.com/broadcom/mach-bcm
 S:	Maintained
@@ -2168,7 +2175,6 @@ S: Maintained
 F:	drivers/usb/gadget/udc/bcm63xx_udc.*
 
 BROADCOM BCM7XXX ARM ARCHITECTURE
-M:	Marc Carino <marc.ceeeee@gmail.com>
 M:	Brian Norris <computersforpeace@gmail.com>
 M:	Gregory Fong <gregory.0xf0@gmail.com>
 M:	Florian Fainelli <f.fainelli@gmail.com>
@@ -3413,6 +3419,13 @@ F: drivers/gpu/drm/rcar-du/
 F:	drivers/gpu/drm/shmobile/
 F:	include/linux/platform_data/shmob_drm.h
 
+DRM DRIVERS FOR ROCKCHIP
+M:	Mark Yao <mark.yao@rock-chips.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+F:	drivers/gpu/drm/rockchip/
+F:	Documentation/devicetree/bindings/video/rockchip*
+
 DSBR100 USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
 L:	linux-media@vger.kernel.org
@@ -4364,11 +4377,10 @@ F: fs/gfs2/
 F:	include/uapi/linux/gfs2_ondisk.h
 
 GIGASET ISDN DRIVERS
-M:	Hansjoerg Lipp <hjlipp@web.de>
-M:	Tilman Schmidt <tilman@imap.cc>
+M:	Paul Bolle <pebolle@tiscali.nl>
 L:	gigaset307x-common@lists.sourceforge.net
 W:	http://gigaset307x.sourceforge.net/
-S:	Maintained
+S:	Odd Fixes
 F:	Documentation/isdn/README.gigaset
 F:	drivers/isdn/gigaset/
 F:	include/uapi/linux/gigaset_dev.h
@@ -5035,17 +5047,19 @@ S: Orphan
 F:	drivers/video/fbdev/imsttfb.c
 
 INFINIBAND SUBSYSTEM
-M:	Roland Dreier <roland@kernel.org>
+M:	Doug Ledford <dledford@redhat.com>
 M:	Sean Hefty <sean.hefty@intel.com>
 M:	Hal Rosenstock <hal.rosenstock@gmail.com>
 L:	linux-rdma@vger.kernel.org
 W:	http://www.openfabrics.org/
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
 S:	Supported
 F:	Documentation/infiniband/
 F:	drivers/infiniband/
 F:	include/uapi/linux/if_infiniband.h
+F:	include/uapi/rdma/
+F:	include/rdma/
 
 INOTIFY
 M:	John McCutchan <john@johnmccutchan.com>
@@ -5798,6 +5812,7 @@ F: drivers/scsi/53c700*
 LED SUBSYSTEM
 M:	Bryan Wu <cooloney@gmail.com>
 M:	Richard Purdie <rpurdie@rpsys.net>
+M:	Jacek Anaszewski <j.anaszewski@samsung.com>
 L:	linux-leds@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
 S:	Maintained
@@ -6943,6 +6958,17 @@ T: git git://git.rocketboards.org/linux-socfpga-next.git
 S:	Maintained
 F:	arch/nios2/
 
+NOKIA N900 POWER SUPPLY DRIVERS
+M:	Pali Rohár <pali.rohar@gmail.com>
+S:	Maintained
+F:	include/linux/power/bq2415x_charger.h
+F:	include/linux/power/bq27x00_battery.h
+F:	include/linux/power/isp1704_charger.h
+F:	drivers/power/bq2415x_charger.c
+F:	drivers/power/bq27x00_battery.c
+F:	drivers/power/isp1704_charger.c
+F:	drivers/power/rx51_battery.c
+
 NTB DRIVER
 M:	Jon Mason <jdmason@kudzu.us>
 M:	Dave Jiang <dave.jiang@intel.com>
@@ -8800,10 +8826,11 @@ W: http://www.emulex.com
 S:	Supported
 F:	drivers/scsi/be2iscsi/
 
-SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
-M:	Sathya Perla <sathya.perla@emulex.com>
-M:	Subbu Seetharaman <subbu.seetharaman@emulex.com>
-M:	Ajit Khaparde <ajit.khaparde@emulex.com>
+Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
+M:	Sathya Perla <sathya.perla@avagotech.com>
+M:	Ajit Khaparde <ajit.khaparde@avagotech.com>
+M:	Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
+M:	Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
 L:	netdev@vger.kernel.org
 W:	http://www.emulex.com
 S:	Supported
@@ -11030,6 +11057,7 @@ F: drivers/media/pci/zoran/
 ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER
 M:	Minchan Kim <minchan@kernel.org>
 M:	Nitin Gupta <ngupta@vflare.org>
+R:	Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	drivers/block/zram/
diff --git a/Makefile b/Makefile
index 7ff1239f9cd2..eae539d69bf3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 8ae29c955c11..c17097d2c167 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -49,7 +49,7 @@
 	pinctrl-0 = <&matrix_keypad_pins>;
 
 	debounce-delay-ms = <5>;
-	col-scan-delay-us = <1500>;
+	col-scan-delay-us = <5>;
 
 	row-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH /* Bank5, pin5 */
 			&gpio5 6 GPIO_ACTIVE_HIGH>; /* Bank5, pin6 */
@@ -473,7 +473,7 @@
 	interrupt-parent = <&gpio0>;
 	interrupts = <31 0>;
 
-	wake-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+	reset-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>;
 
 	touchscreen-size-x = <480>;
 	touchscreen-size-y = <272>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 15f198e4864d..7128fad991ac 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -18,6 +18,7 @@
 	aliases {
 		rtc0 = &mcp_rtc;
 		rtc1 = &tps659038_rtc;
+		rtc2 = &rtc;
 	};
 
 	memory {
@@ -83,7 +84,7 @@
 	gpio_fan: gpio_fan {
 		/* Based on 5v 500mA AFB02505HHB */
 		compatible = "gpio-fan";
-		gpios = <&tps659038_gpio 1 GPIO_ACTIVE_HIGH>;
+		gpios = <&tps659038_gpio 2 GPIO_ACTIVE_HIGH>;
 		gpio-fan,speed-map = <0 0>,
 				     <13000 1>;
 		#cooling-cells = <2>;
@@ -130,8 +131,8 @@
 
 	uart3_pins_default: uart3_pins_default {
 		pinctrl-single,pins = <
-			0x248 (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_rxd.rxd */
-			0x24c (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_txd.txd */
+			0x3f8 (PIN_INPUT_SLEW | MUX_MODE2) /* uart2_ctsn.uart3_rxd */
+			0x3fc (PIN_INPUT_SLEW | MUX_MODE1) /* uart2_rtsn.uart3_txd */
 		>;
 	};
 
@@ -455,7 +456,7 @@
 	mcp_rtc: rtc@6f {
 		compatible = "microchip,mcp7941x";
 		reg = <0x6f>;
-		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_LOW>; /* IRQ_SYS_1N */
+		interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&mcp79410_pins_default>;
@@ -478,7 +479,7 @@
 &uart3 {
 	status = "okay";
 	interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
-			      <&dra7_pmx_core 0x248>;
+			      <&dra7_pmx_core 0x3f8>;
 
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart3_pins_default>;
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index e3b08fb959e5..990e8a2100f0 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -105,6 +105,10 @@
 	};
 
 	internal-regs {
+		rtc@10300 {
+			/* No crystal connected to the internal RTC */
+			status = "disabled";
+		};
 		serial@12000 {
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5332b57b4950..f03a091cd076 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -911,7 +911,7 @@
 				ti,clock-cycles = <16>;
 
 				reg = <0x4ae07ddc 0x4>, <0x4ae07de0 0x4>,
-				      <0x4ae06014 0x4>, <0x4a003b20 0x8>,
+				      <0x4ae06014 0x4>, <0x4a003b20 0xc>,
 				      <0x4ae0c158 0x4>;
 				reg-names = "setup-address", "control-address",
 					    "int-address", "efuse-address",
@@ -944,7 +944,7 @@
 				ti,clock-cycles = <16>;
 
 				reg = <0x4ae07e34 0x4>, <0x4ae07e24 0x4>,
-				      <0x4ae06010 0x4>, <0x4a0025cc 0x8>,
+				      <0x4ae06010 0x4>, <0x4a0025cc 0xc>,
 				      <0x4a002470 0x4>;
 				reg-names = "setup-address", "control-address",
 					    "int-address", "efuse-address",
@@ -977,7 +977,7 @@
 				ti,clock-cycles = <16>;
 
 				reg = <0x4ae07e30 0x4>, <0x4ae07e20 0x4>,
-				      <0x4ae06010 0x4>, <0x4a0025e0 0x8>,
+				      <0x4ae06010 0x4>, <0x4a0025e0 0xc>,
 				      <0x4a00246c 0x4>;
 				reg-names = "setup-address", "control-address",
 					    "int-address", "efuse-address",
@@ -1010,7 +1010,7 @@
 				ti,clock-cycles = <16>;
 
 				reg = <0x4ae07de4 0x4>, <0x4ae07de8 0x4>,
-				      <0x4ae06010 0x4>, <0x4a003b08 0x8>,
+				      <0x4ae06010 0x4>, <0x4a003b08 0xc>,
 				      <0x4ae0c154 0x4>;
 				reg-names = "setup-address", "control-address",
 					    "int-address", "efuse-address",
@@ -1203,7 +1203,7 @@
 			status = "disabled";
 		};
 
-		rtc@48838000 {
+		rtc: rtc@48838000 {
 			compatible = "ti,am3352-rtc";
 			reg = <0x48838000 0x100>;
 			interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 8de12af7c276..d6b49e5b32e9 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -9,6 +9,7 @@
 
 #include <dt-bindings/sound/samsung-i2s.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/clock/maxim,max77686.h>
 #include "exynos4412.dtsi"
 
 / {
@@ -105,6 +106,8 @@
 
 	rtc@10070000 {
 		status = "okay";
+		clocks = <&clock CLK_RTC>, <&max77686 MAX77686_CLK_AP>;
+		clock-names = "rtc", "rtc_src";
 	};
 
 	g2d@10800000 {
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index 2657e842e5a5..1eca97ee4bd6 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -567,6 +567,7 @@
 	num-slots = <1>;
 	broken-cd;
 	cap-sdio-irq;
+	keep-power-in-suspend;
 	card-detect-delay = <200>;
 	samsung,dw-mshc-ciu-div = <3>;
 	samsung,dw-mshc-sdr-timing = <2 3>;
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
index 5d31fc140823..2180a0152c9b 100644
--- a/arch/arm/boot/dts/exynos5420-trip-points.dtsi
+++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
@@ -28,7 +28,7 @@ trips {
 		type = "active";
 	};
 	cpu-crit-0 {
-		temperature = <1200000>; /* millicelsius */
+		temperature = <120000>; /* millicelsius */
 		hysteresis = <0>; /* millicelsius */
 		type = "critical";
 	};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index f67b23f303c3..45317538bbae 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -536,6 +536,7 @@
 		clock-names = "dp";
 		phys = <&dp_phy>;
 		phy-names = "dp";
+		power-domains = <&disp_pd>;
 	};
 
 	mipi_phy: video-phy@10040714 {
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
index 48adfa8f4300..356e963edf11 100644
--- a/arch/arm/boot/dts/exynos5440-trip-points.dtsi
+++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
@@ -18,7 +18,7 @@ trips {
 		type = "active";
 	};
 	cpu-crit-0 {
-		temperature = <1050000>; /* millicelsius */
+		temperature = <105000>; /* millicelsius */
 		hysteresis = <0>; /* millicelsius */
 		type = "critical";
 	};
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
index 7e6eef2488e8..82045398bf1f 100644
--- a/arch/arm/boot/dts/imx23-olinuxino.dts
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
@@ -12,6 +12,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx23.dtsi"
 
 / {
@@ -93,6 +94,7 @@
 
 	ahb@80080000 {
 		usb0: usb@80080000 {
+			dr_mode = "host";
 			vbus-supply = <&reg_usb0_vbus>;
 			status = "okay";
 		};
@@ -122,7 +124,7 @@
 
 		user {
 			label = "green";
-			gpios = <&gpio2 1 1>;
+			gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index e4d3aecc4ed2..677f81d9dcd5 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -428,6 +428,7 @@
 
 			pwm4: pwm@53fc8000 {
 				compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+				#pwm-cells = <2>;
 				reg = <0x53fc8000 0x4000>;
 				clocks = <&clks 108>, <&clks 52>;
 				clock-names = "ipg", "per";
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 25e25f82fbae..4e073e854742 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -913,7 +913,7 @@
 				      80 81 68 69
 				      70 71 72 73
 				      74 75 76 77>;
-			interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+			interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
 					  "saif0", "saif1", "i2c0", "i2c1",
 					  "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
 					  "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 19cc269a08d4..1ce6133b67f5 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -31,6 +31,7 @@
 			regulator-min-microvolt = <5000000>;
 			regulator-max-microvolt = <5000000>;
 			gpio = <&gpio4 15 0>;
+			enable-active-high;
 		};
 
 		reg_usb_h1_vbus: regulator@1 {
@@ -40,6 +41,7 @@
 			regulator-min-microvolt = <5000000>;
 			regulator-max-microvolt = <5000000>;
 			gpio = <&gpio1 0 0>;
+			enable-active-high;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 46b2fed7c319..3b24b12651b2 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -185,7 +185,6 @@
 &i2c3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_i2c3>;
-	pinctrl-assert-gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 
 	max7310_a: gpio@30 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index a29315833ecd..5c16145920ea 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -498,6 +498,8 @@
 		DRVDD-supply = <&vmmc2>;
 		IOVDD-supply = <&vio>;
 		DVDD-supply = <&vio>;
+
+		ai3x-micbias-vg = <1>;
 	};
 
 	tlv320aic3x_aux: tlv320aic3x@19 {
@@ -509,6 +511,8 @@
 		DRVDD-supply = <&vmmc2>;
 		IOVDD-supply = <&vio>;
 		DVDD-supply = <&vio>;
+
+		ai3x-micbias-vg = <2>;
 	};
 
 	tsl2563: tsl2563@29 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index d18a90f5eca3..69a40cfc1f29 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -456,6 +456,7 @@
 		};
 
 		mmu_isp: mmu@480bd400 {
+			#iommu-cells = <0>;
 			compatible = "ti,omap2-iommu";
 			reg = <0x480bd400 0x80>;
 			interrupts = <24>;
@@ -464,6 +465,7 @@
 		};
 
 		mmu_iva: mmu@5d000000 {
+			#iommu-cells = <0>;
 			compatible = "ti,omap2-iommu";
 			reg = <0x5d000000 0x80>;
 			interrupts = <28>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index efe5f737f39b..7d24ae0306b5 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -128,7 +128,7 @@
 	 * hierarchy.
 	 */
 	ocp {
-		compatible = "ti,omap4-l3-noc", "simple-bus";
+		compatible = "ti,omap5-l3-noc", "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 74c3212f1f11..824ddab9c3ad 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -545,7 +545,7 @@
 		compatible = "adi,adv7511w";
 		reg = <0x39>;
 		interrupt-parent = <&gpio3>;
-		interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+		interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
 
 		adi,input-depth = <8>;
 		adi,input-colorspace = "rgb";
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index bfd3f1c734b8..2201cd5da3bb 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -1017,23 +1017,6 @@
 			status = "disabled";
 		};
 
-		vmmci: regulator-gpio {
-			compatible = "regulator-gpio";
-
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <2900000>;
-			regulator-name = "mmci-reg";
-			regulator-type = "voltage";
-
-			startup-delay-us = <100>;
-			enable-active-high;
-
-			states = <1800000 0x1
-				  2900000 0x0>;
-
-			status = "disabled";
-		};
-
 		mcde@a0350000 {
 			compatible = "stericsson,mcde";
 			reg = <0xa0350000 0x1000>, /* MCDE */
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index bf8f0eddc2c0..744c1e3a744d 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -111,6 +111,21 @@
 		pinctrl-1 = <&i2c3_sleep_mode>;
 	};
 
+	vmmci: regulator-gpio {
+		compatible = "regulator-gpio";
+
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <2900000>;
+		regulator-name = "mmci-reg";
+		regulator-type = "voltage";
+
+		startup-delay-us = <100>;
+		enable-active-high;
+
+		states = <1800000 0x1
+			  2900000 0x0>;
+	};
+
 	// External Micro SD slot
 	sdi0_per1@80126000 {
 		arm,primecell-periphid = <0x10480180>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 206826a855c0..1bc84ebdccaa 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -146,8 +146,21 @@
 	};
 
 	vmmci: regulator-gpio {
+		compatible = "regulator-gpio";
+
 		gpios = <&gpio7 4 0x4>;
 		enable-gpio = <&gpio6 25 0x4>;
+
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <2900000>;
+		regulator-name = "mmci-reg";
+		regulator-type = "voltage";
+
+		startup-delay-us = <100>;
+		enable-active-high;
+
+		states = <1800000 0x1
+			  2900000 0x0>;
 	};
 
 	// External Micro SD slot
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index ab86655c1f4b..0ca4a3eaf65d 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -39,11 +39,14 @@ CONFIG_ARCH_HIP04=y
 CONFIG_ARCH_KEYSTONE=y
 CONFIG_ARCH_MESON=y
 CONFIG_ARCH_MXC=y
+CONFIG_SOC_IMX50=y
 CONFIG_SOC_IMX51=y
 CONFIG_SOC_IMX53=y
 CONFIG_SOC_IMX6Q=y
 CONFIG_SOC_IMX6SL=y
+CONFIG_SOC_IMX6SX=y
 CONFIG_SOC_VF610=y
+CONFIG_SOC_LS1021A=y
 CONFIG_ARCH_OMAP3=y
 CONFIG_ARCH_OMAP4=y
 CONFIG_SOC_OMAP5=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 9ff7b54b2a83..3743ca221d40 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -393,7 +393,7 @@ CONFIG_TI_EDMA=y
393CONFIG_DMA_OMAP=y 393CONFIG_DMA_OMAP=y
394# CONFIG_IOMMU_SUPPORT is not set 394# CONFIG_IOMMU_SUPPORT is not set
395CONFIG_EXTCON=m 395CONFIG_EXTCON=m
396CONFIG_EXTCON_GPIO=m 396CONFIG_EXTCON_USB_GPIO=m
397CONFIG_EXTCON_PALMAS=m 397CONFIG_EXTCON_PALMAS=m
398CONFIG_TI_EMIF=m 398CONFIG_TI_EMIF=m
399CONFIG_PWM=y 399CONFIG_PWM=y
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 8e3fcb924db6..2ef282f96651 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -25,7 +25,7 @@ struct dma_iommu_mapping {
25}; 25};
26 26
27struct dma_iommu_mapping * 27struct dma_iommu_mapping *
28arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size); 28arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
29 29
30void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); 30void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
31 31
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 2f7e6ff67d51..0b579b2f4e0e 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -110,5 +110,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
110bool xen_arch_need_swiotlb(struct device *dev, 110bool xen_arch_need_swiotlb(struct device *dev,
111 unsigned long pfn, 111 unsigned long pfn,
112 unsigned long mfn); 112 unsigned long mfn);
113unsigned long xen_get_swiotlb_free_pages(unsigned int order);
113 114
114#endif /* _ASM_ARM_XEN_PAGE_H */ 115#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 91c7ba182dcd..213919ba326f 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -303,12 +303,17 @@ static int probe_current_pmu(struct arm_pmu *pmu)
303 303
304static int of_pmu_irq_cfg(struct platform_device *pdev) 304static int of_pmu_irq_cfg(struct platform_device *pdev)
305{ 305{
306 int i; 306 int i, irq;
307 int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); 307 int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
308 308
309 if (!irqs) 309 if (!irqs)
310 return -ENOMEM; 310 return -ENOMEM;
311 311
312 /* Don't bother with PPIs; they're already affine */
313 irq = platform_get_irq(pdev, 0);
314 if (irq >= 0 && irq_is_percpu(irq))
315 return 0;
316
312 for (i = 0; i < pdev->num_resources; ++i) { 317 for (i = 0; i < pdev->num_resources; ++i) {
313 struct device_node *dn; 318 struct device_node *dn;
314 int cpu; 319 int cpu;
@@ -317,7 +322,7 @@ static int of_pmu_irq_cfg(struct platform_device *pdev)
317 i); 322 i);
318 if (!dn) { 323 if (!dn) {
319 pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", 324 pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
320 of_node_full_name(dn), i); 325 of_node_full_name(pdev->dev.of_node), i);
321 break; 326 break;
322 } 327 }
323 328
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
index fb8d4a2ad48c..a5edd7d60266 100644
--- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 Pengutronix, Wolfram Sang <w.sang@pengutronix.de> 2 * Copyright (C) 2010 Pengutronix, Wolfram Sang <kernel@pengutronix.de>
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it under 4 * This program is free software; you can redistribute it and/or modify it under
5 * the terms of the GNU General Public License version 2 as published by the 5 * the terms of the GNU General Public License version 2 as published by the
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index cbefbd7cfdb5..661d753df584 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -112,6 +112,7 @@
112#define OMAP3430_VC_CMD_ONLP_SHIFT 16 112#define OMAP3430_VC_CMD_ONLP_SHIFT 16
113#define OMAP3430_VC_CMD_RET_SHIFT 8 113#define OMAP3430_VC_CMD_RET_SHIFT 8
114#define OMAP3430_VC_CMD_OFF_SHIFT 0 114#define OMAP3430_VC_CMD_OFF_SHIFT 0
115#define OMAP3430_SREN_MASK (1 << 4)
115#define OMAP3430_HSEN_MASK (1 << 3) 116#define OMAP3430_HSEN_MASK (1 << 3)
116#define OMAP3430_MCODE_MASK (0x7 << 0) 117#define OMAP3430_MCODE_MASK (0x7 << 0)
117#define OMAP3430_VALID_MASK (1 << 24) 118#define OMAP3430_VALID_MASK (1 << 24)
diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
index b1c7a33e00e7..e794828dee55 100644
--- a/arch/arm/mach-omap2/prm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
@@ -35,6 +35,7 @@
35#define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1 35#define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1
36#define OMAP4430_GLOBAL_WUEN_MASK (1 << 16) 36#define OMAP4430_GLOBAL_WUEN_MASK (1 << 16)
37#define OMAP4430_HSMCODE_MASK (0x7 << 0) 37#define OMAP4430_HSMCODE_MASK (0x7 << 0)
38#define OMAP4430_SRMODEEN_MASK (1 << 4)
38#define OMAP4430_HSMODEEN_MASK (1 << 3) 39#define OMAP4430_HSMODEEN_MASK (1 << 3)
39#define OMAP4430_HSSCLL_SHIFT 24 40#define OMAP4430_HSSCLL_SHIFT 24
40#define OMAP4430_ICEPICK_RST_SHIFT 9 41#define OMAP4430_ICEPICK_RST_SHIFT 9
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index be9ef834fa81..076fd20d7e5a 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
316 * idle. And we can also scale voltages to zero for off-idle. 316 * idle. And we can also scale voltages to zero for off-idle.
317 * Note that no actual voltage scaling during off-idle will 317 * Note that no actual voltage scaling during off-idle will
318 * happen unless the board specific twl4030 PMIC scripts are 318 * happen unless the board specific twl4030 PMIC scripts are
319 * loaded. 319 * loaded. See also omap_vc_i2c_init for comments regarding
320 * erratum i531.
320 */ 321 */
321 val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET); 322 val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET);
322 if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) { 323 if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) {
@@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
704 return; 705 return;
705 } 706 }
706 707
708 /*
709 * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around
710 * erratum i531 "Extra Power Consumed When Repeated Start Operation
711 * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)".
 712 * Otherwise I2C4 eventually leads to about 23 mW of extra power being
 713 * consumed even during off-idle using VMODE.
714 */
707 i2c_high_speed = voltdm->pmic->i2c_high_speed; 715 i2c_high_speed = voltdm->pmic->i2c_high_speed;
708 if (i2c_high_speed) 716 if (i2c_high_speed)
709 voltdm->rmw(vc->common->i2c_cfg_hsen_mask, 717 voltdm->rmw(vc->common->i2c_cfg_clear_mask,
710 vc->common->i2c_cfg_hsen_mask, 718 vc->common->i2c_cfg_hsen_mask,
711 vc->common->i2c_cfg_reg); 719 vc->common->i2c_cfg_reg);
712 720
diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
index cdbdd78e755e..89b83b7ff3ec 100644
--- a/arch/arm/mach-omap2/vc.h
+++ b/arch/arm/mach-omap2/vc.h
@@ -34,6 +34,7 @@ struct voltagedomain;
34 * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register 34 * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register
35 * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register 35 * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register
36 * @i2c_cfg_reg: I2C configuration register offset 36 * @i2c_cfg_reg: I2C configuration register offset
37 * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register
37 * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register 38 * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register
38 * @i2c_mcode_mask: MCODE field mask for I2C config register 39 * @i2c_mcode_mask: MCODE field mask for I2C config register
39 * 40 *
@@ -52,6 +53,7 @@ struct omap_vc_common {
52 u8 cmd_ret_shift; 53 u8 cmd_ret_shift;
53 u8 cmd_off_shift; 54 u8 cmd_off_shift;
54 u8 i2c_cfg_reg; 55 u8 i2c_cfg_reg;
56 u8 i2c_cfg_clear_mask;
55 u8 i2c_cfg_hsen_mask; 57 u8 i2c_cfg_hsen_mask;
56 u8 i2c_mcode_mask; 58 u8 i2c_mcode_mask;
57}; 59};
diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
index 75bc4aa22b3a..71d74c9172c1 100644
--- a/arch/arm/mach-omap2/vc3xxx_data.c
+++ b/arch/arm/mach-omap2/vc3xxx_data.c
@@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = {
40 .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT, 40 .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT,
41 .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT, 41 .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT,
42 .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT, 42 .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT,
43 .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
43 .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK, 44 .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
44 .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET, 45 .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET,
45 .i2c_mcode_mask = OMAP3430_MCODE_MASK, 46 .i2c_mcode_mask = OMAP3430_MCODE_MASK,
diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
index 085e5d6a04fd..2abd5fa8a697 100644
--- a/arch/arm/mach-omap2/vc44xx_data.c
+++ b/arch/arm/mach-omap2/vc44xx_data.c
@@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = {
42 .cmd_ret_shift = OMAP4430_RET_SHIFT, 42 .cmd_ret_shift = OMAP4430_RET_SHIFT,
43 .cmd_off_shift = OMAP4430_OFF_SHIFT, 43 .cmd_off_shift = OMAP4430_OFF_SHIFT,
44 .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET, 44 .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
45 .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK,
45 .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK, 46 .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
46 .i2c_mcode_mask = OMAP4430_HSMCODE_MASK, 47 .i2c_mcode_mask = OMAP4430_HSMCODE_MASK,
47}; 48};
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 8896e71586f5..f09683687963 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -691,4 +691,13 @@ config SHARPSL_PM_MAX1111
691config PXA310_ULPI 691config PXA310_ULPI
692 bool 692 bool
693 693
694config PXA_SYSTEMS_CPLDS
695 tristate "Motherboard cplds"
696 default ARCH_LUBBOCK || MACH_MAINSTONE
697 help
 698	  This driver supports the Lubbock and Mainstone multifunction chips
 699	  found on the pxa25x development platform (Lubbock) and the pxa27x
 700	  development platform (Mainstone). This I/O board provides interrupt
 701	  handling, an ethernet controller, flash chips, etc.
702
694endif 703endif
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index eb0bf7678a99..4087d334ecdf 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -90,4 +90,5 @@ obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o
90obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o 90obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o
91obj-$(CONFIG_MACH_ZIPIT2) += z2.o 91obj-$(CONFIG_MACH_ZIPIT2) += z2.o
92 92
93obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o
93obj-$(CONFIG_TOSA_BT) += tosa-bt.o 94obj-$(CONFIG_TOSA_BT) += tosa-bt.o
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 958cd6af9384..1eecf794acd2 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -37,7 +37,9 @@
37#define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100) 37#define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100)
38 38
39/* Board specific IRQs */ 39/* Board specific IRQs */
40#define LUBBOCK_IRQ(x) (IRQ_BOARD_START + (x)) 40#define LUBBOCK_NR_IRQS IRQ_BOARD_START
41
42#define LUBBOCK_IRQ(x) (LUBBOCK_NR_IRQS + (x))
41#define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0) 43#define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0)
42#define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1) 44#define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1)
43#define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */ 45#define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */
@@ -47,8 +49,7 @@
47#define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */ 49#define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */
48#define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6) 50#define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6)
49 51
50#define LUBBOCK_SA1111_IRQ_BASE (IRQ_BOARD_START + 16) 52#define LUBBOCK_SA1111_IRQ_BASE (LUBBOCK_NR_IRQS + 32)
51#define LUBBOCK_NR_IRQS (IRQ_BOARD_START + 16 + 55)
52 53
53#ifndef __ASSEMBLY__ 54#ifndef __ASSEMBLY__
54extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set); 55extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set);
diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h
index 1bfc4e822a41..e82a7d31104e 100644
--- a/arch/arm/mach-pxa/include/mach/mainstone.h
+++ b/arch/arm/mach-pxa/include/mach/mainstone.h
@@ -120,7 +120,9 @@
120#define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */ 120#define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */
121 121
122/* board specific IRQs */ 122/* board specific IRQs */
123#define MAINSTONE_IRQ(x) (IRQ_BOARD_START + (x)) 123#define MAINSTONE_NR_IRQS IRQ_BOARD_START
124
125#define MAINSTONE_IRQ(x) (MAINSTONE_NR_IRQS + (x))
124#define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0) 126#define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0)
125#define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1) 127#define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1)
126#define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2) 128#define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2)
@@ -136,6 +138,4 @@
136#define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14) 138#define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14)
137#define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15) 139#define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15)
138 140
139#define MAINSTONE_NR_IRQS (IRQ_BOARD_START + 16)
140
141#endif 141#endif
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index d8a1be619f21..4ac9ab80d24b 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -12,6 +12,7 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14#include <linux/gpio.h> 14#include <linux/gpio.h>
15#include <linux/gpio/machine.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/init.h> 18#include <linux/init.h>
@@ -123,84 +124,6 @@ void lubbock_set_misc_wr(unsigned int mask, unsigned int set)
123} 124}
124EXPORT_SYMBOL(lubbock_set_misc_wr); 125EXPORT_SYMBOL(lubbock_set_misc_wr);
125 126
126static unsigned long lubbock_irq_enabled;
127
128static void lubbock_mask_irq(struct irq_data *d)
129{
130 int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
131 LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
132}
133
134static void lubbock_unmask_irq(struct irq_data *d)
135{
136 int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
137 /* the irq can be acknowledged only if deasserted, so it's done here */
138 LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
139 LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
140}
141
142static struct irq_chip lubbock_irq_chip = {
143 .name = "FPGA",
144 .irq_ack = lubbock_mask_irq,
145 .irq_mask = lubbock_mask_irq,
146 .irq_unmask = lubbock_unmask_irq,
147};
148
149static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
150{
151 unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
152 do {
153 /* clear our parent irq */
154 desc->irq_data.chip->irq_ack(&desc->irq_data);
155 if (likely(pending)) {
156 irq = LUBBOCK_IRQ(0) + __ffs(pending);
157 generic_handle_irq(irq);
158 }
159 pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
160 } while (pending);
161}
162
163static void __init lubbock_init_irq(void)
164{
165 int irq;
166
167 pxa25x_init_irq();
168
169 /* setup extra lubbock irqs */
170 for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) {
171 irq_set_chip_and_handler(irq, &lubbock_irq_chip,
172 handle_level_irq);
173 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
174 }
175
176 irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler);
177 irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
178}
179
180#ifdef CONFIG_PM
181
182static void lubbock_irq_resume(void)
183{
184 LUB_IRQ_MASK_EN = lubbock_irq_enabled;
185}
186
187static struct syscore_ops lubbock_irq_syscore_ops = {
188 .resume = lubbock_irq_resume,
189};
190
191static int __init lubbock_irq_device_init(void)
192{
193 if (machine_is_lubbock()) {
194 register_syscore_ops(&lubbock_irq_syscore_ops);
195 return 0;
196 }
197 return -ENODEV;
198}
199
200device_initcall(lubbock_irq_device_init);
201
202#endif
203
204static int lubbock_udc_is_connected(void) 127static int lubbock_udc_is_connected(void)
205{ 128{
206 return (LUB_MISC_RD & (1 << 9)) == 0; 129 return (LUB_MISC_RD & (1 << 9)) == 0;
@@ -383,11 +306,38 @@ static struct platform_device lubbock_flash_device[2] = {
383 }, 306 },
384}; 307};
385 308
309static struct resource lubbock_cplds_resources[] = {
310 [0] = {
311 .start = LUBBOCK_FPGA_PHYS + 0xc0,
312 .end = LUBBOCK_FPGA_PHYS + 0xe0 - 1,
313 .flags = IORESOURCE_MEM,
314 },
315 [1] = {
316 .start = PXA_GPIO_TO_IRQ(0),
317 .end = PXA_GPIO_TO_IRQ(0),
318 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
319 },
320 [2] = {
321 .start = LUBBOCK_IRQ(0),
322 .end = LUBBOCK_IRQ(6),
323 .flags = IORESOURCE_IRQ,
324 },
325};
326
327static struct platform_device lubbock_cplds_device = {
328 .name = "pxa_cplds_irqs",
329 .id = -1,
330 .resource = &lubbock_cplds_resources[0],
331 .num_resources = 3,
332};
333
334
386static struct platform_device *devices[] __initdata = { 335static struct platform_device *devices[] __initdata = {
387 &sa1111_device, 336 &sa1111_device,
388 &smc91x_device, 337 &smc91x_device,
389 &lubbock_flash_device[0], 338 &lubbock_flash_device[0],
390 &lubbock_flash_device[1], 339 &lubbock_flash_device[1],
340 &lubbock_cplds_device,
391}; 341};
392 342
393static struct pxafb_mode_info sharp_lm8v31_mode = { 343static struct pxafb_mode_info sharp_lm8v31_mode = {
@@ -648,7 +598,7 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
648 /* Maintainer: MontaVista Software Inc. */ 598 /* Maintainer: MontaVista Software Inc. */
649 .map_io = lubbock_map_io, 599 .map_io = lubbock_map_io,
650 .nr_irqs = LUBBOCK_NR_IRQS, 600 .nr_irqs = LUBBOCK_NR_IRQS,
651 .init_irq = lubbock_init_irq, 601 .init_irq = pxa25x_init_irq,
652 .handle_irq = pxa25x_handle_irq, 602 .handle_irq = pxa25x_handle_irq,
653 .init_time = pxa_timer_init, 603 .init_time = pxa_timer_init,
654 .init_machine = lubbock_init, 604 .init_machine = lubbock_init,
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 78b84c0dfc79..2c0658cf6be2 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -13,6 +13,7 @@
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/gpio/machine.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
18#include <linux/syscore_ops.h> 19#include <linux/syscore_ops.h>
@@ -122,92 +123,6 @@ static unsigned long mainstone_pin_config[] = {
122 GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, 123 GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
123}; 124};
124 125
125static unsigned long mainstone_irq_enabled;
126
127static void mainstone_mask_irq(struct irq_data *d)
128{
129 int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
130 MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq));
131}
132
133static void mainstone_unmask_irq(struct irq_data *d)
134{
135 int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
136 /* the irq can be acknowledged only if deasserted, so it's done here */
137 MST_INTSETCLR &= ~(1 << mainstone_irq);
138 MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
139}
140
141static struct irq_chip mainstone_irq_chip = {
142 .name = "FPGA",
143 .irq_ack = mainstone_mask_irq,
144 .irq_mask = mainstone_mask_irq,
145 .irq_unmask = mainstone_unmask_irq,
146};
147
148static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc)
149{
150 unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled;
151 do {
152 /* clear useless edge notification */
153 desc->irq_data.chip->irq_ack(&desc->irq_data);
154 if (likely(pending)) {
155 irq = MAINSTONE_IRQ(0) + __ffs(pending);
156 generic_handle_irq(irq);
157 }
158 pending = MST_INTSETCLR & mainstone_irq_enabled;
159 } while (pending);
160}
161
162static void __init mainstone_init_irq(void)
163{
164 int irq;
165
166 pxa27x_init_irq();
167
168 /* setup extra Mainstone irqs */
169 for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
170 irq_set_chip_and_handler(irq, &mainstone_irq_chip,
171 handle_level_irq);
172 if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
173 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
174 else
175 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
176 }
177 set_irq_flags(MAINSTONE_IRQ(8), 0);
178 set_irq_flags(MAINSTONE_IRQ(12), 0);
179
180 MST_INTMSKENA = 0;
181 MST_INTSETCLR = 0;
182
183 irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler);
184 irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
185}
186
187#ifdef CONFIG_PM
188
189static void mainstone_irq_resume(void)
190{
191 MST_INTMSKENA = mainstone_irq_enabled;
192}
193
194static struct syscore_ops mainstone_irq_syscore_ops = {
195 .resume = mainstone_irq_resume,
196};
197
198static int __init mainstone_irq_device_init(void)
199{
200 if (machine_is_mainstone())
201 register_syscore_ops(&mainstone_irq_syscore_ops);
202
203 return 0;
204}
205
206device_initcall(mainstone_irq_device_init);
207
208#endif
209
210
211static struct resource smc91x_resources[] = { 126static struct resource smc91x_resources[] = {
212 [0] = { 127 [0] = {
213 .start = (MST_ETH_PHYS + 0x300), 128 .start = (MST_ETH_PHYS + 0x300),
@@ -487,11 +402,37 @@ static struct platform_device mst_gpio_keys_device = {
487 }, 402 },
488}; 403};
489 404
405static struct resource mst_cplds_resources[] = {
406 [0] = {
407 .start = MST_FPGA_PHYS + 0xc0,
408 .end = MST_FPGA_PHYS + 0xe0 - 1,
409 .flags = IORESOURCE_MEM,
410 },
411 [1] = {
412 .start = PXA_GPIO_TO_IRQ(0),
413 .end = PXA_GPIO_TO_IRQ(0),
414 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
415 },
416 [2] = {
417 .start = MAINSTONE_IRQ(0),
418 .end = MAINSTONE_IRQ(15),
419 .flags = IORESOURCE_IRQ,
420 },
421};
422
423static struct platform_device mst_cplds_device = {
424 .name = "pxa_cplds_irqs",
425 .id = -1,
426 .resource = &mst_cplds_resources[0],
427 .num_resources = 3,
428};
429
490static struct platform_device *platform_devices[] __initdata = { 430static struct platform_device *platform_devices[] __initdata = {
491 &smc91x_device, 431 &smc91x_device,
492 &mst_flash_device[0], 432 &mst_flash_device[0],
493 &mst_flash_device[1], 433 &mst_flash_device[1],
494 &mst_gpio_keys_device, 434 &mst_gpio_keys_device,
435 &mst_cplds_device,
495}; 436};
496 437
497static struct pxaohci_platform_data mainstone_ohci_platform_data = { 438static struct pxaohci_platform_data mainstone_ohci_platform_data = {
@@ -718,7 +659,7 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
718 .atag_offset = 0x100, /* BLOB boot parameter setting */ 659 .atag_offset = 0x100, /* BLOB boot parameter setting */
719 .map_io = mainstone_map_io, 660 .map_io = mainstone_map_io,
720 .nr_irqs = MAINSTONE_NR_IRQS, 661 .nr_irqs = MAINSTONE_NR_IRQS,
721 .init_irq = mainstone_init_irq, 662 .init_irq = pxa27x_init_irq,
722 .handle_irq = pxa27x_handle_irq, 663 .handle_irq = pxa27x_handle_irq,
723 .init_time = pxa_timer_init, 664 .init_time = pxa_timer_init,
724 .init_machine = mainstone_init, 665 .init_machine = mainstone_init,
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
new file mode 100644
index 000000000000..f1aeb54fabe3
--- /dev/null
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -0,0 +1,200 @@
1/*
2 * Intel Reference Systems cplds
3 *
4 * Copyright (C) 2014 Robert Jarzmik
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
 11 * CPLD motherboard driver, supporting the Lubbock and Mainstone SoC boards.
12 */
13
14#include <linux/bitops.h>
15#include <linux/gpio.h>
16#include <linux/gpio/consumer.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/irq.h>
20#include <linux/irqdomain.h>
21#include <linux/mfd/core.h>
22#include <linux/module.h>
23#include <linux/of_platform.h>
24
25#define FPGA_IRQ_MASK_EN 0x0
26#define FPGA_IRQ_SET_CLR 0x10
27
28#define CPLDS_NB_IRQ 32
29
30struct cplds {
31 void __iomem *base;
32 int irq;
33 unsigned int irq_mask;
34 struct gpio_desc *gpio0;
35 struct irq_domain *irqdomain;
36};
37
38static irqreturn_t cplds_irq_handler(int in_irq, void *d)
39{
40 struct cplds *fpga = d;
41 unsigned long pending;
42 unsigned int bit;
43
44 pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
45 for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
46 generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
47
48 return IRQ_HANDLED;
49}
50
51static void cplds_irq_mask_ack(struct irq_data *d)
52{
53 struct cplds *fpga = irq_data_get_irq_chip_data(d);
54 unsigned int cplds_irq = irqd_to_hwirq(d);
55 unsigned int set, bit = BIT(cplds_irq);
56
57 fpga->irq_mask &= ~bit;
58 writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
59 set = readl(fpga->base + FPGA_IRQ_SET_CLR);
60 writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
61}
62
63static void cplds_irq_unmask(struct irq_data *d)
64{
65 struct cplds *fpga = irq_data_get_irq_chip_data(d);
66 unsigned int cplds_irq = irqd_to_hwirq(d);
67 unsigned int bit = BIT(cplds_irq);
68
69 fpga->irq_mask |= bit;
70 writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
71}
72
73static struct irq_chip cplds_irq_chip = {
74 .name = "pxa_cplds",
75 .irq_mask_ack = cplds_irq_mask_ack,
76 .irq_unmask = cplds_irq_unmask,
77 .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
78};
79
80static int cplds_irq_domain_map(struct irq_domain *d, unsigned int irq,
81 irq_hw_number_t hwirq)
82{
83 struct cplds *fpga = d->host_data;
84
85 irq_set_chip_and_handler(irq, &cplds_irq_chip, handle_level_irq);
86 irq_set_chip_data(irq, fpga);
87
88 return 0;
89}
90
91static const struct irq_domain_ops cplds_irq_domain_ops = {
92 .xlate = irq_domain_xlate_twocell,
93 .map = cplds_irq_domain_map,
94};
95
96static int cplds_resume(struct platform_device *pdev)
97{
98 struct cplds *fpga = platform_get_drvdata(pdev);
99
100 writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
101
102 return 0;
103}
104
105static int cplds_probe(struct platform_device *pdev)
106{
107 struct resource *res;
108 struct cplds *fpga;
109 int ret;
 110	int base_irq = 0;
111 unsigned long irqflags = 0;
112
113 fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
114 if (!fpga)
115 return -ENOMEM;
116
117 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
118 if (res) {
119 fpga->irq = (unsigned int)res->start;
120 irqflags = res->flags;
121 }
122 if (!fpga->irq)
123 return -ENODEV;
124
125 base_irq = platform_get_irq(pdev, 1);
126 if (base_irq < 0)
127 base_irq = 0;
128
129 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
130 fpga->base = devm_ioremap_resource(&pdev->dev, res);
131 if (IS_ERR(fpga->base))
132 return PTR_ERR(fpga->base);
133
134 platform_set_drvdata(pdev, fpga);
135
136 writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
137 writel(0, fpga->base + FPGA_IRQ_SET_CLR);
138
139 ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler,
140 irqflags, dev_name(&pdev->dev), fpga);
141 if (ret == -ENOSYS)
142 return -EPROBE_DEFER;
143
144 if (ret) {
145 dev_err(&pdev->dev, "couldn't request main irq%d: %d\n",
146 fpga->irq, ret);
147 return ret;
148 }
149
150 irq_set_irq_wake(fpga->irq, 1);
151 fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
152 CPLDS_NB_IRQ,
153 &cplds_irq_domain_ops, fpga);
154 if (!fpga->irqdomain)
155 return -ENODEV;
156
157 if (base_irq) {
158 ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0,
159 CPLDS_NB_IRQ);
160 if (ret) {
161 dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n",
162 base_irq, base_irq + CPLDS_NB_IRQ);
163 return ret;
164 }
165 }
166
167 return 0;
168}
169
170static int cplds_remove(struct platform_device *pdev)
171{
172 struct cplds *fpga = platform_get_drvdata(pdev);
173
174 irq_set_chip_and_handler(fpga->irq, NULL, NULL);
175
176 return 0;
177}
178
179static const struct of_device_id cplds_id_table[] = {
180 { .compatible = "intel,lubbock-cplds-irqs", },
181 { .compatible = "intel,mainstone-cplds-irqs", },
182 { }
183};
184MODULE_DEVICE_TABLE(of, cplds_id_table);
185
186static struct platform_driver cplds_driver = {
187 .driver = {
188 .name = "pxa_cplds_irqs",
189 .of_match_table = of_match_ptr(cplds_id_table),
190 },
191 .probe = cplds_probe,
192 .remove = cplds_remove,
193 .resume = cplds_resume,
194};
195
196module_platform_driver(cplds_driver);
197
198MODULE_DESCRIPTION("PXA Cplds interrupts driver");
199MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
200MODULE_LICENSE("GPL");
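Because cplds_probe() creates strict mappings starting at the base_irq taken from the board's second IRQ resource (the LUBBOCK_IRQ()/MAINSTONE_IRQ() range registered in the board files above), existing board code can keep requesting the fixed legacy interrupt numbers unchanged. A hedged consumer sketch (the handler name is a placeholder, not from the patch):

	/* Sketch: a Lubbock driver still requests the legacy fixed irq number,
	 * which the cplds driver has mapped onto hwirq 0 of its irq_domain. */
	ret = request_irq(LUBBOCK_SD_IRQ, sd_detect_handler, 0, "sd-detect", NULL);
	if (ret)
		pr_err("failed to request LUBBOCK_SD_IRQ: %d\n", ret);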
diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c
index b07d88602073..22812fe06460 100644
--- a/arch/arm/mach-rockchip/pm.c
+++ b/arch/arm/mach-rockchip/pm.c
@@ -44,9 +44,11 @@ static void __iomem *rk3288_bootram_base;
44static phys_addr_t rk3288_bootram_phy; 44static phys_addr_t rk3288_bootram_phy;
45 45
46static struct regmap *pmu_regmap; 46static struct regmap *pmu_regmap;
47static struct regmap *grf_regmap;
47static struct regmap *sgrf_regmap; 48static struct regmap *sgrf_regmap;
48 49
49static u32 rk3288_pmu_pwr_mode_con; 50static u32 rk3288_pmu_pwr_mode_con;
51static u32 rk3288_grf_soc_con0;
50static u32 rk3288_sgrf_soc_con0; 52static u32 rk3288_sgrf_soc_con0;
51 53
52static inline u32 rk3288_l2_config(void) 54static inline u32 rk3288_l2_config(void)
@@ -70,12 +72,26 @@ static void rk3288_slp_mode_set(int level)
70{ 72{
71 u32 mode_set, mode_set1; 73 u32 mode_set, mode_set1;
72 74
75 regmap_read(grf_regmap, RK3288_GRF_SOC_CON0, &rk3288_grf_soc_con0);
76
73 regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0); 77 regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0);
74 78
75 regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON, 79 regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON,
76 &rk3288_pmu_pwr_mode_con); 80 &rk3288_pmu_pwr_mode_con);
77 81
78 /* 82 /*
 83 * We need to set the GRF_FORCE_JTAG bit here for the debug module;
 84 * otherwise it may become inaccessible after resume.
 85 * This creates a potential security issue, as the sdmmc pins may
 86 * accept jtag data for a short time during resume if no card is
 87 * inserted.
 88 * But this is of course also true for the regular boot, before we
 89 * turn off the jtag/sdmmc autodetect.
90 */
91 regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, GRF_FORCE_JTAG |
92 GRF_FORCE_JTAG_WRITE);
93
94 /*
79 * SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR 95 * SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR
80 * PCLK_WDT_GATE - disable WDT during suspend. 96 * PCLK_WDT_GATE - disable WDT during suspend.
81 */ 97 */
@@ -83,6 +99,13 @@ static void rk3288_slp_mode_set(int level)
83 SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN 99 SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN
84 | SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE); 100 | SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE);
85 101
102 /*
 103 * The dapswjdp cannot automatically reset before resume, which may
 104 * cause it to access an illegal address during resume. Let's disable
 105 * it before suspend; the MASKROM will re-enable it.
106 */
107 regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0, SGRF_DAPDEVICEEN_WRITE);
108
86 /* booting address of resuming system is from this register value */ 109 /* booting address of resuming system is from this register value */
87 regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR, 110 regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR,
88 rk3288_bootram_phy); 111 rk3288_bootram_phy);
@@ -128,6 +151,9 @@ static void rk3288_slp_mode_set_resume(void)
128 regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0, 151 regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
129 rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE 152 rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE
130 | SGRF_FAST_BOOT_EN_WRITE); 153 | SGRF_FAST_BOOT_EN_WRITE);
154
155 regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, rk3288_grf_soc_con0 |
156 GRF_FORCE_JTAG_WRITE);
131} 157}
132 158
133static int rockchip_lpmode_enter(unsigned long arg) 159static int rockchip_lpmode_enter(unsigned long arg)
@@ -186,6 +212,13 @@ static int rk3288_suspend_init(struct device_node *np)
186 return PTR_ERR(pmu_regmap); 212 return PTR_ERR(pmu_regmap);
187 } 213 }
188 214
215 grf_regmap = syscon_regmap_lookup_by_compatible(
216 "rockchip,rk3288-grf");
217 if (IS_ERR(grf_regmap)) {
218 pr_err("%s: could not find grf regmap\n", __func__);
 219 return PTR_ERR(grf_regmap);
220 }
221
189 sram_np = of_find_compatible_node(NULL, NULL, 222 sram_np = of_find_compatible_node(NULL, NULL,
190 "rockchip,rk3288-pmu-sram"); 223 "rockchip,rk3288-pmu-sram");
191 if (!sram_np) { 224 if (!sram_np) {
diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h
index 03ff31d8282d..f8a747bc1437 100644
--- a/arch/arm/mach-rockchip/pm.h
+++ b/arch/arm/mach-rockchip/pm.h
@@ -48,6 +48,10 @@ static inline void rockchip_suspend_init(void)
48#define RK3288_PMU_WAKEUP_RST_CLR_CNT 0x44 48#define RK3288_PMU_WAKEUP_RST_CLR_CNT 0x44
49#define RK3288_PMU_PWRMODE_CON1 0x90 49#define RK3288_PMU_PWRMODE_CON1 0x90
50 50
51#define RK3288_GRF_SOC_CON0 0x244
52#define GRF_FORCE_JTAG BIT(12)
53#define GRF_FORCE_JTAG_WRITE BIT(28)
54
51#define RK3288_SGRF_SOC_CON0 (0x0000) 55#define RK3288_SGRF_SOC_CON0 (0x0000)
52#define RK3288_SGRF_FAST_BOOT_ADDR (0x0120) 56#define RK3288_SGRF_FAST_BOOT_ADDR (0x0120)
53#define SGRF_PCLK_WDT_GATE BIT(6) 57#define SGRF_PCLK_WDT_GATE BIT(6)
@@ -55,6 +59,10 @@ static inline void rockchip_suspend_init(void)
55#define SGRF_FAST_BOOT_EN BIT(8) 59#define SGRF_FAST_BOOT_EN BIT(8)
56#define SGRF_FAST_BOOT_EN_WRITE BIT(24) 60#define SGRF_FAST_BOOT_EN_WRITE BIT(24)
57 61
62#define RK3288_SGRF_CPU_CON0 (0x40)
63#define SGRF_DAPDEVICEEN BIT(0)
64#define SGRF_DAPDEVICEEN_WRITE BIT(16)
65
58#define RK3288_CRU_MODE_CON 0x50 66#define RK3288_CRU_MODE_CON 0x50
59#define RK3288_CRU_SEL0_CON 0x60 67#define RK3288_CRU_SEL0_CON 0x60
60#define RK3288_CRU_SEL1_CON 0x64 68#define RK3288_CRU_SEL1_CON 0x64
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index d360ec044b66..b6cf3b449428 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -30,11 +30,30 @@
30#include "pm.h" 30#include "pm.h"
31 31
32#define RK3288_GRF_SOC_CON0 0x244 32#define RK3288_GRF_SOC_CON0 0x244
33#define RK3288_TIMER6_7_PHYS 0xff810000
33 34
34static void __init rockchip_timer_init(void) 35static void __init rockchip_timer_init(void)
35{ 36{
36 if (of_machine_is_compatible("rockchip,rk3288")) { 37 if (of_machine_is_compatible("rockchip,rk3288")) {
37 struct regmap *grf; 38 struct regmap *grf;
39 void __iomem *reg_base;
40
41 /*
42 * Most/all uboot versions for rk3288 don't enable timer7
43 * which is needed for the architected timer to work.
44 * So make sure it is running during early boot.
45 */
46 reg_base = ioremap(RK3288_TIMER6_7_PHYS, SZ_16K);
47 if (reg_base) {
48 writel(0, reg_base + 0x30);
49 writel(0xffffffff, reg_base + 0x20);
50 writel(0xffffffff, reg_base + 0x24);
51 writel(1, reg_base + 0x30);
52 dsb();
53 iounmap(reg_base);
54 } else {
55 pr_err("rockchip: could not map timer7 registers\n");
56 }
38 57
39 /* 58 /*
40 * Disable auto jtag/sdmmc switching that causes issues 59 * Disable auto jtag/sdmmc switching that causes issues
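A hedged reading of the writes in the block above, assuming the usual Rockchip timer layout in which TIMER7 starts 0x20 bytes into the TIMER6/7 block (the register names in the comments are assumptions, not taken from the patch):

	writel(0, reg_base + 0x30);		/* TIMER7 control: stop the timer */
	writel(0xffffffff, reg_base + 0x20);	/* TIMER7 load_count0: low 32 bits of max count */
	writel(0xffffffff, reg_base + 0x24);	/* TIMER7 load_count1: high 32 bits of max count */
	writel(1, reg_base + 0x30);		/* TIMER7 control: enable, free-running */
	dsb();					/* make sure the writes have reached the timer */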
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 09c5fe3d30c2..7e7583ddd607 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1878,7 +1878,7 @@ struct dma_map_ops iommu_coherent_ops = {
1878 * arm_iommu_attach_device function. 1878 * arm_iommu_attach_device function.
1879 */ 1879 */
1880struct dma_iommu_mapping * 1880struct dma_iommu_mapping *
1881arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size) 1881arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
1882{ 1882{
1883 unsigned int bits = size >> PAGE_SHIFT; 1883 unsigned int bits = size >> PAGE_SHIFT;
1884 unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 1884 unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@@ -1886,6 +1886,10 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
1886 int extensions = 1; 1886 int extensions = 1;
1887 int err = -ENOMEM; 1887 int err = -ENOMEM;
1888 1888
1889 /* currently only 32-bit DMA address space is supported */
1890 if (size > DMA_BIT_MASK(32) + 1)
1891 return ERR_PTR(-ERANGE);
1892
1889 if (!bitmap_size) 1893 if (!bitmap_size)
1890 return ERR_PTR(-EINVAL); 1894 return ERR_PTR(-EINVAL);
1891 1895
@@ -2057,13 +2061,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
2057 if (!iommu) 2061 if (!iommu)
2058 return false; 2062 return false;
2059 2063
2060 /*
2061 * currently arm_iommu_create_mapping() takes a max of size_t
2062 * for size param. So check this limit for now.
2063 */
2064 if (size > SIZE_MAX)
2065 return false;
2066
2067 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); 2064 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
2068 if (IS_ERR(mapping)) { 2065 if (IS_ERR(mapping)) {
2069 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", 2066 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
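With the size parameter widened to u64, a 32-bit ARM caller can now describe a full 4 GiB window (which does not fit in size_t), and anything beyond 32 bits is rejected with -ERANGE inside arm_iommu_create_mapping() itself. A minimal caller sketch (device, base address, and size below are illustrative):

	struct dma_iommu_mapping *mapping;

	/* Sketch: a 1 GiB IOMMU mapping starting at bus address 0x80000000. */
	mapping = arm_iommu_create_mapping(dev->bus, 0x80000000, SZ_1G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);
	/* ... later: arm_iommu_attach_device(dev, mapping); */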
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index aa0519eed698..774ef1323554 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -22,8 +22,6 @@
22 * 22 *
23 * These are the low level assembler for performing cache and TLB 23 * These are the low level assembler for performing cache and TLB
24 * functions on the arm1020. 24 * functions on the arm1020.
25 *
26 * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
27 */ 25 */
28#include <linux/linkage.h> 26#include <linux/linkage.h>
29#include <linux/init.h> 27#include <linux/init.h>
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index bff4c7f70fd6..ae3c27b71594 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -22,8 +22,6 @@
22 * 22 *
23 * These are the low level assembler for performing cache and TLB 23 * These are the low level assembler for performing cache and TLB
24 * functions on the arm1020e. 24 * functions on the arm1020e.
25 *
26 * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
27 */ 25 */
28#include <linux/linkage.h> 26#include <linux/linkage.h>
29#include <linux/init.h> 27#include <linux/init.h>
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ede8c54ab4aa..32a47cc19076 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -441,9 +441,6 @@ ENTRY(cpu_arm925_set_pte_ext)
441 .type __arm925_setup, #function 441 .type __arm925_setup, #function
442__arm925_setup: 442__arm925_setup:
443 mov r0, #0 443 mov r0, #0
444#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
445 orr r0,r0,#1 << 7
446#endif
447 444
448 /* Transparent on, D-cache clean & flush mode. See NOTE2 above */ 445 /* Transparent on, D-cache clean & flush mode. See NOTE2 above */
449 orr r0,r0,#1 << 1 @ transparent mode on 446 orr r0,r0,#1 << 1 @ transparent mode on
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index e494d6d6acbe..92e08bf37aad 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -602,7 +602,6 @@ __\name\()_proc_info:
602 PMD_SECT_AP_WRITE | \ 602 PMD_SECT_AP_WRITE | \
603 PMD_SECT_AP_READ 603 PMD_SECT_AP_READ
604 initfn __feroceon_setup, __\name\()_proc_info 604 initfn __feroceon_setup, __\name\()_proc_info
605 .long __feroceon_setup
606 .long cpu_arch_name 605 .long cpu_arch_name
607 .long cpu_elf_name 606 .long cpu_elf_name
608 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 607 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index b5f470ddab6d..4550d247e308 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -54,6 +54,7 @@
54#define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) 54#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
55 55
56#define FLAG_NEED_X_RESET (1 << 0) 56#define FLAG_NEED_X_RESET (1 << 0)
57#define FLAG_IMM_OVERFLOW (1 << 1)
57 58
58struct jit_ctx { 59struct jit_ctx {
59 const struct bpf_prog *skf; 60 const struct bpf_prog *skf;
@@ -293,6 +294,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
293 /* PC in ARM mode == address of the instruction + 8 */ 294 /* PC in ARM mode == address of the instruction + 8 */
294 imm = offset - (8 + ctx->idx * 4); 295 imm = offset - (8 + ctx->idx * 4);
295 296
297 if (imm & ~0xfff) {
298 /*
 299 * The literal pool is too far away; signal it via the flags.
 300 * Unfortunately we can only detect this on the second pass.
301 */
302 ctx->flags |= FLAG_IMM_OVERFLOW;
303 return 0;
304 }
305
296 return imm; 306 return imm;
297} 307}
298 308
@@ -449,10 +459,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
449 return; 459 return;
450 } 460 }
451#endif 461#endif
452 if (rm != ARM_R0) 462
453 emit(ARM_MOV_R(ARM_R0, rm), ctx); 463 /*
 464 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
 465 * (r_A) and rn is ARM_R0 (r_scratch), so load rn into ARM_R1
 466 * first to avoid accidentally overwriting ARM_R0 with rm
 467 * before it has been used as the source for ARM_R1.
 468 *
 469 * For BPF_ALU | BPF_DIV | BPF_X, rm is ARM_R4 (r_A) and rn is
 470 * ARM_R5 (r_X), so there is no particular register overlap
 471 * issue.
472 */
454 if (rn != ARM_R1) 473 if (rn != ARM_R1)
455 emit(ARM_MOV_R(ARM_R1, rn), ctx); 474 emit(ARM_MOV_R(ARM_R1, rn), ctx);
475 if (rm != ARM_R0)
476 emit(ARM_MOV_R(ARM_R0, rm), ctx);
456 477
457 ctx->seen |= SEEN_CALL; 478 ctx->seen |= SEEN_CALL;
458 emit_mov_i(ARM_R3, (u32)jit_udiv, ctx); 479 emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
@@ -865,6 +886,14 @@ b_epilogue:
865 default: 886 default:
866 return -1; 887 return -1;
867 } 888 }
889
890 if (ctx->flags & FLAG_IMM_OVERFLOW)
891 /*
 892 * This instruction generated an overflow when trying to
 893 * access the literal pool, so delegate this filter to the
 894 * kernel interpreter.
895 */
896 return -1;
868 } 897 }
869 898
870 /* compute offsets only during the first pass */ 899 /* compute offsets only during the first pass */
@@ -927,7 +956,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
927 ctx.idx = 0; 956 ctx.idx = 0;
928 957
929 build_prologue(&ctx); 958 build_prologue(&ctx);
930 build_body(&ctx); 959 if (build_body(&ctx) < 0) {
960#if __LINUX_ARM_ARCH__ < 7
961 if (ctx.imm_count)
962 kfree(ctx.imms);
963#endif
964 bpf_jit_binary_free(header);
965 goto out;
966 }
931 build_epilogue(&ctx); 967 build_epilogue(&ctx);
932 968
933 flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); 969 flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 793551d15f1d..498325074a06 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -4,6 +4,7 @@
4#include <linux/gfp.h> 4#include <linux/gfp.h>
5#include <linux/highmem.h> 5#include <linux/highmem.h>
6#include <linux/export.h> 6#include <linux/export.h>
7#include <linux/memblock.h>
7#include <linux/of_address.h> 8#include <linux/of_address.h>
8#include <linux/slab.h> 9#include <linux/slab.h>
9#include <linux/types.h> 10#include <linux/types.h>
@@ -21,6 +22,20 @@
21#include <asm/xen/hypercall.h> 22#include <asm/xen/hypercall.h>
22#include <asm/xen/interface.h> 23#include <asm/xen/interface.h>
23 24
25unsigned long xen_get_swiotlb_free_pages(unsigned int order)
26{
27 struct memblock_region *reg;
28 gfp_t flags = __GFP_NOWARN;
29
30 for_each_memblock(memory, reg) {
31 if (reg->base < (phys_addr_t)0xffffffff) {
32 flags |= __GFP_DMA;
33 break;
34 }
35 }
36 return __get_free_pages(flags, order);
37}
38
24enum dma_cache_op { 39enum dma_cache_op {
25 DMA_UNMAP, 40 DMA_UNMAP,
26 DMA_MAP, 41 DMA_MAP,
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
index 9499199924ae..6a37c3c6b11d 100644
--- a/arch/arm64/crypto/crc32-arm64.c
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
147{ 147{
148 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); 148 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
149 149
150 put_unaligned_le32(ctx->crc, out);
151 return 0;
152}
153
154static int chksumc_final(struct shash_desc *desc, u8 *out)
155{
156 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
157
150 put_unaligned_le32(~ctx->crc, out); 158 put_unaligned_le32(~ctx->crc, out);
151 return 0; 159 return 0;
152} 160}
153 161
154static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out) 162static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
155{ 163{
156 put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out); 164 put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out);
157 return 0; 165 return 0;
158} 166}
159 167
@@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm)
199{ 207{
200 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); 208 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
201 209
210 mctx->key = 0;
211 return 0;
212}
213
214static int crc32c_cra_init(struct crypto_tfm *tfm)
215{
216 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
217
202 mctx->key = ~0; 218 mctx->key = ~0;
203 return 0; 219 return 0;
204} 220}
@@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = {
229 .setkey = chksum_setkey, 245 .setkey = chksum_setkey,
230 .init = chksum_init, 246 .init = chksum_init,
231 .update = chksumc_update, 247 .update = chksumc_update,
232 .final = chksum_final, 248 .final = chksumc_final,
233 .finup = chksumc_finup, 249 .finup = chksumc_finup,
234 .digest = chksumc_digest, 250 .digest = chksumc_digest,
235 .descsize = sizeof(struct chksum_desc_ctx), 251 .descsize = sizeof(struct chksum_desc_ctx),
@@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = {
241 .cra_alignmask = 0, 257 .cra_alignmask = 0,
242 .cra_ctxsize = sizeof(struct chksum_ctx), 258 .cra_ctxsize = sizeof(struct chksum_ctx),
243 .cra_module = THIS_MODULE, 259 .cra_module = THIS_MODULE,
244 .cra_init = crc32_cra_init, 260 .cra_init = crc32c_cra_init,
245 } 261 }
246}; 262};
247 263
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 114e7cc5de8c..aefda9868627 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -74,6 +74,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
74 74
75static int sha1_ce_final(struct shash_desc *desc, u8 *out) 75static int sha1_ce_final(struct shash_desc *desc, u8 *out)
76{ 76{
77 struct sha1_ce_state *sctx = shash_desc_ctx(desc);
78
79 sctx->finalize = 0;
77 kernel_neon_begin_partial(16); 80 kernel_neon_begin_partial(16);
78 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); 81 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
79 kernel_neon_end(); 82 kernel_neon_end();
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 1340e44c048b..7cd587564a41 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -75,6 +75,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
75 75
76static int sha256_ce_final(struct shash_desc *desc, u8 *out) 76static int sha256_ce_final(struct shash_desc *desc, u8 *out)
77{ 77{
78 struct sha256_ce_state *sctx = shash_desc_ctx(desc);
79
80 sctx->finalize = 0;
78 kernel_neon_begin_partial(28); 81 kernel_neon_begin_partial(28);
79 sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); 82 sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
80 kernel_neon_end(); 83 kernel_neon_end();
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index ce7aea34fdf4..c18ddc74ef9a 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -45,7 +45,7 @@ static volatile unsigned long flushcache_cpumask = 0;
45/* 45/*
46 * For flush_tlb_others() 46 * For flush_tlb_others()
47 */ 47 */
48static volatile cpumask_t flush_cpumask; 48static cpumask_t flush_cpumask;
49static struct mm_struct *flush_mm; 49static struct mm_struct *flush_mm;
50static struct vm_area_struct *flush_vma; 50static struct vm_area_struct *flush_vma;
51static volatile unsigned long flush_va; 51static volatile unsigned long flush_va;
@@ -415,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
415 */ 415 */
416 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); 416 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
417 417
418 while (!cpumask_empty((cpumask_t*)&flush_cpumask)) { 418 while (!cpumask_empty(&flush_cpumask)) {
419 /* nothing. lockup detection does not belong here */ 419 /* nothing. lockup detection does not belong here */
420 mb(); 420 mb();
421 } 421 }
@@ -468,7 +468,7 @@ void smp_invalidate_interrupt(void)
468 __flush_tlb_page(va); 468 __flush_tlb_page(va);
469 } 469 }
470 } 470 }
471 cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask); 471 cpumask_clear_cpu(cpu_id, &flush_cpumask);
472} 472}
473 473
474/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 474/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index bb02fac9b4fa..2b25d1ba1ea0 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -45,7 +45,7 @@ extern int __cpu_logical_map[NR_CPUS];
45#define SMP_DUMP 0x8 45#define SMP_DUMP 0x8
46#define SMP_ASK_C0COUNT 0x10 46#define SMP_ASK_C0COUNT 0x10
47 47
48extern volatile cpumask_t cpu_callin_map; 48extern cpumask_t cpu_callin_map;
49 49
50/* Mask of CPUs which are currently definitely operating coherently */ 50/* Mask of CPUs which are currently definitely operating coherently */
51extern cpumask_t cpu_coherent_mask; 51extern cpumask_t cpu_coherent_mask;
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index be4899f3c393..4a4d9e067c89 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
76 76
77 /* Lets see if this is an O32 ELF */ 77 /* Lets see if this is an O32 ELF */
78 if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) { 78 if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
79 /* FR = 1 for N32 */
80 if (ehdr32->e_flags & EF_MIPS_ABI2)
81 state->overall_fp_mode = FP_FR1;
82 else
83 /* Set a good default FPU mode for O32 */
84 state->overall_fp_mode = cpu_has_mips_r6 ?
85 FP_FRE : FP_FR0;
86
87 if (ehdr32->e_flags & EF_MIPS_FP64) { 79 if (ehdr32->e_flags & EF_MIPS_FP64) {
88 /* 80 /*
89 * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it 81 * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
@@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
104 (char *)&abiflags, 96 (char *)&abiflags,
105 sizeof(abiflags)); 97 sizeof(abiflags));
106 } else { 98 } else {
107 /* FR=1 is really the only option for 64-bit */
108 state->overall_fp_mode = FP_FR1;
109
110 if (phdr64->p_type != PT_MIPS_ABIFLAGS) 99 if (phdr64->p_type != PT_MIPS_ABIFLAGS)
111 return 0; 100 return 0;
112 if (phdr64->p_filesz < sizeof(abiflags)) 101 if (phdr64->p_filesz < sizeof(abiflags))
@@ -137,6 +126,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
137 struct elf32_hdr *ehdr = _ehdr; 126 struct elf32_hdr *ehdr = _ehdr;
138 struct mode_req prog_req, interp_req; 127 struct mode_req prog_req, interp_req;
139 int fp_abi, interp_fp_abi, abi0, abi1, max_abi; 128 int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
129 bool is_mips64;
140 130
141 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 131 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
142 return 0; 132 return 0;
@@ -152,10 +142,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
152 abi0 = abi1 = fp_abi; 142 abi0 = abi1 = fp_abi;
153 } 143 }
154 144
155 /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */ 145 is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
156 max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) && 146 (ehdr->e_flags & EF_MIPS_ABI2);
157 (!(ehdr->e_flags & EF_MIPS_ABI2))) ? 147
158 MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT; 148 if (is_mips64) {
149 /* MIPS64 code always uses FR=1, thus the default is easy */
150 state->overall_fp_mode = FP_FR1;
151
152 /* Disallow access to the various FPXX & FP64 ABIs */
153 max_abi = MIPS_ABI_FP_SOFT;
154 } else {
155 /* Default to a mode capable of running code expecting FR=0 */
156 state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
157
158 /* Allow all ABIs we know about */
159 max_abi = MIPS_ABI_FP_64A;
160 }
159 161
160 if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || 162 if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
161 (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) 163 (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 193ace7955fb..faa46ebd9dda 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -43,7 +43,7 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/setup.h> 44#include <asm/setup.h>
45 45
46volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 46cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
47 47
48int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 48int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
49EXPORT_SYMBOL(__cpu_number_map); 49EXPORT_SYMBOL(__cpu_number_map);
@@ -218,8 +218,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
218 /* 218 /*
219 * Trust is futile. We should really have timeouts ... 219 * Trust is futile. We should really have timeouts ...
220 */ 220 */
221 while (!cpumask_test_cpu(cpu, &cpu_callin_map)) 221 while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
222 udelay(100); 222 udelay(100);
223 schedule();
224 }
223 225
224 synchronise_count_master(cpu); 226 synchronise_count_master(cpu);
225 return 0; 227 return 0;
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
index 5047659815a5..5d836b7c1176 100644
--- a/arch/powerpc/include/uapi/asm/tm.h
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -11,7 +11,7 @@
11#define TM_CAUSE_RESCHED 0xde 11#define TM_CAUSE_RESCHED 0xde
12#define TM_CAUSE_TLBI 0xdc 12#define TM_CAUSE_TLBI 0xdc
13#define TM_CAUSE_FAC_UNAV 0xda 13#define TM_CAUSE_FAC_UNAV 0xda
14#define TM_CAUSE_SYSCALL 0xd8 14#define TM_CAUSE_SYSCALL 0xd8 /* future use */
15#define TM_CAUSE_MISC 0xd6 /* future use */ 15#define TM_CAUSE_MISC 0xd6 /* future use */
16#define TM_CAUSE_SIGNAL 0xd4 16#define TM_CAUSE_SIGNAL 0xd4
17#define TM_CAUSE_ALIGNMENT 0xd2 17#define TM_CAUSE_ALIGNMENT 0xd2
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 44b480e3a5af..9ee61d15653d 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -749,21 +749,24 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
749 eeh_unfreeze_pe(pe, false); 749 eeh_unfreeze_pe(pe, false);
750 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 750 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
751 eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev); 751 eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
752 eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
752 break; 753 break;
753 case pcie_hot_reset: 754 case pcie_hot_reset:
755 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
754 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); 756 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
755 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); 757 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
756 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 758 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
757 eeh_ops->reset(pe, EEH_RESET_HOT); 759 eeh_ops->reset(pe, EEH_RESET_HOT);
758 break; 760 break;
759 case pcie_warm_reset: 761 case pcie_warm_reset:
762 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
760 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); 763 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
761 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); 764 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
762 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 765 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
763 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); 766 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
764 break; 767 break;
765 default: 768 default:
766 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 769 eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED);
767 return -EINVAL; 770 return -EINVAL;
768 }; 771 };
769 772
@@ -1058,6 +1061,9 @@ void eeh_add_device_early(struct pci_dn *pdn)
1058 if (!edev || !eeh_enabled()) 1061 if (!edev || !eeh_enabled())
1059 return; 1062 return;
1060 1063
1064 if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
1065 return;
1066
1061 /* USB Bus children of PCI devices will not have BUID's */ 1067 /* USB Bus children of PCI devices will not have BUID's */
1062 phb = edev->phb; 1068 phb = edev->phb;
1063 if (NULL == phb || 1069 if (NULL == phb ||
@@ -1112,6 +1118,9 @@ void eeh_add_device_late(struct pci_dev *dev)
1112 return; 1118 return;
1113 } 1119 }
1114 1120
1121 if (eeh_has_flag(EEH_PROBE_MODE_DEV))
1122 eeh_ops->probe(pdn, NULL);
1123
1115 /* 1124 /*
1116 * The EEH cache might not be removed correctly because of 1125 * The EEH cache might not be removed correctly because of
1117 * unbalanced kref to the device during unplug time, which 1126 * unbalanced kref to the device during unplug time, which
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 8ca9434c40e6..afbc20019c2e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -34,7 +34,6 @@
34#include <asm/ftrace.h> 34#include <asm/ftrace.h>
35#include <asm/hw_irq.h> 35#include <asm/hw_irq.h>
36#include <asm/context_tracking.h> 36#include <asm/context_tracking.h>
37#include <asm/tm.h>
38 37
39/* 38/*
40 * System calls. 39 * System calls.
@@ -146,24 +145,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
146 andi. r11,r10,_TIF_SYSCALL_DOTRACE 145 andi. r11,r10,_TIF_SYSCALL_DOTRACE
147 bne syscall_dotrace 146 bne syscall_dotrace
148.Lsyscall_dotrace_cont: 147.Lsyscall_dotrace_cont:
149#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
150BEGIN_FTR_SECTION
151 b 1f
152END_FTR_SECTION_IFCLR(CPU_FTR_TM)
153 extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
154 beq+ 1f
155
156 /* Doom the transaction and don't perform the syscall: */
157 mfmsr r11
158 li r12, 1
159 rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG
160 mtmsrd r11, 0
161 li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
162 TABORT(R11)
163
164 b .Lsyscall_exit
1651:
166#endif
167 cmpldi 0,r0,NR_syscalls 148 cmpldi 0,r0,NR_syscalls
168 bge- syscall_enosys 149 bge- syscall_enosys
169 150
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index eeaa0d5f69d5..ccde8f084ce4 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -501,9 +501,11 @@ BEGIN_FTR_SECTION
501 CHECK_HMI_INTERRUPT 501 CHECK_HMI_INTERRUPT
502END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 502END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
503 ld r1,PACAR1(r13) 503 ld r1,PACAR1(r13)
504 ld r6,_CCR(r1)
504 ld r4,_MSR(r1) 505 ld r4,_MSR(r1)
505 ld r5,_NIP(r1) 506 ld r5,_NIP(r1)
506 addi r1,r1,INT_FRAME_SIZE 507 addi r1,r1,INT_FRAME_SIZE
508 mtcr r6
507 mtspr SPRN_SRR1,r4 509 mtspr SPRN_SRR1,r4
508 mtspr SPRN_SRR0,r5 510 mtspr SPRN_SRR0,r5
509 rfid 511 rfid
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 8f3e6cc54d95..c6ca7db64673 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -12,6 +12,7 @@
12#include <linux/err.h> 12#include <linux/err.h>
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/anon_inodes.h> 14#include <linux/anon_inodes.h>
15#include <linux/spinlock.h>
15 16
16#include <asm/uaccess.h> 17#include <asm/uaccess.h>
17#include <asm/kvm_book3s.h> 18#include <asm/kvm_book3s.h>
@@ -20,7 +21,6 @@
20#include <asm/xics.h> 21#include <asm/xics.h>
21#include <asm/debug.h> 22#include <asm/debug.h>
22#include <asm/time.h> 23#include <asm/time.h>
23#include <asm/spinlock.h>
24 24
25#include <linux/debugfs.h> 25#include <linux/debugfs.h>
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 920c252d1f49..f8bc950efcae 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2693,7 +2693,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2693 hose->last_busno = 0xff; 2693 hose->last_busno = 0xff;
2694 } 2694 }
2695 hose->private_data = phb; 2695 hose->private_data = phb;
2696 hose->controller_ops = pnv_pci_controller_ops;
2697 phb->hub_id = hub_id; 2696 phb->hub_id = hub_id;
2698 phb->opal_id = phb_id; 2697 phb->opal_id = phb_id;
2699 phb->type = ioda_type; 2698 phb->type = ioda_type;
@@ -2812,6 +2811,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2812 pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook; 2811 pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
2813 pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment; 2812 pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
2814 pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus; 2813 pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
2814 hose->controller_ops = pnv_pci_controller_ops;
2815 2815
2816#ifdef CONFIG_PCI_IOV 2816#ifdef CONFIG_PCI_IOV
2817 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; 2817 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index b4b11096ea8b..019d34aaf054 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -412,6 +412,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
412 if (rc) 412 if (rc)
413 return -EINVAL; 413 return -EINVAL;
414 414
415 rc = dlpar_acquire_drc(drc_index);
416 if (rc)
417 return -EINVAL;
418
415 parent = of_find_node_by_path("/cpus"); 419 parent = of_find_node_by_path("/cpus");
416 if (!parent) 420 if (!parent)
417 return -ENODEV; 421 return -ENODEV;
@@ -422,12 +426,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
422 426
423 of_node_put(parent); 427 of_node_put(parent);
424 428
425 rc = dlpar_acquire_drc(drc_index);
426 if (rc) {
427 dlpar_free_cc_nodes(dn);
428 return -EINVAL;
429 }
430
431 rc = dlpar_attach_node(dn); 429 rc = dlpar_attach_node(dn);
432 if (rc) { 430 if (rc) {
433 dlpar_release_drc(drc_index); 431 dlpar_release_drc(drc_index);
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index ef17683484e9..48304b89b601 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1109,6 +1109,8 @@ struct boot_params *make_boot_params(struct efi_config *c)
1109 if (!cmdline_ptr) 1109 if (!cmdline_ptr)
1110 goto fail; 1110 goto fail;
1111 hdr->cmd_line_ptr = (unsigned long)cmdline_ptr; 1111 hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
1112 /* Fill in upper bits of command line address, NOP on 32 bit */
1113 boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
1112 1114
1113 hdr->ramdisk_image = 0; 1115 hdr->ramdisk_image = 0;
1114 hdr->ramdisk_size = 0; 1116 hdr->ramdisk_size = 0;
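The added ext_cmd_line_ptr line above matters once the EFI stub allocates the command line above 4 GiB: the legacy setup_header field only holds the low 32 bits, so the high word has to go into the separate boot_params field. A minimal user-space sketch of that split, using a stand-in struct and a hypothetical address rather than the real boot_params layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the two fields involved; not the real layout. */
struct demo_boot_params {
	uint32_t cmd_line_ptr;		/* setup_header: low 32 bits */
	uint32_t ext_cmd_line_ptr;	/* boot_params:  high 32 bits */
};

int main(void)
{
	/* Hypothetical EFI allocation above 4 GiB. */
	uint64_t cmdline_ptr = 0x000000017fe0b000ULL;
	struct demo_boot_params bp;

	bp.cmd_line_ptr     = (uint32_t)cmdline_ptr;
	/* The shift is what the added line supplies; on a 32-bit build the
	 * upper word is always zero, hence "NOP on 32 bit". */
	bp.ext_cmd_line_ptr = (uint32_t)(cmdline_ptr >> 32);

	printf("cmd_line_ptr=%#x ext_cmd_line_ptr=%#x\n",
	       (unsigned)bp.cmd_line_ptr, (unsigned)bp.ext_cmd_line_ptr);
	return 0;
}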
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index e42f758a0fbd..055ea9941dd5 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper;
50/* Recognized hypervisors */ 50/* Recognized hypervisors */
51extern const struct hypervisor_x86 x86_hyper_vmware; 51extern const struct hypervisor_x86 x86_hyper_vmware;
52extern const struct hypervisor_x86 x86_hyper_ms_hyperv; 52extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
53extern const struct hypervisor_x86 x86_hyper_xen_hvm; 53extern const struct hypervisor_x86 x86_hyper_xen;
54extern const struct hypervisor_x86 x86_hyper_kvm; 54extern const struct hypervisor_x86 x86_hyper_kvm;
55 55
56extern void init_hypervisor(struct cpuinfo_x86 *c); 56extern void init_hypervisor(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index cf87de3fc390..64b611782ef0 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
169 struct __raw_tickets tmp = READ_ONCE(lock->tickets); 169 struct __raw_tickets tmp = READ_ONCE(lock->tickets);
170 170
171 tmp.head &= ~TICKET_SLOWPATH_FLAG; 171 tmp.head &= ~TICKET_SLOWPATH_FLAG;
172 return (tmp.tail - tmp.head) > TICKET_LOCK_INC; 172 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
173} 173}
174#define arch_spin_is_contended arch_spin_is_contended 174#define arch_spin_is_contended arch_spin_is_contended
175 175
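The one-character change above is about integer promotion: __ticket_t is a narrow unsigned type, so tail - head is promoted to int and goes negative once the tail counter wraps, making the contention check report false even though waiters are queued. Casting the difference back to __ticket_t restores the intended modulo arithmetic. A standalone sketch, assuming an 8-bit ticket type and a TICKET_LOCK_INC of 2 purely for illustration (the real kernel types depend on NR_CPUS and paravirt):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t __ticket_t;	/* assumed 8-bit ticket for this sketch */
#define TICKET_LOCK_INC 2	/* assumed increment (paravirt case) */

int main(void)
{
	__ticket_t head = 0xFC;		/* next ticket to be served */
	__ticket_t tail = 0x02;		/* next ticket to hand out: has wrapped */

	int promoted = tail - head;			/* usual promotion to int */
	__ticket_t wrapped = (__ticket_t)(tail - head);	/* modulo-256 difference */

	printf("int diff    = %d -> contended? %d (wrongly reports idle)\n",
	       promoted, promoted > TICKET_LOCK_INC);
	printf("ticket diff = %d -> contended? %d (tickets really are queued)\n",
	       wrapped, wrapped > TICKET_LOCK_INC);
	return 0;
}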
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 358dcd338915..c44a5d53e464 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
269 return false; 269 return false;
270} 270}
271 271
272static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
273{
274 return __get_free_pages(__GFP_NOWARN, order);
275}
276
272#endif /* _ASM_X86_XEN_PAGE_H */ 277#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 36ce402a3fa5..d820d8eae96b 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -27,8 +27,8 @@
27 27
28static const __initconst struct hypervisor_x86 * const hypervisors[] = 28static const __initconst struct hypervisor_x86 * const hypervisors[] =
29{ 29{
30#ifdef CONFIG_XEN_PVHVM 30#ifdef CONFIG_XEN
31 &x86_hyper_xen_hvm, 31 &x86_hyper_xen,
32#endif 32#endif
33 &x86_hyper_vmware, 33 &x86_hyper_vmware,
34 &x86_hyper_ms_hyperv, 34 &x86_hyper_ms_hyperv,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 219d3fb423a1..960e85de13fb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2533,34 +2533,6 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
2533 return x86_event_sysfs_show(page, config, event); 2533 return x86_event_sysfs_show(page, config, event);
2534} 2534}
2535 2535
2536static __initconst const struct x86_pmu core_pmu = {
2537 .name = "core",
2538 .handle_irq = x86_pmu_handle_irq,
2539 .disable_all = x86_pmu_disable_all,
2540 .enable_all = core_pmu_enable_all,
2541 .enable = core_pmu_enable_event,
2542 .disable = x86_pmu_disable_event,
2543 .hw_config = x86_pmu_hw_config,
2544 .schedule_events = x86_schedule_events,
2545 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2546 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2547 .event_map = intel_pmu_event_map,
2548 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2549 .apic = 1,
2550 /*
2551 * Intel PMCs cannot be accessed sanely above 32 bit width,
2552 * so we install an artificial 1<<31 period regardless of
2553 * the generic event period:
2554 */
2555 .max_period = (1ULL << 31) - 1,
2556 .get_event_constraints = intel_get_event_constraints,
2557 .put_event_constraints = intel_put_event_constraints,
2558 .event_constraints = intel_core_event_constraints,
2559 .guest_get_msrs = core_guest_get_msrs,
2560 .format_attrs = intel_arch_formats_attr,
2561 .events_sysfs_show = intel_event_sysfs_show,
2562};
2563
2564struct intel_shared_regs *allocate_shared_regs(int cpu) 2536struct intel_shared_regs *allocate_shared_regs(int cpu)
2565{ 2537{
2566 struct intel_shared_regs *regs; 2538 struct intel_shared_regs *regs;
@@ -2743,6 +2715,44 @@ static struct attribute *intel_arch3_formats_attr[] = {
2743 NULL, 2715 NULL,
2744}; 2716};
2745 2717
2718static __initconst const struct x86_pmu core_pmu = {
2719 .name = "core",
2720 .handle_irq = x86_pmu_handle_irq,
2721 .disable_all = x86_pmu_disable_all,
2722 .enable_all = core_pmu_enable_all,
2723 .enable = core_pmu_enable_event,
2724 .disable = x86_pmu_disable_event,
2725 .hw_config = x86_pmu_hw_config,
2726 .schedule_events = x86_schedule_events,
2727 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2728 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2729 .event_map = intel_pmu_event_map,
2730 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2731 .apic = 1,
2732 /*
2733 * Intel PMCs cannot be accessed sanely above 32-bit width,
2734 * so we install an artificial 1<<31 period regardless of
2735 * the generic event period:
2736 */
2737 .max_period = (1ULL<<31) - 1,
2738 .get_event_constraints = intel_get_event_constraints,
2739 .put_event_constraints = intel_put_event_constraints,
2740 .event_constraints = intel_core_event_constraints,
2741 .guest_get_msrs = core_guest_get_msrs,
2742 .format_attrs = intel_arch_formats_attr,
2743 .events_sysfs_show = intel_event_sysfs_show,
2744
2745 /*
2746 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
2747 * together with PMU version 1 and thus be using core_pmu with
2748 * shared_regs. We need following callbacks here to allocate
2749 * it properly.
2750 */
2751 .cpu_prepare = intel_pmu_cpu_prepare,
2752 .cpu_starting = intel_pmu_cpu_starting,
2753 .cpu_dying = intel_pmu_cpu_dying,
2754};
2755
2746static __initconst const struct x86_pmu intel_pmu = { 2756static __initconst const struct x86_pmu intel_pmu = {
2747 .name = "Intel", 2757 .name = "Intel",
2748 .handle_irq = intel_pmu_handle_irq, 2758 .handle_irq = intel_pmu_handle_irq,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
index 3001015b755c..4562e9e22c60 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
@@ -1,6 +1,13 @@
1/* Nehalem/SandBridge/Haswell uncore support */ 1/* Nehalem/SandBridge/Haswell uncore support */
2#include "perf_event_intel_uncore.h" 2#include "perf_event_intel_uncore.h"
3 3
4/* Uncore IMC PCI IDs */
5#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
6#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
7#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
8#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
9#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
10
4/* SNB event control */ 11/* SNB event control */
5#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff 12#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
6#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 13#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
@@ -472,6 +479,10 @@ static const struct pci_device_id hsw_uncore_pci_ids[] = {
472 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC), 479 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
473 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 480 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
474 }, 481 },
482 { /* IMC */
483 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
484 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
485 },
475 { /* end: all zeroes */ }, 486 { /* end: all zeroes */ },
476}; 487};
477 488
@@ -502,6 +513,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
502 IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */ 513 IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */
503 IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */ 514 IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
504 IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ 515 IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
516 IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
505 { /* end marker */ } 517 { /* end marker */ }
506}; 518};
507 519
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 8213da62b1b7..6e338e3b1dc0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,7 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
57 .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, 57 .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
58#endif 58#endif
59}; 59};
60EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss); 60EXPORT_PER_CPU_SYMBOL(cpu_tss);
61 61
62#ifdef CONFIG_X86_64 62#ifdef CONFIG_X86_64
63static DEFINE_PER_CPU(unsigned char, is_idle); 63static DEFINE_PER_CPU(unsigned char, is_idle);
@@ -156,11 +156,13 @@ void flush_thread(void)
156 /* FPU state will be reallocated lazily at the first use. */ 156 /* FPU state will be reallocated lazily at the first use. */
157 drop_fpu(tsk); 157 drop_fpu(tsk);
158 free_thread_xstate(tsk); 158 free_thread_xstate(tsk);
159 } else if (!used_math()) { 159 } else {
160 /* kthread execs. TODO: cleanup this horror. */ 160 if (!tsk_used_math(tsk)) {
161 if (WARN_ON(init_fpu(tsk))) 161 /* kthread execs. TODO: cleanup this horror. */
162 force_sig(SIGKILL, tsk); 162 if (WARN_ON(init_fpu(tsk)))
163 user_fpu_begin(); 163 force_sig(SIGKILL, tsk);
164 user_fpu_begin();
165 }
164 restore_init_xstate(); 166 restore_init_xstate();
165 } 167 }
166} 168}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 5ead4d6cf3a7..70e7444c6835 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -351,18 +351,20 @@ int arch_ioremap_pmd_supported(void)
351 */ 351 */
352void *xlate_dev_mem_ptr(phys_addr_t phys) 352void *xlate_dev_mem_ptr(phys_addr_t phys)
353{ 353{
354 void *addr; 354 unsigned long start = phys & PAGE_MASK;
355 unsigned long start = phys & PAGE_MASK; 355 unsigned long offset = phys & ~PAGE_MASK;
356 unsigned long vaddr;
356 357
357 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */ 358 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
358 if (page_is_ram(start >> PAGE_SHIFT)) 359 if (page_is_ram(start >> PAGE_SHIFT))
359 return __va(phys); 360 return __va(phys);
360 361
361 addr = (void __force *)ioremap_cache(start, PAGE_SIZE); 362 vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
362 if (addr) 363 /* Only add the offset on success and return NULL if the ioremap() failed: */
363 addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK)); 364 if (vaddr)
365 vaddr += offset;
364 366
365 return addr; 367 return (void *)vaddr;
366} 368}
367 369
368void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) 370void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 987514396c1e..99f76103c6b7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
559 if (is_ereg(dst_reg)) 559 if (is_ereg(dst_reg))
560 EMIT1(0x41); 560 EMIT1(0x41);
561 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); 561 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
562
563 /* emit 'movzwl eax, ax' */
564 if (is_ereg(dst_reg))
565 EMIT3(0x45, 0x0F, 0xB7);
566 else
567 EMIT2(0x0F, 0xB7);
568 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
562 break; 569 break;
563 case 32: 570 case 32:
564 /* emit 'bswap eax' to swap lower 4 bytes */ 571 /* emit 'bswap eax' to swap lower 4 bytes */
@@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
577 break; 584 break;
578 585
579 case BPF_ALU | BPF_END | BPF_FROM_LE: 586 case BPF_ALU | BPF_END | BPF_FROM_LE:
587 switch (imm32) {
588 case 16:
589 /* emit 'movzwl eax, ax' to zero extend 16-bit
590 * into 64 bit
591 */
592 if (is_ereg(dst_reg))
593 EMIT3(0x45, 0x0F, 0xB7);
594 else
595 EMIT2(0x0F, 0xB7);
596 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
597 break;
598 case 32:
599 /* emit 'mov eax, eax' to clear upper 32-bits */
600 if (is_ereg(dst_reg))
601 EMIT1(0x45);
602 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
603 break;
604 case 64:
605 /* nop */
606 break;
607 }
580 break; 608 break;
581 609
582 /* ST: *(u8*)(dst_reg + off) = imm */ 610 /* ST: *(u8*)(dst_reg + off) = imm */
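On a little-endian x86 host, BPF_FROM_LE needs no byte swapping, but the JIT still has to clear the bits above the operand width, which is what the added movzwl / "mov eax,eax" / nop cases do (and why the 16-bit byte-swap path above also gains a movzwl). A small user-space sketch of the resulting semantics; from_le() here is an illustrative helper, not a kernel function:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the emitted instructions:
 *   width 16 -> movzwl reg,reg : keep low 16 bits, zero the rest
 *   width 32 -> mov eax,eax    : keep low 32 bits, clear the upper half
 *   width 64 -> nop            : value left untouched
 */
static uint64_t from_le(uint64_t reg, int width)
{
	switch (width) {
	case 16: return (uint16_t)reg;
	case 32: return (uint32_t)reg;
	case 64: return reg;
	}
	return reg;
}

int main(void)
{
	uint64_t r = 0x1122334455667788ULL;

	printf("16: %#llx\n", (unsigned long long)from_le(r, 16));
	printf("32: %#llx\n", (unsigned long long)from_le(r, 32));
	printf("64: %#llx\n", (unsigned long long)from_le(r, 64));
	return 0;
}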
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index e4695985f9de..d93963340c3c 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
325 kfree(info); 325 kfree(info);
326} 326}
327 327
328/*
329 * An IO port or MMIO resource assigned to a PCI host bridge may be
330 * consumed by the host bridge itself or available to its child
331 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
332 * to tell whether the resource is consumed by the host bridge itself,
333 * but firmware hasn't used that bit consistently, so we can't rely on it.
334 *
335 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
336 * to be available to child bus/devices except one special case:
337 * IO port [0xCF8-0xCFF] is consumed by the host bridge itself
338 * to access PCI configuration space.
339 *
340 * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
341 */
342static bool resource_is_pcicfg_ioport(struct resource *res)
343{
344 return (res->flags & IORESOURCE_IO) &&
345 res->start == 0xCF8 && res->end == 0xCFF;
346}
347
328static void probe_pci_root_info(struct pci_root_info *info, 348static void probe_pci_root_info(struct pci_root_info *info,
329 struct acpi_device *device, 349 struct acpi_device *device,
330 int busnum, int domain, 350 int busnum, int domain,
@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
346 "no IO and memory resources present in _CRS\n"); 366 "no IO and memory resources present in _CRS\n");
347 else 367 else
348 resource_list_for_each_entry_safe(entry, tmp, list) { 368 resource_list_for_each_entry_safe(entry, tmp, list) {
349 if ((entry->res->flags & IORESOURCE_WINDOW) == 0 || 369 if ((entry->res->flags & IORESOURCE_DISABLED) ||
350 (entry->res->flags & IORESOURCE_DISABLED)) 370 resource_is_pcicfg_ioport(entry->res))
351 resource_list_destroy_entry(entry); 371 resource_list_destroy_entry(entry);
352 else 372 else
353 entry->res->name = info->name; 373 entry->res->name = info->name;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 94578efd3067..46957ead3060 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1760,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = {
1760 1760
1761static void __init xen_hvm_guest_init(void) 1761static void __init xen_hvm_guest_init(void)
1762{ 1762{
1763 if (xen_pv_domain())
1764 return;
1765
1763 init_hvm_pv_info(); 1766 init_hvm_pv_info();
1764 1767
1765 xen_hvm_init_shared_info(); 1768 xen_hvm_init_shared_info();
@@ -1775,6 +1778,7 @@ static void __init xen_hvm_guest_init(void)
1775 xen_hvm_init_time_ops(); 1778 xen_hvm_init_time_ops();
1776 xen_hvm_init_mmu_ops(); 1779 xen_hvm_init_mmu_ops();
1777} 1780}
1781#endif
1778 1782
1779static bool xen_nopv = false; 1783static bool xen_nopv = false;
1780static __init int xen_parse_nopv(char *arg) 1784static __init int xen_parse_nopv(char *arg)
@@ -1784,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg)
1784} 1788}
1785early_param("xen_nopv", xen_parse_nopv); 1789early_param("xen_nopv", xen_parse_nopv);
1786 1790
1787static uint32_t __init xen_hvm_platform(void) 1791static uint32_t __init xen_platform(void)
1788{ 1792{
1789 if (xen_nopv) 1793 if (xen_nopv)
1790 return 0; 1794 return 0;
1791 1795
1792 if (xen_pv_domain())
1793 return 0;
1794
1795 return xen_cpuid_base(); 1796 return xen_cpuid_base();
1796} 1797}
1797 1798
@@ -1809,11 +1810,19 @@ bool xen_hvm_need_lapic(void)
1809} 1810}
1810EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); 1811EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
1811 1812
1812const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { 1813static void xen_set_cpu_features(struct cpuinfo_x86 *c)
1813 .name = "Xen HVM", 1814{
1814 .detect = xen_hvm_platform, 1815 if (xen_pv_domain())
1816 clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
1817}
1818
1819const struct hypervisor_x86 x86_hyper_xen = {
1820 .name = "Xen",
1821 .detect = xen_platform,
1822#ifdef CONFIG_XEN_PVHVM
1815 .init_platform = xen_hvm_guest_init, 1823 .init_platform = xen_hvm_guest_init,
1824#endif
1816 .x2apic_available = xen_x2apic_para_available, 1825 .x2apic_available = xen_x2apic_para_available,
1826 .set_cpu_features = xen_set_cpu_features,
1817}; 1827};
1818EXPORT_SYMBOL(x86_hyper_xen_hvm); 1828EXPORT_SYMBOL(x86_hyper_xen);
1819#endif
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index d9497698645a..53b4c0811f4f 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data)
88 tick_resume_local(); 88 tick_resume_local();
89} 89}
90 90
91static void xen_vcpu_notify_suspend(void *data)
92{
93 tick_suspend_local();
94}
95
91void xen_arch_resume(void) 96void xen_arch_resume(void)
92{ 97{
93 on_each_cpu(xen_vcpu_notify_restore, NULL, 1); 98 on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
94} 99}
100
101void xen_arch_suspend(void)
102{
103 on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
104}
diff --git a/block/blk-core.c b/block/blk-core.c
index fd154b94447a..7871603f0a29 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q)
552 q->queue_lock = &q->__queue_lock; 552 q->queue_lock = &q->__queue_lock;
553 spin_unlock_irq(lock); 553 spin_unlock_irq(lock);
554 554
555 bdi_destroy(&q->backing_dev_info);
556
555 /* @q is and will stay empty, shutdown and put */ 557 /* @q is and will stay empty, shutdown and put */
556 blk_put_queue(q); 558 blk_put_queue(q);
557} 559}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ade8a2d1b0aa..e68b71b85a7e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv)
677 data.next = blk_rq_timeout(round_jiffies_up(data.next)); 677 data.next = blk_rq_timeout(round_jiffies_up(data.next));
678 mod_timer(&q->timeout, data.next); 678 mod_timer(&q->timeout, data.next);
679 } else { 679 } else {
680 queue_for_each_hw_ctx(q, hctx, i) 680 queue_for_each_hw_ctx(q, hctx, i) {
681 blk_mq_tag_idle(hctx); 681 /* the hctx may be unmapped, so check it here */
682 if (blk_mq_hw_queue_mapped(hctx))
683 blk_mq_tag_idle(hctx);
684 }
682 } 685 }
683} 686}
684 687
@@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
855 spin_lock(&hctx->lock); 858 spin_lock(&hctx->lock);
856 list_splice(&rq_list, &hctx->dispatch); 859 list_splice(&rq_list, &hctx->dispatch);
857 spin_unlock(&hctx->lock); 860 spin_unlock(&hctx->lock);
861 /*
 862 * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
863 * it's possible the queue is stopped and restarted again
864 * before this. Queue restart will dispatch requests. And since
865 * requests in rq_list aren't added into hctx->dispatch yet,
866 * the requests in rq_list might get lost.
867 *
868 * blk_mq_run_hw_queue() already checks the STOPPED bit
869 **/
870 blk_mq_run_hw_queue(hctx, true);
858 } 871 }
859} 872}
860 873
@@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1571 return NOTIFY_OK; 1584 return NOTIFY_OK;
1572} 1585}
1573 1586
1574static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1575{
1576 struct request_queue *q = hctx->queue;
1577 struct blk_mq_tag_set *set = q->tag_set;
1578
1579 if (set->tags[hctx->queue_num])
1580 return NOTIFY_OK;
1581
1582 set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1583 if (!set->tags[hctx->queue_num])
1584 return NOTIFY_STOP;
1585
1586 hctx->tags = set->tags[hctx->queue_num];
1587 return NOTIFY_OK;
1588}
1589
1590static int blk_mq_hctx_notify(void *data, unsigned long action, 1587static int blk_mq_hctx_notify(void *data, unsigned long action,
1591 unsigned int cpu) 1588 unsigned int cpu)
1592{ 1589{
@@ -1594,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
1594 1591
1595 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) 1592 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1596 return blk_mq_hctx_cpu_offline(hctx, cpu); 1593 return blk_mq_hctx_cpu_offline(hctx, cpu);
1597 else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) 1594
1598 return blk_mq_hctx_cpu_online(hctx, cpu); 1595 /*
1596 * In case of CPU online, tags may be reallocated
1597 * in blk_mq_map_swqueue() after mapping is updated.
1598 */
1599 1599
1600 return NOTIFY_OK; 1600 return NOTIFY_OK;
1601} 1601}
@@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1775 unsigned int i; 1775 unsigned int i;
1776 struct blk_mq_hw_ctx *hctx; 1776 struct blk_mq_hw_ctx *hctx;
1777 struct blk_mq_ctx *ctx; 1777 struct blk_mq_ctx *ctx;
1778 struct blk_mq_tag_set *set = q->tag_set;
1778 1779
1779 queue_for_each_hw_ctx(q, hctx, i) { 1780 queue_for_each_hw_ctx(q, hctx, i) {
1780 cpumask_clear(hctx->cpumask); 1781 cpumask_clear(hctx->cpumask);
@@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1803 * disable it and free the request entries. 1804 * disable it and free the request entries.
1804 */ 1805 */
1805 if (!hctx->nr_ctx) { 1806 if (!hctx->nr_ctx) {
1806 struct blk_mq_tag_set *set = q->tag_set;
1807
1808 if (set->tags[i]) { 1807 if (set->tags[i]) {
1809 blk_mq_free_rq_map(set, set->tags[i], i); 1808 blk_mq_free_rq_map(set, set->tags[i], i);
1810 set->tags[i] = NULL; 1809 set->tags[i] = NULL;
1811 hctx->tags = NULL;
1812 } 1810 }
1811 hctx->tags = NULL;
1813 continue; 1812 continue;
1814 } 1813 }
1815 1814
1815 /* unmapped hw queue can be remapped after CPU topo changed */
1816 if (!set->tags[i])
1817 set->tags[i] = blk_mq_init_rq_map(set, i);
1818 hctx->tags = set->tags[i];
1819 WARN_ON(!hctx->tags);
1820
1816 /* 1821 /*
1817 * Set the map size to the number of mapped software queues. 1822 * Set the map size to the number of mapped software queues.
1818 * This is more accurate and more efficient than looping 1823 * This is more accurate and more efficient than looping
@@ -2090,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2090 */ 2095 */
2091 list_for_each_entry(q, &all_q_list, all_q_node) 2096 list_for_each_entry(q, &all_q_list, all_q_node)
2092 blk_mq_freeze_queue_start(q); 2097 blk_mq_freeze_queue_start(q);
2093 list_for_each_entry(q, &all_q_list, all_q_node) 2098 list_for_each_entry(q, &all_q_list, all_q_node) {
2094 blk_mq_freeze_queue_wait(q); 2099 blk_mq_freeze_queue_wait(q);
2095 2100
2101 /*
2102 * timeout handler can't touch hw queue during the
2103 * reinitialization
2104 */
2105 del_timer_sync(&q->timeout);
2106 }
2107
2096 list_for_each_entry(q, &all_q_list, all_q_node) 2108 list_for_each_entry(q, &all_q_list, all_q_node)
2097 blk_mq_queue_reinit(q); 2109 blk_mq_queue_reinit(q);
2098 2110
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index faaf36ade7eb..2b8fd302f677 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
522 522
523 blk_trace_shutdown(q); 523 blk_trace_shutdown(q);
524 524
525 bdi_destroy(&q->backing_dev_info);
526
527 ida_simple_remove(&blk_queue_ida, q->id); 525 ida_simple_remove(&blk_queue_ida, q->id);
528 call_rcu(&q->rcu_head, blk_free_queue_rcu); 526 call_rcu(&q->rcu_head, blk_free_queue_rcu);
529} 527}
diff --git a/block/bounce.c b/block/bounce.c
index ab21ba203d5c..ed9dd8067120 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -221,8 +221,8 @@ bounce:
221 if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) 221 if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
222 continue; 222 continue;
223 223
224 inc_zone_page_state(to->bv_page, NR_BOUNCE);
225 to->bv_page = mempool_alloc(pool, q->bounce_gfp); 224 to->bv_page = mempool_alloc(pool, q->bounce_gfp);
225 inc_zone_page_state(to->bv_page, NR_BOUNCE);
226 226
227 if (rw == WRITE) { 227 if (rw == WRITE) {
228 char *vto, *vfrom; 228 char *vto, *vfrom;
diff --git a/block/elevator.c b/block/elevator.c
index 59794d0d38e3..8985038f398c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
157 157
158 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); 158 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
159 if (unlikely(!eq)) 159 if (unlikely(!eq))
160 goto err; 160 return NULL;
161 161
162 eq->type = e; 162 eq->type = e;
163 kobject_init(&eq->kobj, &elv_ktype); 163 kobject_init(&eq->kobj, &elv_ktype);
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
165 hash_init(eq->hash); 165 hash_init(eq->hash);
166 166
167 return eq; 167 return eq;
168err:
169 kfree(eq);
170 elevator_put(e);
171 return NULL;
172} 168}
173EXPORT_SYMBOL(elevator_alloc); 169EXPORT_SYMBOL(elevator_alloc);
174 170
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index b193f8425999..ff6d8adc9cda 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
304 {"PNPb006"}, 304 {"PNPb006"},
305 /* cs423x-pnpbios */ 305 /* cs423x-pnpbios */
306 {"CSC0100"}, 306 {"CSC0100"},
307 {"CSC0103"},
308 {"CSC0110"},
307 {"CSC0000"}, 309 {"CSC0000"},
308 {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */ 310 {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */
309 /* es18xx-pnpbios */ 311 /* es18xx-pnpbios */
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 5589a6e2a023..8244f013f210 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
573 * @ares: Input ACPI resource object. 573 * @ares: Input ACPI resource object.
574 * @types: Valid resource types of IORESOURCE_XXX 574 * @types: Valid resource types of IORESOURCE_XXX
575 * 575 *
576 * This is a hepler function to support acpi_dev_get_resources(), which filters 576 * This is a helper function to support acpi_dev_get_resources(), which filters
577 * ACPI resource objects according to resource types. 577 * ACPI resource objects according to resource types.
578 */ 578 */
579int acpi_dev_filter_resource_type(struct acpi_resource *ares, 579int acpi_dev_filter_resource_type(struct acpi_resource *ares,
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 26e5b5060523..bf034f8b7c1a 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,6 +14,7 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/dmi.h>
17#include "sbshc.h" 18#include "sbshc.h"
18 19
19#define PREFIX "ACPI: " 20#define PREFIX "ACPI: "
@@ -87,6 +88,8 @@ enum acpi_smb_offset {
87 ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ 88 ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
88}; 89};
89 90
91static bool macbook;
92
90static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) 93static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
91{ 94{
92 return ec_read(hc->offset + address, data); 95 return ec_read(hc->offset + address, data);
@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
132 } 135 }
133 136
134 mutex_lock(&hc->lock); 137 mutex_lock(&hc->lock);
138 if (macbook)
139 udelay(5);
135 if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) 140 if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
136 goto end; 141 goto end;
137 if (temp) { 142 if (temp) {
@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
257 acpi_handle handle, acpi_ec_query_func func, 262 acpi_handle handle, acpi_ec_query_func func,
258 void *data); 263 void *data);
259 264
265static int macbook_dmi_match(const struct dmi_system_id *d)
266{
267 pr_debug("Detected MacBook, enabling workaround\n");
268 macbook = true;
269 return 0;
270}
271
272static struct dmi_system_id acpi_smbus_dmi_table[] = {
273 { macbook_dmi_match, "Apple MacBook", {
274 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
275 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
276 },
277 { },
278};
279
260static int acpi_smbus_hc_add(struct acpi_device *device) 280static int acpi_smbus_hc_add(struct acpi_device *device)
261{ 281{
262 int status; 282 int status;
263 unsigned long long val; 283 unsigned long long val;
264 struct acpi_smb_hc *hc; 284 struct acpi_smb_hc *hc;
265 285
286 dmi_check_system(acpi_smbus_dmi_table);
287
266 if (!device) 288 if (!device)
267 return -EINVAL; 289 return -EINVAL;
268 290
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5f601553b9b0..9dca4b995be0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -270,6 +270,7 @@ config ATA_PIIX
270config SATA_DWC 270config SATA_DWC
271 tristate "DesignWare Cores SATA support" 271 tristate "DesignWare Cores SATA support"
272 depends on 460EX 272 depends on 460EX
273 select DW_DMAC
273 help 274 help
274 This option enables support for the on-chip SATA controller of the 275 This option enables support for the on-chip SATA controller of the
275 AppliedMicro processor 460EX. 276 AppliedMicro processor 460EX.
@@ -729,15 +730,6 @@ config PATA_SC1200
729 730
730 If unsure, say N. 731 If unsure, say N.
731 732
732config PATA_SCC
733 tristate "Toshiba's Cell Reference Set IDE support"
734 depends on PCI && PPC_CELLEB
735 help
736 This option enables support for the built-in IDE controller on
737 Toshiba Cell Reference Board.
738
739 If unsure, say N.
740
741config PATA_SCH 733config PATA_SCH
742 tristate "Intel SCH PATA support" 734 tristate "Intel SCH PATA support"
743 depends on PCI 735 depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index b67e995179a9..40f7865f20a1 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -75,7 +75,6 @@ obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
75obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o 75obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
76obj-$(CONFIG_PATA_RDC) += pata_rdc.o 76obj-$(CONFIG_PATA_RDC) += pata_rdc.o
77obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o 77obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
78obj-$(CONFIG_PATA_SCC) += pata_scc.o
79obj-$(CONFIG_PATA_SCH) += pata_sch.o 78obj-$(CONFIG_PATA_SCH) += pata_sch.o
80obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o 79obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
81obj-$(CONFIG_PATA_SIL680) += pata_sil680.o 80obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c7a92a743ed0..65ee94454bbd 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -66,6 +66,7 @@ enum board_ids {
66 board_ahci_yes_fbs, 66 board_ahci_yes_fbs,
67 67
68 /* board IDs for specific chipsets in alphabetical order */ 68 /* board IDs for specific chipsets in alphabetical order */
69 board_ahci_avn,
69 board_ahci_mcp65, 70 board_ahci_mcp65,
70 board_ahci_mcp77, 71 board_ahci_mcp77,
71 board_ahci_mcp89, 72 board_ahci_mcp89,
@@ -84,6 +85,8 @@ enum board_ids {
84static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 85static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
85static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, 86static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
86 unsigned long deadline); 87 unsigned long deadline);
88static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
89 unsigned long deadline);
87static void ahci_mcp89_apple_enable(struct pci_dev *pdev); 90static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
88static bool is_mcp89_apple(struct pci_dev *pdev); 91static bool is_mcp89_apple(struct pci_dev *pdev);
89static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, 92static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
@@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
107 .hardreset = ahci_p5wdh_hardreset, 110 .hardreset = ahci_p5wdh_hardreset,
108}; 111};
109 112
113static struct ata_port_operations ahci_avn_ops = {
114 .inherits = &ahci_ops,
115 .hardreset = ahci_avn_hardreset,
116};
117
110static const struct ata_port_info ahci_port_info[] = { 118static const struct ata_port_info ahci_port_info[] = {
111 /* by features */ 119 /* by features */
112 [board_ahci] = { 120 [board_ahci] = {
@@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = {
151 .port_ops = &ahci_ops, 159 .port_ops = &ahci_ops,
152 }, 160 },
153 /* by chipsets */ 161 /* by chipsets */
162 [board_ahci_avn] = {
163 .flags = AHCI_FLAG_COMMON,
164 .pio_mask = ATA_PIO4,
165 .udma_mask = ATA_UDMA6,
166 .port_ops = &ahci_avn_ops,
167 },
154 [board_ahci_mcp65] = { 168 [board_ahci_mcp65] = {
155 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | 169 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
156 AHCI_HFLAG_YES_NCQ), 170 AHCI_HFLAG_YES_NCQ),
@@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
290 { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */ 304 { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
291 { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */ 305 { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
292 { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */ 306 { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
293 { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */ 307 { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
294 { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */ 308 { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
295 { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */ 309 { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
296 { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */ 310 { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
297 { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */ 311 { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
298 { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ 312 { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
299 { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ 313 { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
300 { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ 314 { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
301 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ 315 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
302 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ 316 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
303 { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ 317 { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
670 return rc; 684 return rc;
671} 685}
672 686
687/*
688 * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
689 *
690 * It has been observed with some SSDs that the timing of events in the
691 * link synchronization phase can leave the port in a state that can not
692 * be recovered by a SATA-hard-reset alone. The failing signature is
693 * SStatus.DET stuck at 1 ("Device presence detected but Phy
694 * communication not established"). It was found that unloading and
695 * reloading the driver when this problem occurs allows the drive
696 * connection to be recovered (DET advanced to 0x3). The critical
697 * component of reloading the driver is that the port state machines are
698 * reset by bouncing "port enable" in the AHCI PCS configuration
699 * register. So, reproduce that effect by bouncing a port whenever we
700 * see DET==1 after a reset.
701 */
702static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
703 unsigned long deadline)
704{
705 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
706 struct ata_port *ap = link->ap;
707 struct ahci_port_priv *pp = ap->private_data;
708 struct ahci_host_priv *hpriv = ap->host->private_data;
709 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
710 unsigned long tmo = deadline - jiffies;
711 struct ata_taskfile tf;
712 bool online;
713 int rc, i;
714
715 DPRINTK("ENTER\n");
716
717 ahci_stop_engine(ap);
718
719 for (i = 0; i < 2; i++) {
720 u16 val;
721 u32 sstatus;
722 int port = ap->port_no;
723 struct ata_host *host = ap->host;
724 struct pci_dev *pdev = to_pci_dev(host->dev);
725
726 /* clear D2H reception area to properly wait for D2H FIS */
727 ata_tf_init(link->device, &tf);
728 tf.command = ATA_BUSY;
729 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
730
731 rc = sata_link_hardreset(link, timing, deadline, &online,
732 ahci_check_ready);
733
734 if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
735 (sstatus & 0xf) != 1)
736 break;
737
738 ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
739 port);
740
741 pci_read_config_word(pdev, 0x92, &val);
742 val &= ~(1 << port);
743 pci_write_config_word(pdev, 0x92, val);
744 ata_msleep(ap, 1000);
745 val |= 1 << port;
746 pci_write_config_word(pdev, 0x92, val);
747 deadline += tmo;
748 }
749
750 hpriv->start_engine(ap);
751
752 if (online)
753 *class = ahci_dev_classify(ap);
754
755 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
756 return rc;
757}
758
759
673#ifdef CONFIG_PM 760#ifdef CONFIG_PM
674static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 761static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
675{ 762{
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index ea0ff005b86c..8ff428fe8e0f 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -37,7 +37,6 @@ struct st_ahci_drv_data {
37 struct reset_control *pwr; 37 struct reset_control *pwr;
38 struct reset_control *sw_rst; 38 struct reset_control *sw_rst;
39 struct reset_control *pwr_rst; 39 struct reset_control *pwr_rst;
40 struct ahci_host_priv *hpriv;
41}; 40};
42 41
43static void st_ahci_configure_oob(void __iomem *mmio) 42static void st_ahci_configure_oob(void __iomem *mmio)
@@ -55,9 +54,10 @@ static void st_ahci_configure_oob(void __iomem *mmio)
55 writel(new_val, mmio + ST_AHCI_OOBR); 54 writel(new_val, mmio + ST_AHCI_OOBR);
56} 55}
57 56
58static int st_ahci_deassert_resets(struct device *dev) 57static int st_ahci_deassert_resets(struct ahci_host_priv *hpriv,
58 struct device *dev)
59{ 59{
60 struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); 60 struct st_ahci_drv_data *drv_data = hpriv->plat_data;
61 int err; 61 int err;
62 62
63 if (drv_data->pwr) { 63 if (drv_data->pwr) {
@@ -90,8 +90,8 @@ static int st_ahci_deassert_resets(struct device *dev)
90static void st_ahci_host_stop(struct ata_host *host) 90static void st_ahci_host_stop(struct ata_host *host)
91{ 91{
92 struct ahci_host_priv *hpriv = host->private_data; 92 struct ahci_host_priv *hpriv = host->private_data;
93 struct st_ahci_drv_data *drv_data = hpriv->plat_data;
93 struct device *dev = host->dev; 94 struct device *dev = host->dev;
94 struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
95 int err; 95 int err;
96 96
97 if (drv_data->pwr) { 97 if (drv_data->pwr) {
@@ -103,29 +103,30 @@ static void st_ahci_host_stop(struct ata_host *host)
103 ahci_platform_disable_resources(hpriv); 103 ahci_platform_disable_resources(hpriv);
104} 104}
105 105
106static int st_ahci_probe_resets(struct platform_device *pdev) 106static int st_ahci_probe_resets(struct ahci_host_priv *hpriv,
107 struct device *dev)
107{ 108{
108 struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev); 109 struct st_ahci_drv_data *drv_data = hpriv->plat_data;
109 110
110 drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn"); 111 drv_data->pwr = devm_reset_control_get(dev, "pwr-dwn");
111 if (IS_ERR(drv_data->pwr)) { 112 if (IS_ERR(drv_data->pwr)) {
112 dev_info(&pdev->dev, "power reset control not defined\n"); 113 dev_info(dev, "power reset control not defined\n");
113 drv_data->pwr = NULL; 114 drv_data->pwr = NULL;
114 } 115 }
115 116
116 drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst"); 117 drv_data->sw_rst = devm_reset_control_get(dev, "sw-rst");
117 if (IS_ERR(drv_data->sw_rst)) { 118 if (IS_ERR(drv_data->sw_rst)) {
118 dev_info(&pdev->dev, "soft reset control not defined\n"); 119 dev_info(dev, "soft reset control not defined\n");
119 drv_data->sw_rst = NULL; 120 drv_data->sw_rst = NULL;
120 } 121 }
121 122
122 drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst"); 123 drv_data->pwr_rst = devm_reset_control_get(dev, "pwr-rst");
123 if (IS_ERR(drv_data->pwr_rst)) { 124 if (IS_ERR(drv_data->pwr_rst)) {
124 dev_dbg(&pdev->dev, "power soft reset control not defined\n"); 125 dev_dbg(dev, "power soft reset control not defined\n");
125 drv_data->pwr_rst = NULL; 126 drv_data->pwr_rst = NULL;
126 } 127 }
127 128
128 return st_ahci_deassert_resets(&pdev->dev); 129 return st_ahci_deassert_resets(hpriv, dev);
129} 130}
130 131
131static struct ata_port_operations st_ahci_port_ops = { 132static struct ata_port_operations st_ahci_port_ops = {
@@ -154,15 +155,12 @@ static int st_ahci_probe(struct platform_device *pdev)
154 if (!drv_data) 155 if (!drv_data)
155 return -ENOMEM; 156 return -ENOMEM;
156 157
157 platform_set_drvdata(pdev, drv_data);
158
159 hpriv = ahci_platform_get_resources(pdev); 158 hpriv = ahci_platform_get_resources(pdev);
160 if (IS_ERR(hpriv)) 159 if (IS_ERR(hpriv))
161 return PTR_ERR(hpriv); 160 return PTR_ERR(hpriv);
161 hpriv->plat_data = drv_data;
162 162
163 drv_data->hpriv = hpriv; 163 err = st_ahci_probe_resets(hpriv, &pdev->dev);
164
165 err = st_ahci_probe_resets(pdev);
166 if (err) 164 if (err)
167 return err; 165 return err;
168 166
@@ -170,7 +168,7 @@ static int st_ahci_probe(struct platform_device *pdev)
170 if (err) 168 if (err)
171 return err; 169 return err;
172 170
173 st_ahci_configure_oob(drv_data->hpriv->mmio); 171 st_ahci_configure_oob(hpriv->mmio);
174 172
175 err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 173 err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
176 &ahci_platform_sht); 174 &ahci_platform_sht);
@@ -185,8 +183,9 @@ static int st_ahci_probe(struct platform_device *pdev)
185#ifdef CONFIG_PM_SLEEP 183#ifdef CONFIG_PM_SLEEP
186static int st_ahci_suspend(struct device *dev) 184static int st_ahci_suspend(struct device *dev)
187{ 185{
188 struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); 186 struct ata_host *host = dev_get_drvdata(dev);
189 struct ahci_host_priv *hpriv = drv_data->hpriv; 187 struct ahci_host_priv *hpriv = host->private_data;
188 struct st_ahci_drv_data *drv_data = hpriv->plat_data;
190 int err; 189 int err;
191 190
192 err = ahci_platform_suspend_host(dev); 191 err = ahci_platform_suspend_host(dev);
@@ -208,21 +207,21 @@ static int st_ahci_suspend(struct device *dev)
208 207
209static int st_ahci_resume(struct device *dev) 208static int st_ahci_resume(struct device *dev)
210{ 209{
211 struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); 210 struct ata_host *host = dev_get_drvdata(dev);
212 struct ahci_host_priv *hpriv = drv_data->hpriv; 211 struct ahci_host_priv *hpriv = host->private_data;
213 int err; 212 int err;
214 213
215 err = ahci_platform_enable_resources(hpriv); 214 err = ahci_platform_enable_resources(hpriv);
216 if (err) 215 if (err)
217 return err; 216 return err;
218 217
219 err = st_ahci_deassert_resets(dev); 218 err = st_ahci_deassert_resets(hpriv, dev);
220 if (err) { 219 if (err) {
221 ahci_platform_disable_resources(hpriv); 220 ahci_platform_disable_resources(hpriv);
222 return err; 221 return err;
223 } 222 }
224 223
225 st_ahci_configure_oob(drv_data->hpriv->mmio); 224 st_ahci_configure_oob(hpriv->mmio);
226 225
227 return ahci_platform_resume_host(dev); 226 return ahci_platform_resume_host(dev);
228} 227}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 61a9c07e0dff..287c4ba0219f 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
1707 if (unlikely(resetting)) 1707 if (unlikely(resetting))
1708 status &= ~PORT_IRQ_BAD_PMP; 1708 status &= ~PORT_IRQ_BAD_PMP;
1709 1709
1710 /* if LPM is enabled, PHYRDY doesn't mean anything */ 1710 if (sata_lpm_ignore_phy_events(&ap->link)) {
1711 if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
1712 status &= ~PORT_IRQ_PHYRDY; 1711 status &= ~PORT_IRQ_PHYRDY;
1713 ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG); 1712 ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
1714 } 1713 }
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f6cb1f1b30b7..577849c6611a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4235,7 +4235,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4235 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4235 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4236 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4236 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4237 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4237 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4238 { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4238 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4239 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4239 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4240 4240
4241 /* 4241 /*
@@ -6752,6 +6752,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6752 return tmp; 6752 return tmp;
6753} 6753}
6754 6754
6755/**
6756 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
6757 * @link: Link receiving the event
6758 *
6759 * Test whether the received PHY event has to be ignored or not.
6760 *
6761 * LOCKING:
6762 * None:
6763 *
6764 * RETURNS:
6765 * True if the event has to be ignored.
6766 */
6767bool sata_lpm_ignore_phy_events(struct ata_link *link)
6768{
6769 unsigned long lpm_timeout = link->last_lpm_change +
6770 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6771
6772 /* if LPM is enabled, PHYRDY doesn't mean anything */
6773 if (link->lpm_policy > ATA_LPM_MAX_POWER)
6774 return true;
6775
6776 /* ignore the first PHY event after the LPM policy changed
 6777 * as it might be spurious
6778 */
6779 if ((link->flags & ATA_LFLAG_CHANGED) &&
6780 time_before(jiffies, lpm_timeout))
6781 return true;
6782
6783 return false;
6784}
6785EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
6786
6755/* 6787/*
6756 * Dummy port_ops 6788 * Dummy port_ops
6757 */ 6789 */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 07f41be38fbe..cf0022ec07f2 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3597,6 +3597,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3597 } 3597 }
3598 } 3598 }
3599 3599
3600 link->last_lpm_change = jiffies;
3601 link->flags |= ATA_LFLAG_CHANGED;
3602
3600 return 0; 3603 return 0;
3601 3604
3602fail: 3605fail:
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
deleted file mode 100644
index 5cd60d6388ec..000000000000
--- a/drivers/ata/pata_scc.c
+++ /dev/null
@@ -1,1110 +0,0 @@
1/*
2 * Support for IDE interfaces on Celleb platform
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This code is based on drivers/ata/ata_piix.c:
7 * Copyright 2003-2005 Red Hat Inc
8 * Copyright 2003-2005 Jeff Garzik
9 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
10 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
11 * Copyright (C) 2003 Red Hat Inc
12 *
13 * and drivers/ata/ahci.c:
14 * Copyright 2004-2005 Red Hat, Inc.
15 *
16 * and drivers/ata/libata-core.c:
17 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
18 * Copyright 2003-2004 Jeff Garzik
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/device.h>
41#include <scsi/scsi_host.h>
42#include <linux/libata.h>
43
44#define DRV_NAME "pata_scc"
45#define DRV_VERSION "0.3"
46
47#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
48
49/* PCI BARs */
50#define SCC_CTRL_BAR 0
51#define SCC_BMID_BAR 1
52
53/* offset of CTRL registers */
54#define SCC_CTL_PIOSHT 0x000
55#define SCC_CTL_PIOCT 0x004
56#define SCC_CTL_MDMACT 0x008
57#define SCC_CTL_MCRCST 0x00C
58#define SCC_CTL_SDMACT 0x010
59#define SCC_CTL_SCRCST 0x014
60#define SCC_CTL_UDENVT 0x018
61#define SCC_CTL_TDVHSEL 0x020
62#define SCC_CTL_MODEREG 0x024
63#define SCC_CTL_ECMODE 0xF00
64#define SCC_CTL_MAEA0 0xF50
65#define SCC_CTL_MAEC0 0xF54
66#define SCC_CTL_CCKCTRL 0xFF0
67
68/* offset of BMID registers */
69#define SCC_DMA_CMD 0x000
70#define SCC_DMA_STATUS 0x004
71#define SCC_DMA_TABLE_OFS 0x008
72#define SCC_DMA_INTMASK 0x010
73#define SCC_DMA_INTST 0x014
74#define SCC_DMA_PTERADD 0x018
75#define SCC_REG_CMD_ADDR 0x020
76#define SCC_REG_DATA 0x000
77#define SCC_REG_ERR 0x004
78#define SCC_REG_FEATURE 0x004
79#define SCC_REG_NSECT 0x008
80#define SCC_REG_LBAL 0x00C
81#define SCC_REG_LBAM 0x010
82#define SCC_REG_LBAH 0x014
83#define SCC_REG_DEVICE 0x018
84#define SCC_REG_STATUS 0x01C
85#define SCC_REG_CMD 0x01C
86#define SCC_REG_ALTSTATUS 0x020
87
88/* register value */
89#define TDVHSEL_MASTER 0x00000001
90#define TDVHSEL_SLAVE 0x00000004
91
92#define MODE_JCUSFEN 0x00000080
93
94#define ECMODE_VALUE 0x01
95
96#define CCKCTRL_ATARESET 0x00040000
97#define CCKCTRL_BUFCNT 0x00020000
98#define CCKCTRL_CRST 0x00010000
99#define CCKCTRL_OCLKEN 0x00000100
100#define CCKCTRL_ATACLKOEN 0x00000002
101#define CCKCTRL_LCLKEN 0x00000001
102
103#define QCHCD_IOS_SS 0x00000001
104
105#define QCHSD_STPDIAG 0x00020000
106
107#define INTMASK_MSK 0xD1000012
108#define INTSTS_SERROR 0x80000000
109#define INTSTS_PRERR 0x40000000
110#define INTSTS_RERR 0x10000000
111#define INTSTS_ICERR 0x01000000
112#define INTSTS_BMSINT 0x00000010
113#define INTSTS_BMHE 0x00000008
114#define INTSTS_IOIRQS 0x00000004
115#define INTSTS_INTRQ 0x00000002
116#define INTSTS_ACTEINT 0x00000001
117
118
119/* PIO transfer mode table */
120/* JCHST */
121static const unsigned long JCHSTtbl[2][7] = {
122 {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
123 {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
124};
125
126/* JCHHT */
127static const unsigned long JCHHTtbl[2][7] = {
128 {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
129 {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
130};
131
132/* JCHCT */
133static const unsigned long JCHCTtbl[2][7] = {
134 {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
135 {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
136};
137
138/* DMA transfer mode table */
139/* JCHDCTM/JCHDCTS */
140static const unsigned long JCHDCTxtbl[2][7] = {
141 {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
142 {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
143};
144
145/* JCSTWTM/JCSTWTS */
146static const unsigned long JCSTWTxtbl[2][7] = {
147 {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
148 {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
149};
150
151/* JCTSS */
152static const unsigned long JCTSStbl[2][7] = {
153 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
154 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
155};
156
157/* JCENVT */
158static const unsigned long JCENVTtbl[2][7] = {
159 {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
160 {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
161};
162
163/* JCACTSELS/JCACTSELM */
164static const unsigned long JCACTSELtbl[2][7] = {
165 {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
166 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
167};
168
169static const struct pci_device_id scc_pci_tbl[] = {
170 { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0},
171 { } /* terminate list */
172};
173
174/**
175 * scc_set_piomode - Initialize host controller PATA PIO timings
176 * @ap: Port whose timings we are configuring
177 * @adev: um
178 *
179 * Set PIO mode for device.
180 *
181 * LOCKING:
182 * None (inherited from caller).
183 */
184
185static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
186{
187 unsigned int pio = adev->pio_mode - XFER_PIO_0;
188 void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
189 void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
190 void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
191 void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
192 unsigned long reg;
193 int offset;
194
195 reg = in_be32(cckctrl_port);
196 if (reg & CCKCTRL_ATACLKOEN)
197 offset = 1; /* 133MHz */
198 else
199 offset = 0; /* 100MHz */
200
201 reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
202 out_be32(piosht_port, reg);
203 reg = JCHCTtbl[offset][pio];
204 out_be32(pioct_port, reg);
205}
206
207/**
208 * scc_set_dmamode - Initialize host controller PATA DMA timings
209 * @ap: Port whose timings we are configuring
210 * @adev: um
211 *
212 * Set UDMA mode for device.
213 *
214 * LOCKING:
215 * None (inherited from caller).
216 */
217
218static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
219{
220 unsigned int udma = adev->dma_mode;
221 unsigned int is_slave = (adev->devno != 0);
222 u8 speed = udma;
223 void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
224 void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
225 void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
226 void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
227 void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
228 void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
229 void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
230 void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
231 int offset, idx;
232
233 if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
234 offset = 1; /* 133MHz */
235 else
236 offset = 0; /* 100MHz */
237
238 if (speed >= XFER_UDMA_0)
239 idx = speed - XFER_UDMA_0;
240 else
241 return;
242
243 if (is_slave) {
244 out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
245 out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
246 out_be32(tdvhsel_port,
247 (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
248 } else {
249 out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
250 out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
251 out_be32(tdvhsel_port,
252 (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
253 }
254 out_be32(udenvt_port,
255 JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
256}
257
258unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
259{
260 /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
261 if (adev->class == ATA_DEV_ATAPI &&
262 (mask & (0xE0 << ATA_SHIFT_UDMA))) {
263 printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
264 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
265 }
266 return mask;
267}
268
269/**
270 * scc_tf_load - send taskfile registers to host controller
271 * @ap: Port to which output is sent
272 * @tf: ATA taskfile register set
273 *
274 * Note: Original code is ata_sff_tf_load().
275 */
276
277static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
278{
279 struct ata_ioports *ioaddr = &ap->ioaddr;
280 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
281
282 if (tf->ctl != ap->last_ctl) {
283 out_be32(ioaddr->ctl_addr, tf->ctl);
284 ap->last_ctl = tf->ctl;
285 ata_wait_idle(ap);
286 }
287
288 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
289 out_be32(ioaddr->feature_addr, tf->hob_feature);
290 out_be32(ioaddr->nsect_addr, tf->hob_nsect);
291 out_be32(ioaddr->lbal_addr, tf->hob_lbal);
292 out_be32(ioaddr->lbam_addr, tf->hob_lbam);
293 out_be32(ioaddr->lbah_addr, tf->hob_lbah);
294 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
295 tf->hob_feature,
296 tf->hob_nsect,
297 tf->hob_lbal,
298 tf->hob_lbam,
299 tf->hob_lbah);
300 }
301
302 if (is_addr) {
303 out_be32(ioaddr->feature_addr, tf->feature);
304 out_be32(ioaddr->nsect_addr, tf->nsect);
305 out_be32(ioaddr->lbal_addr, tf->lbal);
306 out_be32(ioaddr->lbam_addr, tf->lbam);
307 out_be32(ioaddr->lbah_addr, tf->lbah);
308 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
309 tf->feature,
310 tf->nsect,
311 tf->lbal,
312 tf->lbam,
313 tf->lbah);
314 }
315
316 if (tf->flags & ATA_TFLAG_DEVICE) {
317 out_be32(ioaddr->device_addr, tf->device);
318 VPRINTK("device 0x%X\n", tf->device);
319 }
320
321 ata_wait_idle(ap);
322}
323
324/**
325 * scc_check_status - Read device status reg & clear interrupt
326 * @ap: port where the device is
327 *
328 * Note: Original code is ata_check_status().
329 */
330
331static u8 scc_check_status (struct ata_port *ap)
332{
333 return in_be32(ap->ioaddr.status_addr);
334}
335
336/**
337 * scc_tf_read - input device's ATA taskfile shadow registers
338 * @ap: Port from which input is read
339 * @tf: ATA taskfile register set for storing input
340 *
341 * Note: Original code is ata_sff_tf_read().
342 */
343
344static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
345{
346 struct ata_ioports *ioaddr = &ap->ioaddr;
347
348 tf->command = scc_check_status(ap);
349 tf->feature = in_be32(ioaddr->error_addr);
350 tf->nsect = in_be32(ioaddr->nsect_addr);
351 tf->lbal = in_be32(ioaddr->lbal_addr);
352 tf->lbam = in_be32(ioaddr->lbam_addr);
353 tf->lbah = in_be32(ioaddr->lbah_addr);
354 tf->device = in_be32(ioaddr->device_addr);
355
356 if (tf->flags & ATA_TFLAG_LBA48) {
357 out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
358 tf->hob_feature = in_be32(ioaddr->error_addr);
359 tf->hob_nsect = in_be32(ioaddr->nsect_addr);
360 tf->hob_lbal = in_be32(ioaddr->lbal_addr);
361 tf->hob_lbam = in_be32(ioaddr->lbam_addr);
362 tf->hob_lbah = in_be32(ioaddr->lbah_addr);
363 out_be32(ioaddr->ctl_addr, tf->ctl);
364 ap->last_ctl = tf->ctl;
365 }
366}
367
368/**
369 * scc_exec_command - issue ATA command to host controller
370 * @ap: port to which command is being issued
371 * @tf: ATA taskfile register set
372 *
373 * Note: Original code is ata_sff_exec_command().
374 */
375
376static void scc_exec_command (struct ata_port *ap,
377 const struct ata_taskfile *tf)
378{
379 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
380
381 out_be32(ap->ioaddr.command_addr, tf->command);
382 ata_sff_pause(ap);
383}
384
385/**
386 * scc_check_altstatus - Read device alternate status reg
387 * @ap: port where the device is
388 */
389
390static u8 scc_check_altstatus (struct ata_port *ap)
391{
392 return in_be32(ap->ioaddr.altstatus_addr);
393}
394
395/**
396 * scc_dev_select - Select device 0/1 on ATA bus
397 * @ap: ATA channel to manipulate
398 * @device: ATA device (numbered from zero) to select
399 *
400 * Note: Original code is ata_sff_dev_select().
401 */
402
403static void scc_dev_select (struct ata_port *ap, unsigned int device)
404{
405 u8 tmp;
406
407 if (device == 0)
408 tmp = ATA_DEVICE_OBS;
409 else
410 tmp = ATA_DEVICE_OBS | ATA_DEV1;
411
412 out_be32(ap->ioaddr.device_addr, tmp);
413 ata_sff_pause(ap);
414}
415
416/**
417 * scc_set_devctl - Write device control reg
418 * @ap: port where the device is
419 * @ctl: value to write
420 */
421
422static void scc_set_devctl(struct ata_port *ap, u8 ctl)
423{
424 out_be32(ap->ioaddr.ctl_addr, ctl);
425}
426
427/**
428 * scc_bmdma_setup - Set up PCI IDE BMDMA transaction
429 * @qc: Info associated with this ATA transaction.
430 *
431 * Note: Original code is ata_bmdma_setup().
432 */
433
434static void scc_bmdma_setup (struct ata_queued_cmd *qc)
435{
436 struct ata_port *ap = qc->ap;
437 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
438 u8 dmactl;
439 void __iomem *mmio = ap->ioaddr.bmdma_addr;
440
441 /* load PRD table addr */
442 out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
443
444 /* specify data direction, triple-check start bit is clear */
445 dmactl = in_be32(mmio + SCC_DMA_CMD);
446 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
447 if (!rw)
448 dmactl |= ATA_DMA_WR;
449 out_be32(mmio + SCC_DMA_CMD, dmactl);
450
451 /* issue r/w command */
452 ap->ops->sff_exec_command(ap, &qc->tf);
453}
454
455/**
456 * scc_bmdma_start - Start a PCI IDE BMDMA transaction
457 * @qc: Info associated with this ATA transaction.
458 *
459 * Note: Original code is ata_bmdma_start().
460 */
461
462static void scc_bmdma_start (struct ata_queued_cmd *qc)
463{
464 struct ata_port *ap = qc->ap;
465 u8 dmactl;
466 void __iomem *mmio = ap->ioaddr.bmdma_addr;
467
468 /* start host DMA transaction */
469 dmactl = in_be32(mmio + SCC_DMA_CMD);
470 out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
471}
472
473/**
474 * scc_devchk - PATA device presence detection
475 * @ap: ATA channel to examine
476 * @device: Device to examine (starting at zero)
477 *
478 * Note: Original code is ata_devchk().
479 */
480
481static unsigned int scc_devchk (struct ata_port *ap,
482 unsigned int device)
483{
484 struct ata_ioports *ioaddr = &ap->ioaddr;
485 u8 nsect, lbal;
486
487 ap->ops->sff_dev_select(ap, device);
488
489 out_be32(ioaddr->nsect_addr, 0x55);
490 out_be32(ioaddr->lbal_addr, 0xaa);
491
492 out_be32(ioaddr->nsect_addr, 0xaa);
493 out_be32(ioaddr->lbal_addr, 0x55);
494
495 out_be32(ioaddr->nsect_addr, 0x55);
496 out_be32(ioaddr->lbal_addr, 0xaa);
497
498 nsect = in_be32(ioaddr->nsect_addr);
499 lbal = in_be32(ioaddr->lbal_addr);
500
501 if ((nsect == 0x55) && (lbal == 0xaa))
502 return 1; /* we found a device */
503
504 return 0; /* nothing found */
505}
506
507/**
508 * scc_wait_after_reset - wait for devices to become ready after reset
509 *
510 * Note: Original code is ata_sff_wait_after_reset
511 */
512
513static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
514 unsigned long deadline)
515{
516 struct ata_port *ap = link->ap;
517 struct ata_ioports *ioaddr = &ap->ioaddr;
518 unsigned int dev0 = devmask & (1 << 0);
519 unsigned int dev1 = devmask & (1 << 1);
520 int rc, ret = 0;
521
522 /* Spec mandates ">= 2ms" before checking status. We wait
523 * 150ms, because that was the magic delay used for ATAPI
524 * devices in Hale Landis's ATADRVR, for the period of time
525 * between when the ATA command register is written, and then
526 * status is checked. Because waiting for "a while" before
527 * checking status is fine, post SRST, we perform this magic
528 * delay here as well.
529 *
530 * Old drivers/ide uses the 2mS rule and then waits for ready.
531 */
532 ata_msleep(ap, 150);
533
534 /* always check readiness of the master device */
535 rc = ata_sff_wait_ready(link, deadline);
536 /* -ENODEV means the odd clown forgot the D7 pulldown resistor
537 * and TF status is 0xff, bail out on it too.
538 */
539 if (rc)
540 return rc;
541
542 /* if device 1 was found in ata_devchk, wait for register
543 * access briefly, then wait for BSY to clear.
544 */
545 if (dev1) {
546 int i;
547
548 ap->ops->sff_dev_select(ap, 1);
549
550 /* Wait for register access. Some ATAPI devices fail
551 * to set nsect/lbal after reset, so don't waste too
552 * much time on it. We're gonna wait for !BSY anyway.
553 */
554 for (i = 0; i < 2; i++) {
555 u8 nsect, lbal;
556
557 nsect = in_be32(ioaddr->nsect_addr);
558 lbal = in_be32(ioaddr->lbal_addr);
559 if ((nsect == 1) && (lbal == 1))
560 break;
561 ata_msleep(ap, 50); /* give drive a breather */
562 }
563
564 rc = ata_sff_wait_ready(link, deadline);
565 if (rc) {
566 if (rc != -ENODEV)
567 return rc;
568 ret = rc;
569 }
570 }
571
572 /* is all this really necessary? */
573 ap->ops->sff_dev_select(ap, 0);
574 if (dev1)
575 ap->ops->sff_dev_select(ap, 1);
576 if (dev0)
577 ap->ops->sff_dev_select(ap, 0);
578
579 return ret;
580}
581
582/**
583 * scc_bus_softreset - PATA device software reset
584 *
585 * Note: Original code is ata_bus_softreset().
586 */
587
588static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
589 unsigned long deadline)
590{
591 struct ata_ioports *ioaddr = &ap->ioaddr;
592
593 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
594
595 /* software reset. causes dev0 to be selected */
596 out_be32(ioaddr->ctl_addr, ap->ctl);
597 udelay(20);
598 out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
599 udelay(20);
600 out_be32(ioaddr->ctl_addr, ap->ctl);
601
602 return scc_wait_after_reset(&ap->link, devmask, deadline);
603}
604
605/**
606 * scc_softreset - reset host port via ATA SRST
607 * @ap: port to reset
608 * @classes: resulting classes of attached devices
609 * @deadline: deadline jiffies for the operation
610 *
611 * Note: Original code is ata_sff_softreset().
612 */
613
614static int scc_softreset(struct ata_link *link, unsigned int *classes,
615 unsigned long deadline)
616{
617 struct ata_port *ap = link->ap;
618 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
619 unsigned int devmask = 0;
620 int rc;
621 u8 err;
622
623 DPRINTK("ENTER\n");
624
625 /* determine if device 0/1 are present */
626 if (scc_devchk(ap, 0))
627 devmask |= (1 << 0);
628 if (slave_possible && scc_devchk(ap, 1))
629 devmask |= (1 << 1);
630
631 /* select device 0 again */
632 ap->ops->sff_dev_select(ap, 0);
633
634 /* issue bus reset */
635 DPRINTK("about to softreset, devmask=%x\n", devmask);
636 rc = scc_bus_softreset(ap, devmask, deadline);
637 if (rc) {
638 ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
639 return -EIO;
640 }
641
642 /* determine by signature whether we have ATA or ATAPI devices */
643 classes[0] = ata_sff_dev_classify(&ap->link.device[0],
644 devmask & (1 << 0), &err);
645 if (slave_possible && err != 0x81)
646 classes[1] = ata_sff_dev_classify(&ap->link.device[1],
647 devmask & (1 << 1), &err);
648
649 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
650 return 0;
651}
652
653/**
654 * scc_bmdma_stop - Stop PCI IDE BMDMA transfer
655 * @qc: Command we are ending DMA for
656 */
657
658static void scc_bmdma_stop (struct ata_queued_cmd *qc)
659{
660 struct ata_port *ap = qc->ap;
661 void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
662 void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
663 u32 reg;
664
665 while (1) {
666 reg = in_be32(bmid_base + SCC_DMA_INTST);
667
668 if (reg & INTSTS_SERROR) {
669 printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
670 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
671 out_be32(bmid_base + SCC_DMA_CMD,
672 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
673 continue;
674 }
675
676 if (reg & INTSTS_PRERR) {
677 u32 maea0, maec0;
678 maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
679 maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
680 printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
681 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
682 out_be32(bmid_base + SCC_DMA_CMD,
683 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
684 continue;
685 }
686
687 if (reg & INTSTS_RERR) {
688 printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
689 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
690 out_be32(bmid_base + SCC_DMA_CMD,
691 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
692 continue;
693 }
694
695 if (reg & INTSTS_ICERR) {
696 out_be32(bmid_base + SCC_DMA_CMD,
697 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
698 printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
699 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
700 continue;
701 }
702
703 if (reg & INTSTS_BMSINT) {
704 unsigned int classes;
705 unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
706 printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
707 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
708 /* TBD: SW reset */
709 scc_softreset(&ap->link, &classes, deadline);
710 continue;
711 }
712
713 if (reg & INTSTS_BMHE) {
714 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
715 continue;
716 }
717
718 if (reg & INTSTS_ACTEINT) {
719 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
720 continue;
721 }
722
723 if (reg & INTSTS_IOIRQS) {
724 out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
725 continue;
726 }
727 break;
728 }
729
730 /* clear start/stop bit */
731 out_be32(bmid_base + SCC_DMA_CMD,
732 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
733
734 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
735 ata_sff_dma_pause(ap); /* dummy read */
736}
737
738/**
739 * scc_bmdma_status - Read PCI IDE BMDMA status
740 * @ap: Port associated with this ATA transaction.
741 */
742
743static u8 scc_bmdma_status (struct ata_port *ap)
744{
745 void __iomem *mmio = ap->ioaddr.bmdma_addr;
746 u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
747 u32 int_status = in_be32(mmio + SCC_DMA_INTST);
748 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
749 static int retry = 0;
750
751 /* return if IOS_SS is cleared */
752 if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
753 return host_stat;
754
755 /* errata A252,A308 workaround: Step4 */
756 if ((scc_check_altstatus(ap) & ATA_ERR)
757 && (int_status & INTSTS_INTRQ))
758 return (host_stat | ATA_DMA_INTR);
759
760 /* errata A308 workaround Step5 */
761 if (int_status & INTSTS_IOIRQS) {
762 host_stat |= ATA_DMA_INTR;
763
764 /* We don't check ATAPI DMA because it is limited to UDMA4 */
765 if ((qc->tf.protocol == ATA_PROT_DMA &&
766 qc->dev->xfer_mode > XFER_UDMA_4)) {
767 if (!(int_status & INTSTS_ACTEINT)) {
768 printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
769 ap->print_id);
770 host_stat |= ATA_DMA_ERR;
771 if (retry++)
772 ap->udma_mask &= ~(1 << qc->dev->xfer_mode);
773 } else
774 retry = 0;
775 }
776 }
777
778 return host_stat;
779}
780
781/**
782 * scc_data_xfer - Transfer data by PIO
783 * @dev: device for this I/O
784 * @buf: data buffer
785 * @buflen: buffer length
786 * @rw: read/write
787 *
788 * Note: Original code is ata_sff_data_xfer().
789 */
790
791static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
792 unsigned int buflen, int rw)
793{
794 struct ata_port *ap = dev->link->ap;
795 unsigned int words = buflen >> 1;
796 unsigned int i;
797 __le16 *buf16 = (__le16 *) buf;
798 void __iomem *mmio = ap->ioaddr.data_addr;
799
800 /* Transfer multiple of 2 bytes */
801 if (rw == READ)
802 for (i = 0; i < words; i++)
803 buf16[i] = cpu_to_le16(in_be32(mmio));
804 else
805 for (i = 0; i < words; i++)
806 out_be32(mmio, le16_to_cpu(buf16[i]));
807
808 /* Transfer trailing 1 byte, if any. */
809 if (unlikely(buflen & 0x01)) {
810 __le16 align_buf[1] = { 0 };
811 unsigned char *trailing_buf = buf + buflen - 1;
812
813 if (rw == READ) {
814 align_buf[0] = cpu_to_le16(in_be32(mmio));
815 memcpy(trailing_buf, align_buf, 1);
816 } else {
817 memcpy(align_buf, trailing_buf, 1);
818 out_be32(mmio, le16_to_cpu(align_buf[0]));
819 }
820 words++;
821 }
822
823 return words << 1;
824}
825
826/**
827 * scc_postreset - standard postreset callback
828 * @ap: the target ata_port
829 * @classes: classes of attached devices
830 *
831 * Note: Original code is ata_sff_postreset().
832 */
833
834static void scc_postreset(struct ata_link *link, unsigned int *classes)
835{
836 struct ata_port *ap = link->ap;
837
838 DPRINTK("ENTER\n");
839
840 /* is double-select really necessary? */
841 if (classes[0] != ATA_DEV_NONE)
842 ap->ops->sff_dev_select(ap, 1);
843 if (classes[1] != ATA_DEV_NONE)
844 ap->ops->sff_dev_select(ap, 0);
845
846 /* bail out if no device is present */
847 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
848 DPRINTK("EXIT, no device\n");
849 return;
850 }
851
852 /* set up device control */
853 out_be32(ap->ioaddr.ctl_addr, ap->ctl);
854
855 DPRINTK("EXIT\n");
856}
857
858/**
859 * scc_irq_clear - Clear PCI IDE BMDMA interrupt.
860 * @ap: Port associated with this ATA transaction.
861 *
862 * Note: Original code is ata_bmdma_irq_clear().
863 */
864
865static void scc_irq_clear (struct ata_port *ap)
866{
867 void __iomem *mmio = ap->ioaddr.bmdma_addr;
868
869 if (!mmio)
870 return;
871
872 out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
873}
874
875/**
876 * scc_port_start - Set port up for dma.
877 * @ap: Port to initialize
878 *
879 * Allocate space for PRD table using ata_bmdma_port_start().
880 * Set PRD table address for PTERADD. (PRD Transfer End Read)
881 */
882
883static int scc_port_start (struct ata_port *ap)
884{
885 void __iomem *mmio = ap->ioaddr.bmdma_addr;
886 int rc;
887
888 rc = ata_bmdma_port_start(ap);
889 if (rc)
890 return rc;
891
892 out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
893 return 0;
894}
895
896/**
897 * scc_port_stop - Undo scc_port_start()
898 * @ap: Port to shut down
899 *
900 * Reset PTERADD.
901 */
902
903static void scc_port_stop (struct ata_port *ap)
904{
905 void __iomem *mmio = ap->ioaddr.bmdma_addr;
906
907 out_be32(mmio + SCC_DMA_PTERADD, 0);
908}
909
910static struct scsi_host_template scc_sht = {
911 ATA_BMDMA_SHT(DRV_NAME),
912};
913
914static struct ata_port_operations scc_pata_ops = {
915 .inherits = &ata_bmdma_port_ops,
916
917 .set_piomode = scc_set_piomode,
918 .set_dmamode = scc_set_dmamode,
919 .mode_filter = scc_mode_filter,
920
921 .sff_tf_load = scc_tf_load,
922 .sff_tf_read = scc_tf_read,
923 .sff_exec_command = scc_exec_command,
924 .sff_check_status = scc_check_status,
925 .sff_check_altstatus = scc_check_altstatus,
926 .sff_dev_select = scc_dev_select,
927 .sff_set_devctl = scc_set_devctl,
928
929 .bmdma_setup = scc_bmdma_setup,
930 .bmdma_start = scc_bmdma_start,
931 .bmdma_stop = scc_bmdma_stop,
932 .bmdma_status = scc_bmdma_status,
933 .sff_data_xfer = scc_data_xfer,
934
935 .cable_detect = ata_cable_80wire,
936 .softreset = scc_softreset,
937 .postreset = scc_postreset,
938
939 .sff_irq_clear = scc_irq_clear,
940
941 .port_start = scc_port_start,
942 .port_stop = scc_port_stop,
943};
944
945static struct ata_port_info scc_port_info[] = {
946 {
947 .flags = ATA_FLAG_SLAVE_POSS,
948 .pio_mask = ATA_PIO4,
949 /* No MWDMA */
950 .udma_mask = ATA_UDMA6,
951 .port_ops = &scc_pata_ops,
952 },
953};
954
955/**
956 * scc_reset_controller - initialize SCC PATA controller.
957 */
958
959static int scc_reset_controller(struct ata_host *host)
960{
961 void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR];
962 void __iomem *bmid_base = host->iomap[SCC_BMID_BAR];
963 void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
964 void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
965 void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
966 void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
967 void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
968 u32 reg = 0;
969
970 out_be32(cckctrl_port, reg);
971 reg |= CCKCTRL_ATACLKOEN;
972 out_be32(cckctrl_port, reg);
973 reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
974 out_be32(cckctrl_port, reg);
975 reg |= CCKCTRL_CRST;
976 out_be32(cckctrl_port, reg);
977
978 for (;;) {
979 reg = in_be32(cckctrl_port);
980 if (reg & CCKCTRL_CRST)
981 break;
982 udelay(5000);
983 }
984
985 reg |= CCKCTRL_ATARESET;
986 out_be32(cckctrl_port, reg);
987 out_be32(ecmode_port, ECMODE_VALUE);
988 out_be32(mode_port, MODE_JCUSFEN);
989 out_be32(intmask_port, INTMASK_MSK);
990
991 if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
992 printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
993 return -EIO;
994 }
995
996 return 0;
997}
998
999/**
1000 * scc_setup_ports - initialize ioaddr with SCC PATA port offsets.
1001 * @ioaddr: IO address structure to be initialized
1002 * @base: base address of BMID region
1003 */
1004
1005static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
1006{
1007 ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
1008 ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
1009 ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
1010 ioaddr->bmdma_addr = base;
1011 ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
1012 ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
1013 ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
1014 ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
1015 ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
1016 ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
1017 ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
1018 ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
1019 ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
1020 ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
1021}
1022
1023static int scc_host_init(struct ata_host *host)
1024{
1025 struct pci_dev *pdev = to_pci_dev(host->dev);
1026 int rc;
1027
1028 rc = scc_reset_controller(host);
1029 if (rc)
1030 return rc;
1031
1032 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
1033 if (rc)
1034 return rc;
1035 rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
1036 if (rc)
1037 return rc;
1038
1039 scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]);
1040
1041 pci_set_master(pdev);
1042
1043 return 0;
1044}
1045
1046/**
1047 * scc_init_one - Register SCC PATA device with kernel services
1048 * @pdev: PCI device to register
1049 * @ent: Entry in scc_pci_tbl matching with @pdev
1050 *
1051 * LOCKING:
1052 * Inherited from PCI layer (may sleep).
1053 *
1054 * RETURNS:
1055 * Zero on success, or -ERRNO value.
1056 */
1057
1058static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1059{
1060 unsigned int board_idx = (unsigned int) ent->driver_data;
1061 const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
1062 struct ata_host *host;
1063 int rc;
1064
1065 ata_print_version_once(&pdev->dev, DRV_VERSION);
1066
1067 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
1068 if (!host)
1069 return -ENOMEM;
1070
1071 rc = pcim_enable_device(pdev);
1072 if (rc)
1073 return rc;
1074
1075 rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
1076 if (rc == -EBUSY)
1077 pcim_pin_device(pdev);
1078 if (rc)
1079 return rc;
1080 host->iomap = pcim_iomap_table(pdev);
1081
1082 ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl");
1083 ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid");
1084
1085 rc = scc_host_init(host);
1086 if (rc)
1087 return rc;
1088
1089 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
1090 IRQF_SHARED, &scc_sht);
1091}
1092
1093static struct pci_driver scc_pci_driver = {
1094 .name = DRV_NAME,
1095 .id_table = scc_pci_tbl,
1096 .probe = scc_init_one,
1097 .remove = ata_pci_remove_one,
1098#ifdef CONFIG_PM_SLEEP
1099 .suspend = ata_pci_device_suspend,
1100 .resume = ata_pci_device_resume,
1101#endif
1102};
1103
1104module_pci_driver(scc_pci_driver);
1105
1106MODULE_AUTHOR("Toshiba corp");
1107MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
1108MODULE_LICENSE("GPL");
1109MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
1110MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ae3fcb4199e9..d7173cb1ea76 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1620,8 +1620,8 @@ out:
1620 1620
1621static void loop_remove(struct loop_device *lo) 1621static void loop_remove(struct loop_device *lo)
1622{ 1622{
1623 del_gendisk(lo->lo_disk);
1624 blk_cleanup_queue(lo->lo_queue); 1623 blk_cleanup_queue(lo->lo_queue);
1624 del_gendisk(lo->lo_disk);
1625 blk_mq_free_tag_set(&lo->tag_set); 1625 blk_mq_free_tag_set(&lo->tag_set);
1626 put_disk(lo->lo_disk); 1626 put_disk(lo->lo_disk);
1627 kfree(lo); 1627 kfree(lo);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 6b736b00f63e..88f13c525712 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -944,7 +944,8 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
944static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, 944static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
945 u8 *inq_response, int alloc_len) 945 u8 *inq_response, int alloc_len)
946{ 946{
947 __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue)); 947 __be32 max_sectors = cpu_to_be32(
948 nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
948 __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); 949 __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
949 __be32 discard_desc_count = cpu_to_be32(0x100); 950 __be32 discard_desc_count = cpu_to_be32(0x100);
950 951
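The one-line change above matters because the SCSI Block Limits VPD page reports MAXIMUM TRANSFER LENGTH in logical blocks, while queue_max_hw_sectors() returns 512-byte sectors; without the conversion a 4 KiB-formatted namespace would advertise a limit eight times too large. The stand-alone illustration below is not from this patch and assumes nvme_block_nr() is the usual shift by (lba_shift - 9).

#include <stdio.h>

int main(void)
{
	unsigned int lba_shift = 12;		/* 4096-byte logical blocks */
	unsigned int max_hw_sectors = 2048;	/* 512-byte units, i.e. 1 MiB */
	unsigned int max_blocks = max_hw_sectors >> (lba_shift - 9);

	/* prints 256: the value the VPD page should carry for this namespace */
	printf("max transfer length: %u blocks\n", max_blocks);
	return 0;
}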
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bd2b3bbbb22c..713fc9ff1149 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
265 atomic_dec(&blkif->persistent_gnt_in_use); 265 atomic_dec(&blkif->persistent_gnt_in_use);
266} 266}
267 267
268static void free_persistent_gnts_unmap_callback(int result,
269 struct gntab_unmap_queue_data *data)
270{
271 struct completion *c = data->data;
272
273 /* BUG_ON used to reproduce existing behaviour,
274 but is this the best way to deal with this? */
275 BUG_ON(result);
276 complete(c);
277}
278
279static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, 268static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
280 unsigned int num) 269 unsigned int num)
281{ 270{
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
285 struct rb_node *n; 274 struct rb_node *n;
286 int segs_to_unmap = 0; 275 int segs_to_unmap = 0;
287 struct gntab_unmap_queue_data unmap_data; 276 struct gntab_unmap_queue_data unmap_data;
288 struct completion unmap_completion;
289 277
290 init_completion(&unmap_completion);
291
292 unmap_data.data = &unmap_completion;
293 unmap_data.done = &free_persistent_gnts_unmap_callback;
294 unmap_data.pages = pages; 278 unmap_data.pages = pages;
295 unmap_data.unmap_ops = unmap; 279 unmap_data.unmap_ops = unmap;
296 unmap_data.kunmap_ops = NULL; 280 unmap_data.kunmap_ops = NULL;
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
310 !rb_next(&persistent_gnt->node)) { 294 !rb_next(&persistent_gnt->node)) {
311 295
312 unmap_data.count = segs_to_unmap; 296 unmap_data.count = segs_to_unmap;
313 gnttab_unmap_refs_async(&unmap_data); 297 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
314 wait_for_completion(&unmap_completion);
315 298
316 put_free_pages(blkif, pages, segs_to_unmap); 299 put_free_pages(blkif, pages, segs_to_unmap);
317 segs_to_unmap = 0; 300 segs_to_unmap = 0;
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
329 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 312 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
330 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 313 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
331 struct persistent_gnt *persistent_gnt; 314 struct persistent_gnt *persistent_gnt;
332 int ret, segs_to_unmap = 0; 315 int segs_to_unmap = 0;
333 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); 316 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
317 struct gntab_unmap_queue_data unmap_data;
318
319 unmap_data.pages = pages;
320 unmap_data.unmap_ops = unmap;
321 unmap_data.kunmap_ops = NULL;
334 322
335 while(!list_empty(&blkif->persistent_purge_list)) { 323 while(!list_empty(&blkif->persistent_purge_list)) {
336 persistent_gnt = list_first_entry(&blkif->persistent_purge_list, 324 persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
346 pages[segs_to_unmap] = persistent_gnt->page; 334 pages[segs_to_unmap] = persistent_gnt->page;
347 335
348 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 336 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
349 ret = gnttab_unmap_refs(unmap, NULL, pages, 337 unmap_data.count = segs_to_unmap;
350 segs_to_unmap); 338 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
351 BUG_ON(ret);
352 put_free_pages(blkif, pages, segs_to_unmap); 339 put_free_pages(blkif, pages, segs_to_unmap);
353 segs_to_unmap = 0; 340 segs_to_unmap = 0;
354 } 341 }
355 kfree(persistent_gnt); 342 kfree(persistent_gnt);
356 } 343 }
357 if (segs_to_unmap > 0) { 344 if (segs_to_unmap > 0) {
358 ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); 345 unmap_data.count = segs_to_unmap;
359 BUG_ON(ret); 346 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
360 put_free_pages(blkif, pages, segs_to_unmap); 347 put_free_pages(blkif, pages, segs_to_unmap);
361 } 348 }
362} 349}
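Both unmap paths in blkback now call gnttab_unmap_refs_sync() instead of open-coding a completion around the asynchronous API. The helper itself is added elsewhere in the grant-table code; the sketch below is only an outline of the pattern, assuming the helper layers an on-stack completion over gnttab_unmap_refs_async() the same way the removed blkback code did.

#include <linux/completion.h>
#include <xen/grant_table.h>

/* Sketch only: a synchronous wrapper over the asynchronous unmap API. */
struct example_unmap_done {
	struct completion completion;
	int result;
};

static void example_unmap_callback(int result,
				   struct gntab_unmap_queue_data *data)
{
	struct example_unmap_done *d = data->data;

	d->result = result;
	complete(&d->completion);
}

static int example_gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct example_unmap_done d;

	init_completion(&d.completion);
	item->data = &d;
	item->done = example_unmap_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&d.completion);

	return d.result;
}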
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c94386aa563d..8dcbced0eafd 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -74,6 +74,27 @@ static inline struct zram *dev_to_zram(struct device *dev)
74 return (struct zram *)dev_to_disk(dev)->private_data; 74 return (struct zram *)dev_to_disk(dev)->private_data;
75} 75}
76 76
77static ssize_t compact_store(struct device *dev,
78 struct device_attribute *attr, const char *buf, size_t len)
79{
80 unsigned long nr_migrated;
81 struct zram *zram = dev_to_zram(dev);
82 struct zram_meta *meta;
83
84 down_read(&zram->init_lock);
85 if (!init_done(zram)) {
86 up_read(&zram->init_lock);
87 return -EINVAL;
88 }
89
90 meta = zram->meta;
91 nr_migrated = zs_compact(meta->mem_pool);
92 atomic64_add(nr_migrated, &zram->stats.num_migrated);
93 up_read(&zram->init_lock);
94
95 return len;
96}
97
77static ssize_t disksize_show(struct device *dev, 98static ssize_t disksize_show(struct device *dev,
78 struct device_attribute *attr, char *buf) 99 struct device_attribute *attr, char *buf)
79{ 100{
@@ -1038,6 +1059,7 @@ static const struct block_device_operations zram_devops = {
1038 .owner = THIS_MODULE 1059 .owner = THIS_MODULE
1039}; 1060};
1040 1061
1062static DEVICE_ATTR_WO(compact);
1041static DEVICE_ATTR_RW(disksize); 1063static DEVICE_ATTR_RW(disksize);
1042static DEVICE_ATTR_RO(initstate); 1064static DEVICE_ATTR_RO(initstate);
1043static DEVICE_ATTR_WO(reset); 1065static DEVICE_ATTR_WO(reset);
@@ -1114,6 +1136,7 @@ static struct attribute *zram_disk_attrs[] = {
1114 &dev_attr_num_writes.attr, 1136 &dev_attr_num_writes.attr,
1115 &dev_attr_failed_reads.attr, 1137 &dev_attr_failed_reads.attr,
1116 &dev_attr_failed_writes.attr, 1138 &dev_attr_failed_writes.attr,
1139 &dev_attr_compact.attr,
1117 &dev_attr_invalid_io.attr, 1140 &dev_attr_invalid_io.attr,
1118 &dev_attr_notify_free.attr, 1141 &dev_attr_notify_free.attr,
1119 &dev_attr_zero_pages.attr, 1142 &dev_attr_zero_pages.attr,
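DEVICE_ATTR_WO(compact) exposes compact_store() as a write-only sysfs node, so once a zram device has been initialized, compaction of its zsmalloc pool can be triggered from user space by writing any value to /sys/block/zram<id>/compact (the written value itself is ignored). A minimal user-space sketch, assuming the device is zram0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/block/zram0/compact", O_WRONLY);

	if (fd < 0) {
		perror("open");		/* node absent or insufficient privileges */
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");	/* fails with EINVAL if the device is not initialized */
	close(fd);
	return 0;
}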
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 4f7e8d400bc0..6de97b3871b0 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -227,7 +227,6 @@ static void bt3c_receive(struct bt3c_info *info)
227 iobase = info->p_dev->resource[0]->start; 227 iobase = info->p_dev->resource[0]->start;
228 228
229 avail = bt3c_read(iobase, 0x7006); 229 avail = bt3c_read(iobase, 0x7006);
230 //printk("bt3c_cs: receiving %d bytes\n", avail);
231 230
232 bt3c_address(iobase, 0x7480); 231 bt3c_address(iobase, 0x7480);
233 while (size < avail) { 232 while (size < avail) {
@@ -250,7 +249,6 @@ static void bt3c_receive(struct bt3c_info *info)
250 249
251 bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L); 250 bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
252 inb(iobase + DATA_H); 251 inb(iobase + DATA_H);
253 //printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
254 252
255 switch (bt_cb(info->rx_skb)->pkt_type) { 253 switch (bt_cb(info->rx_skb)->pkt_type) {
256 254
@@ -364,7 +362,6 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
364 if (stat & 0x0001) 362 if (stat & 0x0001)
365 bt3c_receive(info); 363 bt3c_receive(info);
366 if (stat & 0x0002) { 364 if (stat & 0x0002) {
367 //BT_ERR("Ack (stat=0x%04x)", stat);
368 clear_bit(XMIT_SENDING, &(info->tx_state)); 365 clear_bit(XMIT_SENDING, &(info->tx_state));
369 bt3c_write_wakeup(info); 366 bt3c_write_wakeup(info);
370 } 367 }
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index d0741f3ed7ec..4bba86677adc 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -95,6 +95,78 @@ int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
95} 95}
96EXPORT_SYMBOL_GPL(btbcm_set_bdaddr); 96EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
97 97
98int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
99{
100 const struct hci_command_hdr *cmd;
101 const struct firmware *fw;
102 const u8 *fw_ptr;
103 size_t fw_size;
104 struct sk_buff *skb;
105 u16 opcode;
106 int err;
107
108 err = request_firmware(&fw, firmware, &hdev->dev);
109 if (err < 0) {
110 BT_INFO("%s: BCM: Patch %s not found", hdev->name, firmware);
111 return err;
112 }
113
114 /* Start Download */
115 skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
116 if (IS_ERR(skb)) {
117 err = PTR_ERR(skb);
118 BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
119 hdev->name, err);
120 goto done;
121 }
122 kfree_skb(skb);
123
124 /* 50 msec delay after Download Minidrv completes */
125 msleep(50);
126
127 fw_ptr = fw->data;
128 fw_size = fw->size;
129
130 while (fw_size >= sizeof(*cmd)) {
131 const u8 *cmd_param;
132
133 cmd = (struct hci_command_hdr *)fw_ptr;
134 fw_ptr += sizeof(*cmd);
135 fw_size -= sizeof(*cmd);
136
137 if (fw_size < cmd->plen) {
138 BT_ERR("%s: BCM: Patch %s is corrupted", hdev->name,
139 firmware);
140 err = -EINVAL;
141 goto done;
142 }
143
144 cmd_param = fw_ptr;
145 fw_ptr += cmd->plen;
146 fw_size -= cmd->plen;
147
148 opcode = le16_to_cpu(cmd->opcode);
149
150 skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
151 HCI_INIT_TIMEOUT);
152 if (IS_ERR(skb)) {
153 err = PTR_ERR(skb);
154 BT_ERR("%s: BCM: Patch command %04x failed (%d)",
155 hdev->name, opcode, err);
156 goto done;
157 }
158 kfree_skb(skb);
159 }
160
161 /* 250 msec delay after Launch Ram completes */
162 msleep(250);
163
164done:
165 release_firmware(fw);
166 return err;
167}
168EXPORT_SYMBOL(btbcm_patchram);
169
98static int btbcm_reset(struct hci_dev *hdev) 170static int btbcm_reset(struct hci_dev *hdev)
99{ 171{
100 struct sk_buff *skb; 172 struct sk_buff *skb;
@@ -198,12 +270,8 @@ static const struct {
198 270
199int btbcm_setup_patchram(struct hci_dev *hdev) 271int btbcm_setup_patchram(struct hci_dev *hdev)
200{ 272{
201 const struct hci_command_hdr *cmd;
202 const struct firmware *fw;
203 const u8 *fw_ptr;
204 size_t fw_size;
205 char fw_name[64]; 273 char fw_name[64];
206 u16 opcode, subver, rev, pid, vid; 274 u16 subver, rev, pid, vid;
207 const char *hw_name = NULL; 275 const char *hw_name = NULL;
208 struct sk_buff *skb; 276 struct sk_buff *skb;
209 struct hci_rp_read_local_version *ver; 277 struct hci_rp_read_local_version *ver;
@@ -273,74 +341,19 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
273 hw_name ? : "BCM", (subver & 0x7000) >> 13, 341 hw_name ? : "BCM", (subver & 0x7000) >> 13,
274 (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); 342 (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
275 343
276 err = request_firmware(&fw, fw_name, &hdev->dev); 344 err = btbcm_patchram(hdev, fw_name);
277 if (err < 0) { 345 if (err == -ENOENT)
278 BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
279 return 0; 346 return 0;
280 }
281
282 /* Start Download */
283 skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
284 if (IS_ERR(skb)) {
285 err = PTR_ERR(skb);
286 BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
287 hdev->name, err);
288 goto reset;
289 }
290 kfree_skb(skb);
291
292 /* 50 msec delay after Download Minidrv completes */
293 msleep(50);
294
295 fw_ptr = fw->data;
296 fw_size = fw->size;
297
298 while (fw_size >= sizeof(*cmd)) {
299 const u8 *cmd_param;
300
301 cmd = (struct hci_command_hdr *)fw_ptr;
302 fw_ptr += sizeof(*cmd);
303 fw_size -= sizeof(*cmd);
304
305 if (fw_size < cmd->plen) {
306 BT_ERR("%s: BCM: patch %s is corrupted", hdev->name,
307 fw_name);
308 err = -EINVAL;
309 goto reset;
310 }
311 347
312 cmd_param = fw_ptr;
313 fw_ptr += cmd->plen;
314 fw_size -= cmd->plen;
315
316 opcode = le16_to_cpu(cmd->opcode);
317
318 skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
319 HCI_INIT_TIMEOUT);
320 if (IS_ERR(skb)) {
321 err = PTR_ERR(skb);
322 BT_ERR("%s: BCM: patch command %04x failed (%d)",
323 hdev->name, opcode, err);
324 goto reset;
325 }
326 kfree_skb(skb);
327 }
328
329 /* 250 msec delay after Launch Ram completes */
330 msleep(250);
331
332reset:
333 /* Reset */ 348 /* Reset */
334 err = btbcm_reset(hdev); 349 err = btbcm_reset(hdev);
335 if (err) 350 if (err)
336 goto done; 351 return err;
337 352
338 /* Read Local Version Info */ 353 /* Read Local Version Info */
339 skb = btbcm_read_local_version(hdev); 354 skb = btbcm_read_local_version(hdev);
340 if (IS_ERR(skb)) { 355 if (IS_ERR(skb))
341 err = PTR_ERR(skb); 356 return PTR_ERR(skb);
342 goto done;
343 }
344 357
345 ver = (struct hci_rp_read_local_version *)skb->data; 358 ver = (struct hci_rp_read_local_version *)skb->data;
346 rev = le16_to_cpu(ver->hci_rev); 359 rev = le16_to_cpu(ver->hci_rev);
@@ -355,10 +368,7 @@ reset:
355 368
356 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 369 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
357 370
358done: 371 return 0;
359 release_firmware(fw);
360
361 return err;
362} 372}
363EXPORT_SYMBOL_GPL(btbcm_setup_patchram); 373EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
364 374
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index 34268ae3eb46..eb6ab5f9483d 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -25,6 +25,7 @@
25 25
26int btbcm_check_bdaddr(struct hci_dev *hdev); 26int btbcm_check_bdaddr(struct hci_dev *hdev);
27int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); 27int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
28int btbcm_patchram(struct hci_dev *hdev, const char *firmware);
28 29
29int btbcm_setup_patchram(struct hci_dev *hdev); 30int btbcm_setup_patchram(struct hci_dev *hdev);
30int btbcm_setup_apple(struct hci_dev *hdev); 31int btbcm_setup_apple(struct hci_dev *hdev);
@@ -41,6 +42,11 @@ static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
41 return -EOPNOTSUPP; 42 return -EOPNOTSUPP;
42} 43}
43 44
45static inline int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
46{
47 return -EOPNOTSUPP;
48}
49
44static inline int btbcm_setup_patchram(struct hci_dev *hdev) 50static inline int btbcm_setup_patchram(struct hci_dev *hdev)
45{ 51{
46 return 0; 52 return 0;
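With the download loop factored out of btbcm_setup_patchram() and exported (plus the -EOPNOTSUPP stub above for builds without BT_BCM), other Broadcom transports can reuse the patchram sequence instead of duplicating it. A hedged sketch of a caller follows; the setup function and firmware path are hypothetical, not part of this patch:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "btbcm.h"

/* Illustration only: reusing the exported helper from an hdev->setup hook. */
static int example_bcm_setup(struct hci_dev *hdev)
{
	int err;

	err = btbcm_patchram(hdev, "brcm/example-patch.hcd");
	if (err == -ENOENT)
		return 0;	/* no patch file shipped: run on ROM firmware */

	return err;		/* 0 on success, negative errno otherwise */
}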
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index de7b236eeae7..d21f3b4176d3 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/usb.h> 25#include <linux/usb.h>
26#include <linux/firmware.h> 26#include <linux/firmware.h>
27#include <asm/unaligned.h>
27 28
28#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
29#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
@@ -57,6 +58,7 @@ static struct usb_driver btusb_driver;
57#define BTUSB_AMP 0x4000 58#define BTUSB_AMP 0x4000
58#define BTUSB_QCA_ROME 0x8000 59#define BTUSB_QCA_ROME 0x8000
59#define BTUSB_BCM_APPLE 0x10000 60#define BTUSB_BCM_APPLE 0x10000
61#define BTUSB_REALTEK 0x20000
60 62
61static const struct usb_device_id btusb_table[] = { 63static const struct usb_device_id btusb_table[] = {
62 /* Generic Bluetooth USB device */ 64 /* Generic Bluetooth USB device */
@@ -288,6 +290,28 @@ static const struct usb_device_id blacklist_table[] = {
288 { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), 290 { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
289 .driver_info = BTUSB_IGNORE }, 291 .driver_info = BTUSB_IGNORE },
290 292
293 /* Realtek Bluetooth devices */
294 { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
295 .driver_info = BTUSB_REALTEK },
296
297 /* Additional Realtek 8723AE Bluetooth devices */
298 { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
299 { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
300
301 /* Additional Realtek 8723BE Bluetooth devices */
302 { USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK },
303 { USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK },
304 { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
305 { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
306 { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
307
308 /* Additional Realtek 8821AE Bluetooth devices */
309 { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
310 { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
311 { USB_DEVICE(0x13d3, 0x3458), .driver_info = BTUSB_REALTEK },
312 { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
313 { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
314
291 { } /* Terminating entry */ 315 { } /* Terminating entry */
292}; 316};
293 317
@@ -892,7 +916,7 @@ static int btusb_open(struct hci_dev *hdev)
892 */ 916 */
893 if (data->setup_on_usb) { 917 if (data->setup_on_usb) {
894 err = data->setup_on_usb(hdev); 918 err = data->setup_on_usb(hdev);
895 if (err <0) 919 if (err < 0)
896 return err; 920 return err;
897 } 921 }
898 922
@@ -1345,6 +1369,378 @@ static int btusb_setup_csr(struct hci_dev *hdev)
1345 return ret; 1369 return ret;
1346} 1370}
1347 1371
1372#define RTL_FRAG_LEN 252
1373
1374struct rtl_download_cmd {
1375 __u8 index;
1376 __u8 data[RTL_FRAG_LEN];
1377} __packed;
1378
1379struct rtl_download_response {
1380 __u8 status;
1381 __u8 index;
1382} __packed;
1383
1384struct rtl_rom_version_evt {
1385 __u8 status;
1386 __u8 version;
1387} __packed;
1388
1389struct rtl_epatch_header {
1390 __u8 signature[8];
1391 __le32 fw_version;
1392 __le16 num_patches;
1393} __packed;
1394
1395#define RTL_EPATCH_SIGNATURE "Realtech"
1396#define RTL_ROM_LMP_3499 0x3499
1397#define RTL_ROM_LMP_8723A 0x1200
1398#define RTL_ROM_LMP_8723B 0x8723
1399#define RTL_ROM_LMP_8821A 0x8821
1400#define RTL_ROM_LMP_8761A 0x8761
1401
1402static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
1403{
1404 struct rtl_rom_version_evt *rom_version;
1405 struct sk_buff *skb;
1406 int ret;
1407
1408 /* Read RTL ROM version command */
1409 skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
1410 if (IS_ERR(skb)) {
1411 BT_ERR("%s: Read ROM version failed (%ld)",
1412 hdev->name, PTR_ERR(skb));
1413 return PTR_ERR(skb);
1414 }
1415
1416 if (skb->len != sizeof(*rom_version)) {
1417 BT_ERR("%s: RTL version event length mismatch", hdev->name);
1418 kfree_skb(skb);
1419 return -EIO;
1420 }
1421
1422 rom_version = (struct rtl_rom_version_evt *)skb->data;
1423 BT_INFO("%s: rom_version status=%x version=%x",
1424 hdev->name, rom_version->status, rom_version->version);
1425
1426 ret = rom_version->status;
1427 if (ret == 0)
1428 *version = rom_version->version;
1429
1430 kfree_skb(skb);
1431 return ret;
1432}
1433
1434static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
1435 const struct firmware *fw,
1436 unsigned char **_buf)
1437{
1438 const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
1439 struct rtl_epatch_header *epatch_info;
1440 unsigned char *buf;
1441 int i, ret, len;
1442 size_t min_size;
1443 u8 opcode, length, data, rom_version = 0;
1444 int project_id = -1;
1445 const unsigned char *fwptr, *chip_id_base;
1446 const unsigned char *patch_length_base, *patch_offset_base;
1447 u32 patch_offset = 0;
1448 u16 patch_length, num_patches;
1449 const u16 project_id_to_lmp_subver[] = {
1450 RTL_ROM_LMP_8723A,
1451 RTL_ROM_LMP_8723B,
1452 RTL_ROM_LMP_8821A,
1453 RTL_ROM_LMP_8761A
1454 };
1455
1456 ret = rtl_read_rom_version(hdev, &rom_version);
1457 if (ret)
1458 return -bt_to_errno(ret);
1459
1460 min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
1461 if (fw->size < min_size)
1462 return -EINVAL;
1463
1464 fwptr = fw->data + fw->size - sizeof(extension_sig);
1465 if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
1466 BT_ERR("%s: extension section signature mismatch", hdev->name);
1467 return -EINVAL;
1468 }
1469
1470 /* Loop from the end of the firmware parsing instructions, until
1471 * we find an instruction that identifies the "project ID" for the
 1472 * hardware supported by this firmware file.
 1473 * Once we have that, we double-check that the project_id is suitable
1474 * for the hardware we are working with.
1475 */
1476 while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
1477 opcode = *--fwptr;
1478 length = *--fwptr;
1479 data = *--fwptr;
1480
1481 BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
1482
1483 if (opcode == 0xff) /* EOF */
1484 break;
1485
1486 if (length == 0) {
1487 BT_ERR("%s: found instruction with length 0",
1488 hdev->name);
1489 return -EINVAL;
1490 }
1491
1492 if (opcode == 0 && length == 1) {
1493 project_id = data;
1494 break;
1495 }
1496
1497 fwptr -= length;
1498 }
1499
1500 if (project_id < 0) {
1501 BT_ERR("%s: failed to find version instruction", hdev->name);
1502 return -EINVAL;
1503 }
1504
1505 if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
1506 BT_ERR("%s: unknown project id %d", hdev->name, project_id);
1507 return -EINVAL;
1508 }
1509
1510 if (lmp_subver != project_id_to_lmp_subver[project_id]) {
1511 BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
1512 project_id_to_lmp_subver[project_id], lmp_subver);
1513 return -EINVAL;
1514 }
1515
1516 epatch_info = (struct rtl_epatch_header *)fw->data;
1517 if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
1518 BT_ERR("%s: bad EPATCH signature", hdev->name);
1519 return -EINVAL;
1520 }
1521
1522 num_patches = le16_to_cpu(epatch_info->num_patches);
1523 BT_DBG("fw_version=%x, num_patches=%d",
1524 le32_to_cpu(epatch_info->fw_version), num_patches);
1525
1526 /* After the rtl_epatch_header there is a funky patch metadata section.
1527 * Assuming 2 patches, the layout is:
1528 * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
1529 *
1530 * Find the right patch for this chip.
1531 */
1532 min_size += 8 * num_patches;
1533 if (fw->size < min_size)
1534 return -EINVAL;
1535
1536 chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
1537 patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
1538 patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
1539 for (i = 0; i < num_patches; i++) {
1540 u16 chip_id = get_unaligned_le16(chip_id_base +
1541 (i * sizeof(u16)));
1542 if (chip_id == rom_version + 1) {
1543 patch_length = get_unaligned_le16(patch_length_base +
1544 (i * sizeof(u16)));
1545 patch_offset = get_unaligned_le32(patch_offset_base +
1546 (i * sizeof(u32)));
1547 break;
1548 }
1549 }
1550
1551 if (!patch_offset) {
1552 BT_ERR("%s: didn't find patch for chip id %d",
1553 hdev->name, rom_version);
1554 return -EINVAL;
1555 }
1556
1557 BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
1558 min_size = patch_offset + patch_length;
1559 if (fw->size < min_size)
1560 return -EINVAL;
1561
1562 /* Copy the firmware into a new buffer and write the version at
1563 * the end.
1564 */
1565 len = patch_length;
1566 buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
1567 if (!buf)
1568 return -ENOMEM;
1569
1570 memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
1571
1572 *_buf = buf;
1573 return len;
1574}
1575
1576static int rtl_download_firmware(struct hci_dev *hdev,
1577 const unsigned char *data, int fw_len)
1578{
1579 struct rtl_download_cmd *dl_cmd;
1580 int frag_num = fw_len / RTL_FRAG_LEN + 1;
1581 int frag_len = RTL_FRAG_LEN;
1582 int ret = 0;
1583 int i;
1584
1585 dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
1586 if (!dl_cmd)
1587 return -ENOMEM;
1588
1589 for (i = 0; i < frag_num; i++) {
1590 struct rtl_download_response *dl_resp;
1591 struct sk_buff *skb;
1592
1593 BT_DBG("download fw (%d/%d)", i, frag_num);
1594
1595 dl_cmd->index = i;
1596 if (i == (frag_num - 1)) {
1597 dl_cmd->index |= 0x80; /* data end */
1598 frag_len = fw_len % RTL_FRAG_LEN;
1599 }
1600 memcpy(dl_cmd->data, data, frag_len);
1601
1602 /* Send download command */
1603 skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
1604 HCI_INIT_TIMEOUT);
1605 if (IS_ERR(skb)) {
1606 BT_ERR("%s: download fw command failed (%ld)",
1607 hdev->name, PTR_ERR(skb));
1608 ret = -PTR_ERR(skb);
1609 goto out;
1610 }
1611
1612 if (skb->len != sizeof(*dl_resp)) {
1613 BT_ERR("%s: download fw event length mismatch",
1614 hdev->name);
1615 kfree_skb(skb);
1616 ret = -EIO;
1617 goto out;
1618 }
1619
1620 dl_resp = (struct rtl_download_response *)skb->data;
1621 if (dl_resp->status != 0) {
1622 kfree_skb(skb);
1623 ret = bt_to_errno(dl_resp->status);
1624 goto out;
1625 }
1626
1627 kfree_skb(skb);
1628 data += RTL_FRAG_LEN;
1629 }
1630
1631out:
1632 kfree(dl_cmd);
1633 return ret;
1634}
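
The download loop above slices the image into RTL_FRAG_LEN-byte commands, numbering each fragment in dl_cmd->index and setting bit 0x80 on the final one, whose payload is the remainder of the length. Assuming RTL_FRAG_LEN is 252 (its value in this driver), a quick standalone check of the fragment arithmetic for a hypothetical 1000-byte image:

#include <stdio.h>

#define RTL_FRAG_LEN 252	/* assumed fragment size, as in the driver */

int main(void)
{
	int fw_len = 1000;			/* example image size, not a multiple of 252 */
	int frag_num = fw_len / RTL_FRAG_LEN + 1;	/* 3 full fragments + 1 tail = 4 */
	int last_len = fw_len % RTL_FRAG_LEN;		/* 1000 - 3*252 = 244 bytes */
	int i;

	for (i = 0; i < frag_num; i++) {
		int index = i;
		int len = RTL_FRAG_LEN;

		if (i == frag_num - 1) {
			index |= 0x80;		/* "data end" marker */
			len = last_len;
		}
		printf("frag %d: index byte 0x%02x, %d payload bytes\n",
		       i, index, len);
	}
	return 0;
}
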
1635
1636static int btusb_setup_rtl8723a(struct hci_dev *hdev)
1637{
1638 struct btusb_data *data = dev_get_drvdata(&hdev->dev);
1639 struct usb_device *udev = interface_to_usbdev(data->intf);
1640 const struct firmware *fw;
1641 int ret;
1642
1643 BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
1644 ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev);
1645 if (ret < 0) {
1646 BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
1647 return ret;
1648 }
1649
1650 if (fw->size < 8) {
1651 ret = -EINVAL;
1652 goto out;
1653 }
1654
1655 /* Check that the firmware doesn't have the epatch signature
1656 * (which is only for RTL8723B and newer).
1657 */
1658 if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
1659 BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
1660 ret = -EINVAL;
1661 goto out;
1662 }
1663
1664 ret = rtl_download_firmware(hdev, fw->data, fw->size);
1665
1666out:
1667 release_firmware(fw);
1668 return ret;
1669}
1670
1671static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
1672 const char *fw_name)
1673{
1674 struct btusb_data *data = dev_get_drvdata(&hdev->dev);
1675 struct usb_device *udev = interface_to_usbdev(data->intf);
1676 unsigned char *fw_data = NULL;
1677 const struct firmware *fw;
1678 int ret;
1679
1680 BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
1681 ret = request_firmware(&fw, fw_name, &udev->dev);
1682 if (ret < 0) {
1683 BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
1684 return ret;
1685 }
1686
1687 ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
1688 if (ret < 0)
1689 goto out;
1690
1691 ret = rtl_download_firmware(hdev, fw_data, ret);
1692 kfree(fw_data);
1693 if (ret < 0)
1694 goto out;
1695
1696out:
1697 release_firmware(fw);
1698 return ret;
1699}
1700
1701static int btusb_setup_realtek(struct hci_dev *hdev)
1702{
1703 struct sk_buff *skb;
1704 struct hci_rp_read_local_version *resp;
1705 u16 lmp_subver;
1706
1707 skb = btusb_read_local_version(hdev);
1708 if (IS_ERR(skb))
1709 return -PTR_ERR(skb);
1710
1711 resp = (struct hci_rp_read_local_version *)skb->data;
1712 BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
1713 "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
1714 resp->lmp_ver, resp->lmp_subver);
1715
1716 lmp_subver = le16_to_cpu(resp->lmp_subver);
1717 kfree_skb(skb);
1718
1719 /* Match a set of subver values that correspond to stock firmware,
1720 * which is not compatible with standard btusb.
1721 * If matched, upload an alternative firmware that does conform to
1722 * standard btusb. Once that firmware is uploaded, the subver changes
1723 * to a different value.
1724 */
1725 switch (lmp_subver) {
1726 case RTL_ROM_LMP_8723A:
1727 case RTL_ROM_LMP_3499:
1728 return btusb_setup_rtl8723a(hdev);
1729 case RTL_ROM_LMP_8723B:
1730 return btusb_setup_rtl8723b(hdev, lmp_subver,
1731 "rtl_bt/rtl8723b_fw.bin");
1732 case RTL_ROM_LMP_8821A:
1733 return btusb_setup_rtl8723b(hdev, lmp_subver,
1734 "rtl_bt/rtl8821a_fw.bin");
1735 case RTL_ROM_LMP_8761A:
1736 return btusb_setup_rtl8723b(hdev, lmp_subver,
1737 "rtl_bt/rtl8761a_fw.bin");
1738 default:
1739 BT_INFO("rtl: assuming no firmware upload needed.");
1740 return 0;
1741 }
1742}
1743
1348static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, 1744static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
1349 struct intel_version *ver) 1745 struct intel_version *ver)
1350{ 1746{
@@ -2577,7 +2973,7 @@ static int btusb_setup_qca(struct hci_dev *hdev)
2577 int i, err; 2973 int i, err;
2578 2974
2579 err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver, 2975 err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver,
2580 sizeof(ver)); 2976 sizeof(ver));
2581 if (err < 0) 2977 if (err < 0)
2582 return err; 2978 return err;
2583 2979
@@ -2776,6 +3172,9 @@ static int btusb_probe(struct usb_interface *intf,
2776 hdev->set_bdaddr = btusb_set_bdaddr_ath3012; 3172 hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
2777 } 3173 }
2778 3174
3175 if (id->driver_info & BTUSB_REALTEK)
3176 hdev->setup = btusb_setup_realtek;
3177
2779 if (id->driver_info & BTUSB_AMP) { 3178 if (id->driver_info & BTUSB_AMP) {
2780 /* AMP controllers do not support SCO packets */ 3179 /* AMP controllers do not support SCO packets */
2781 data->isoc = NULL; 3180 data->isoc = NULL;
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 1b3f8647ea2f..ec8fa0e0f036 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -95,7 +95,6 @@ static void ath_hci_uart_work(struct work_struct *work)
95 hci_uart_tx_wakeup(hu); 95 hci_uart_tx_wakeup(hu);
96} 96}
97 97
98/* Initialize protocol */
99static int ath_open(struct hci_uart *hu) 98static int ath_open(struct hci_uart *hu)
100{ 99{
101 struct ath_struct *ath; 100 struct ath_struct *ath;
@@ -116,8 +115,7 @@ static int ath_open(struct hci_uart *hu)
116 return 0; 115 return 0;
117} 116}
118 117
119/* Flush protocol data */ 118static int ath_close(struct hci_uart *hu)
120static int ath_flush(struct hci_uart *hu)
121{ 119{
122 struct ath_struct *ath = hu->priv; 120 struct ath_struct *ath = hu->priv;
123 121
@@ -125,11 +123,17 @@ static int ath_flush(struct hci_uart *hu)
125 123
126 skb_queue_purge(&ath->txq); 124 skb_queue_purge(&ath->txq);
127 125
126 kfree_skb(ath->rx_skb);
127
128 cancel_work_sync(&ath->ctxtsw);
129
130 hu->priv = NULL;
131 kfree(ath);
132
128 return 0; 133 return 0;
129} 134}
130 135
131/* Close protocol */ 136static int ath_flush(struct hci_uart *hu)
132static int ath_close(struct hci_uart *hu)
133{ 137{
134 struct ath_struct *ath = hu->priv; 138 struct ath_struct *ath = hu->priv;
135 139
@@ -137,19 +141,65 @@ static int ath_close(struct hci_uart *hu)
137 141
138 skb_queue_purge(&ath->txq); 142 skb_queue_purge(&ath->txq);
139 143
140 kfree_skb(ath->rx_skb); 144 return 0;
145}
141 146
142 cancel_work_sync(&ath->ctxtsw); 147static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
148{
149 struct sk_buff *skb;
150 u8 buf[10];
151 int err;
152
153 buf[0] = 0x01;
154 buf[1] = 0x01;
155 buf[2] = 0x00;
156 buf[3] = sizeof(bdaddr_t);
157 memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
158
159 skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
160 if (IS_ERR(skb)) {
161 err = PTR_ERR(skb);
162 BT_ERR("%s: Change address command failed (%d)",
163 hdev->name, err);
164 return err;
165 }
166 kfree_skb(skb);
143 167
144 hu->priv = NULL; 168 return 0;
145 kfree(ath); 169}
170
171static int ath_setup(struct hci_uart *hu)
172{
173 BT_DBG("hu %p", hu);
174
175 hu->hdev->set_bdaddr = ath_set_bdaddr;
146 176
147 return 0; 177 return 0;
148} 178}
149 179
180static const struct h4_recv_pkt ath_recv_pkts[] = {
181 { H4_RECV_ACL, .recv = hci_recv_frame },
182 { H4_RECV_SCO, .recv = hci_recv_frame },
183 { H4_RECV_EVENT, .recv = hci_recv_frame },
184};
185
186static int ath_recv(struct hci_uart *hu, const void *data, int count)
187{
188 struct ath_struct *ath = hu->priv;
189
190 ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
191 ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
192 if (IS_ERR(ath->rx_skb)) {
193 int err = PTR_ERR(ath->rx_skb);
194 BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
195 return err;
196 }
197
198 return count;
199}
200
150#define HCI_OP_ATH_SLEEP 0xFC04 201#define HCI_OP_ATH_SLEEP 0xFC04
151 202
152/* Enqueue frame for transmittion */
153static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) 203static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
154{ 204{
155 struct ath_struct *ath = hu->priv; 205 struct ath_struct *ath = hu->priv;
@@ -159,8 +209,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
159 return 0; 209 return 0;
160 } 210 }
161 211
162 /* 212 /* Update power management enable flag with parameters of
163 * Update power management enable flag with parameters of
164 * HCI sleep enable vendor specific HCI command. 213 * HCI sleep enable vendor specific HCI command.
165 */ 214 */
166 if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { 215 if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
@@ -190,37 +239,16 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
190 return skb_dequeue(&ath->txq); 239 return skb_dequeue(&ath->txq);
191} 240}
192 241
193static const struct h4_recv_pkt ath_recv_pkts[] = {
194 { H4_RECV_ACL, .recv = hci_recv_frame },
195 { H4_RECV_SCO, .recv = hci_recv_frame },
196 { H4_RECV_EVENT, .recv = hci_recv_frame },
197};
198
199/* Recv data */
200static int ath_recv(struct hci_uart *hu, const void *data, int count)
201{
202 struct ath_struct *ath = hu->priv;
203
204 ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
205 ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
206 if (IS_ERR(ath->rx_skb)) {
207 int err = PTR_ERR(ath->rx_skb);
208 BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
209 return err;
210 }
211
212 return count;
213}
214
215static const struct hci_uart_proto athp = { 242static const struct hci_uart_proto athp = {
216 .id = HCI_UART_ATH3K, 243 .id = HCI_UART_ATH3K,
217 .name = "ATH3K", 244 .name = "ATH3K",
218 .open = ath_open, 245 .open = ath_open,
219 .close = ath_close, 246 .close = ath_close,
247 .flush = ath_flush,
248 .setup = ath_setup,
220 .recv = ath_recv, 249 .recv = ath_recv,
221 .enqueue = ath_enqueue, 250 .enqueue = ath_enqueue,
222 .dequeue = ath_dequeue, 251 .dequeue = ath_dequeue,
223 .flush = ath_flush,
224}; 252};
225 253
226int __init ath_init(void) 254int __init ath_init(void)
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index b854125e4831..5340604b23a4 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -660,7 +660,7 @@ validate_group(struct perf_event *event)
660 * Initialise the fake PMU. We only need to populate the 660 * Initialise the fake PMU. We only need to populate the
661 * used_mask for the purposes of validation. 661 * used_mask for the purposes of validation.
662 */ 662 */
663 .used_mask = CPU_BITS_NONE, 663 .used_mask = { 0 },
664 }; 664 };
665 665
666 if (!validate_event(event->pmu, &fake_pmu, leader)) 666 if (!validate_event(event->pmu, &fake_pmu, leader))
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index 11f7982cbdb3..ebee57d715d2 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * OMAP L3 Interconnect error handling driver 2 * OMAP L3 Interconnect error handling driver
3 * 3 *
4 * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ 4 * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
5 * Santosh Shilimkar <santosh.shilimkar@ti.com> 5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * Sricharan <r.sricharan@ti.com> 6 * Sricharan <r.sricharan@ti.com>
7 * 7 *
@@ -233,7 +233,8 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
233} 233}
234 234
235static const struct of_device_id l3_noc_match[] = { 235static const struct of_device_id l3_noc_match[] = {
236 {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data}, 236 {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
237 {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
237 {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data}, 238 {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
238 {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data}, 239 {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
239 {}, 240 {},
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index 95254585db86..73431f81da28 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * OMAP L3 Interconnect error handling driver header 2 * OMAP L3 Interconnect error handling driver header
3 * 3 *
4 * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ 4 * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
5 * Santosh Shilimkar <santosh.shilimkar@ti.com> 5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * sricharan <r.sricharan@ti.com> 6 * sricharan <r.sricharan@ti.com>
7 * 7 *
@@ -175,16 +175,14 @@ static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
175}; 175};
176 176
177 177
178static struct l3_target_data omap_l3_target_data_clk3[] = { 178static struct l3_target_data omap4_l3_target_data_clk3[] = {
179 {0x0100, "EMUSS",}, 179 {0x0100, "DEBUGSS",},
180 {0x0300, "DEBUG SOURCE",},
181 {0x0, "HOST CLK3",},
182}; 180};
183 181
184static struct l3_flagmux_data omap_l3_flagmux_clk3 = { 182static struct l3_flagmux_data omap4_l3_flagmux_clk3 = {
185 .offset = 0x0200, 183 .offset = 0x0200,
186 .l3_targ = omap_l3_target_data_clk3, 184 .l3_targ = omap4_l3_target_data_clk3,
187 .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3), 185 .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3),
188}; 186};
189 187
190static struct l3_masters_data omap_l3_masters[] = { 188static struct l3_masters_data omap_l3_masters[] = {
@@ -215,21 +213,49 @@ static struct l3_masters_data omap_l3_masters[] = {
215 { 0x32, "USBHOSTFS"} 213 { 0x32, "USBHOSTFS"}
216}; 214};
217 215
218static struct l3_flagmux_data *omap_l3_flagmux[] = { 216static struct l3_flagmux_data *omap4_l3_flagmux[] = {
219 &omap_l3_flagmux_clk1, 217 &omap_l3_flagmux_clk1,
220 &omap_l3_flagmux_clk2, 218 &omap_l3_flagmux_clk2,
221 &omap_l3_flagmux_clk3, 219 &omap4_l3_flagmux_clk3,
222}; 220};
223 221
224static const struct omap_l3 omap_l3_data = { 222static const struct omap_l3 omap4_l3_data = {
225 .l3_flagmux = omap_l3_flagmux, 223 .l3_flagmux = omap4_l3_flagmux,
226 .num_modules = ARRAY_SIZE(omap_l3_flagmux), 224 .num_modules = ARRAY_SIZE(omap4_l3_flagmux),
227 .l3_masters = omap_l3_masters, 225 .l3_masters = omap_l3_masters,
228 .num_masters = ARRAY_SIZE(omap_l3_masters), 226 .num_masters = ARRAY_SIZE(omap_l3_masters),
229 /* The 6 MSBs of register field used to distinguish initiator */ 227 /* The 6 MSBs of register field used to distinguish initiator */
230 .mst_addr_mask = 0xFC, 228 .mst_addr_mask = 0xFC,
231}; 229};
232 230
231/* OMAP5 data */
232static struct l3_target_data omap5_l3_target_data_clk3[] = {
233 {0x0100, "L3INSTR",},
234 {0x0300, "DEBUGSS",},
235 {0x0, "HOSTCLK3",},
236};
237
238static struct l3_flagmux_data omap5_l3_flagmux_clk3 = {
239 .offset = 0x0200,
240 .l3_targ = omap5_l3_target_data_clk3,
241 .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3),
242};
243
244static struct l3_flagmux_data *omap5_l3_flagmux[] = {
245 &omap_l3_flagmux_clk1,
246 &omap_l3_flagmux_clk2,
247 &omap5_l3_flagmux_clk3,
248};
249
250static const struct omap_l3 omap5_l3_data = {
251 .l3_flagmux = omap5_l3_flagmux,
252 .num_modules = ARRAY_SIZE(omap5_l3_flagmux),
253 .l3_masters = omap_l3_masters,
254 .num_masters = ARRAY_SIZE(omap_l3_masters),
255 /* The 6 MSBs of register field used to distinguish initiator */
256 .mst_addr_mask = 0x7E0,
257};
258
233/* DRA7 data */ 259/* DRA7 data */
234static struct l3_target_data dra_l3_target_data_clk1[] = { 260static struct l3_target_data dra_l3_target_data_clk1[] = {
235 {0x2a00, "AES1",}, 261 {0x2a00, "AES1",},
@@ -274,7 +300,7 @@ static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
274 300
275static struct l3_target_data dra_l3_target_data_clk2[] = { 301static struct l3_target_data dra_l3_target_data_clk2[] = {
276 {0x0, "HOST CLK1",}, 302 {0x0, "HOST CLK1",},
277 {0x0, "HOST CLK2",}, 303 {0x800000, "HOST CLK2",},
278 {0xdead, L3_TARGET_NOT_SUPPORTED,}, 304 {0xdead, L3_TARGET_NOT_SUPPORTED,},
279 {0x3400, "SHA2_2",}, 305 {0x3400, "SHA2_2",},
280 {0x0900, "BB2D",}, 306 {0x0900, "BB2D",},
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index d1494ecd9e11..4b31f1387f37 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -57,7 +57,7 @@ static void bcm63xx_rng_cleanup(struct hwrng *rng)
57 val &= ~RNG_EN; 57 val &= ~RNG_EN;
58 __raw_writel(val, priv->regs + RNG_CTRL); 58 __raw_writel(val, priv->regs + RNG_CTRL);
59 59
60 clk_didsable_unprepare(prov->clk); 60 clk_disable_unprepare(priv->clk);
61} 61}
62 62
63static int bcm63xx_rng_data_present(struct hwrng *rng, int wait) 63static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
@@ -97,14 +97,14 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
97 priv->rng.name = pdev->name; 97 priv->rng.name = pdev->name;
98 priv->rng.init = bcm63xx_rng_init; 98 priv->rng.init = bcm63xx_rng_init;
99 priv->rng.cleanup = bcm63xx_rng_cleanup; 99 priv->rng.cleanup = bcm63xx_rng_cleanup;
100 prov->rng.data_present = bcm63xx_rng_data_present; 100 priv->rng.data_present = bcm63xx_rng_data_present;
101 priv->rng.data_read = bcm63xx_rng_data_read; 101 priv->rng.data_read = bcm63xx_rng_data_read;
102 102
103 priv->clk = devm_clk_get(&pdev->dev, "ipsec"); 103 priv->clk = devm_clk_get(&pdev->dev, "ipsec");
104 if (IS_ERR(priv->clk)) { 104 if (IS_ERR(priv->clk)) {
105 error = PTR_ERR(priv->clk); 105 ret = PTR_ERR(priv->clk);
106 dev_err(&pdev->dev, "no clock for device: %d\n", error); 106 dev_err(&pdev->dev, "no clock for device: %d\n", ret);
107 return error; 107 return ret;
108 } 108 }
109 109
110 if (!devm_request_mem_region(&pdev->dev, r->start, 110 if (!devm_request_mem_region(&pdev->dev, r->start,
@@ -120,11 +120,11 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
120 return -ENOMEM; 120 return -ENOMEM;
121 } 121 }
122 122
123 error = devm_hwrng_register(&pdev->dev, &priv->rng); 123 ret = devm_hwrng_register(&pdev->dev, &priv->rng);
124 if (error) { 124 if (ret) {
125 dev_err(&pdev->dev, "failed to register rng device: %d\n", 125 dev_err(&pdev->dev, "failed to register rng device: %d\n",
126 error); 126 ret);
127 return error; 127 return ret;
128 } 128 }
129 129
130 dev_info(&pdev->dev, "registered RNG driver\n"); 130 dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 9bb592872532..bf75f6361773 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2000,7 +2000,7 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
2000 seq_printf(m, " %x", intf->channels[i].address); 2000 seq_printf(m, " %x", intf->channels[i].address);
2001 seq_putc(m, '\n'); 2001 seq_putc(m, '\n');
2002 2002
2003 return seq_has_overflowed(m); 2003 return 0;
2004} 2004}
2005 2005
2006static int smi_ipmb_proc_open(struct inode *inode, struct file *file) 2006static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2023,7 +2023,7 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
2023 ipmi_version_major(&intf->bmc->id), 2023 ipmi_version_major(&intf->bmc->id),
2024 ipmi_version_minor(&intf->bmc->id)); 2024 ipmi_version_minor(&intf->bmc->id));
2025 2025
2026 return seq_has_overflowed(m); 2026 return 0;
2027} 2027}
2028 2028
2029static int smi_version_proc_open(struct inode *inode, struct file *file) 2029static int smi_version_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5e90a18afbaf..8a45e92ff60c 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -942,8 +942,7 @@ static void sender(void *send_info,
942 * If we are running to completion, start it and run 942 * If we are running to completion, start it and run
943 * transactions until everything is clear. 943 * transactions until everything is clear.
944 */ 944 */
945 smi_info->curr_msg = msg; 945 smi_info->waiting_msg = msg;
946 smi_info->waiting_msg = NULL;
947 946
948 /* 947 /*
949 * Run to completion means we are single-threaded, no 948 * Run to completion means we are single-threaded, no
@@ -2244,7 +2243,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
2244 acpi_handle handle; 2243 acpi_handle handle;
2245 acpi_status status; 2244 acpi_status status;
2246 unsigned long long tmp; 2245 unsigned long long tmp;
2247 int rv; 2246 int rv = -EINVAL;
2248 2247
2249 acpi_dev = pnp_acpi_device(dev); 2248 acpi_dev = pnp_acpi_device(dev);
2250 if (!acpi_dev) 2249 if (!acpi_dev)
@@ -2262,8 +2261,10 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
2262 2261
2263 /* _IFT tells us the interface type: KCS, BT, etc */ 2262 /* _IFT tells us the interface type: KCS, BT, etc */
2264 status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); 2263 status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
2265 if (ACPI_FAILURE(status)) 2264 if (ACPI_FAILURE(status)) {
2265 dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
2266 goto err_free; 2266 goto err_free;
2267 }
2267 2268
2268 switch (tmp) { 2269 switch (tmp) {
2269 case 1: 2270 case 1:
@@ -2276,6 +2277,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
2276 info->si_type = SI_BT; 2277 info->si_type = SI_BT;
2277 break; 2278 break;
2278 case 4: /* SSIF, just ignore */ 2279 case 4: /* SSIF, just ignore */
2280 rv = -ENODEV;
2279 goto err_free; 2281 goto err_free;
2280 default: 2282 default:
2281 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); 2283 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
@@ -2336,7 +2338,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
2336 2338
2337err_free: 2339err_free:
2338 kfree(info); 2340 kfree(info);
2339 return -EINVAL; 2341 return rv;
2340} 2342}
2341 2343
2342static void ipmi_pnp_remove(struct pnp_dev *dev) 2344static void ipmi_pnp_remove(struct pnp_dev *dev)
@@ -3080,7 +3082,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
3080 3082
3081 seq_printf(m, "%s\n", si_to_str[smi->si_type]); 3083 seq_printf(m, "%s\n", si_to_str[smi->si_type]);
3082 3084
3083 return seq_has_overflowed(m); 3085 return 0;
3084} 3086}
3085 3087
3086static int smi_type_proc_open(struct inode *inode, struct file *file) 3088static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3153,7 +3155,7 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
3153 smi->irq, 3155 smi->irq,
3154 smi->slave_addr); 3156 smi->slave_addr);
3155 3157
3156 return seq_has_overflowed(m); 3158 return 0;
3157} 3159}
3158 3160
3159static int smi_params_proc_open(struct inode *inode, struct file *file) 3161static int smi_params_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index f40e3bd2c69c..207689c444a8 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -31,7 +31,6 @@
31 * interface into the I2C driver, I believe. 31 * interface into the I2C driver, I believe.
32 */ 32 */
33 33
34#include <linux/version.h>
35#if defined(MODVERSIONS) 34#if defined(MODVERSIONS)
36#include <linux/modversions.h> 35#include <linux/modversions.h>
37#endif 36#endif
@@ -166,6 +165,9 @@ enum ssif_stat_indexes {
166 /* Number of watchdog pretimeouts. */ 165 /* Number of watchdog pretimeouts. */
167 SSIF_STAT_watchdog_pretimeouts, 166 SSIF_STAT_watchdog_pretimeouts,
168 167
 168	/* Number of alerts received. */
169 SSIF_STAT_alerts,
170
169 /* Always add statistics before this value, it must be last. */ 171 /* Always add statistics before this value, it must be last. */
170 SSIF_NUM_STATS 172 SSIF_NUM_STATS
171}; 173};
@@ -214,7 +216,16 @@ struct ssif_info {
214#define WDT_PRE_TIMEOUT_INT 0x08 216#define WDT_PRE_TIMEOUT_INT 0x08
215 unsigned char msg_flags; 217 unsigned char msg_flags;
216 218
219 u8 global_enables;
217 bool has_event_buffer; 220 bool has_event_buffer;
221 bool supports_alert;
222
223 /*
224 * Used to tell what we should do with alerts. If we are
225 * waiting on a response, read the data immediately.
226 */
227 bool got_alert;
228 bool waiting_alert;
218 229
219 /* 230 /*
220 * If set to true, this will request events the next time the 231 * If set to true, this will request events the next time the
@@ -478,13 +489,13 @@ static int ipmi_ssif_thread(void *data)
478 489
479 if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { 490 if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
480 result = i2c_smbus_write_block_data( 491 result = i2c_smbus_write_block_data(
481 ssif_info->client, SSIF_IPMI_REQUEST, 492 ssif_info->client, ssif_info->i2c_command,
482 ssif_info->i2c_data[0], 493 ssif_info->i2c_data[0],
483 ssif_info->i2c_data + 1); 494 ssif_info->i2c_data + 1);
484 ssif_info->done_handler(ssif_info, result, NULL, 0); 495 ssif_info->done_handler(ssif_info, result, NULL, 0);
485 } else { 496 } else {
486 result = i2c_smbus_read_block_data( 497 result = i2c_smbus_read_block_data(
487 ssif_info->client, SSIF_IPMI_RESPONSE, 498 ssif_info->client, ssif_info->i2c_command,
488 ssif_info->i2c_data); 499 ssif_info->i2c_data);
489 if (result < 0) 500 if (result < 0)
490 ssif_info->done_handler(ssif_info, result, 501 ssif_info->done_handler(ssif_info, result,
@@ -518,15 +529,12 @@ static int ssif_i2c_send(struct ssif_info *ssif_info,
518static void msg_done_handler(struct ssif_info *ssif_info, int result, 529static void msg_done_handler(struct ssif_info *ssif_info, int result,
519 unsigned char *data, unsigned int len); 530 unsigned char *data, unsigned int len);
520 531
521static void retry_timeout(unsigned long data) 532static void start_get(struct ssif_info *ssif_info)
522{ 533{
523 struct ssif_info *ssif_info = (void *) data;
524 int rv; 534 int rv;
525 535
526 if (ssif_info->stopping)
527 return;
528
529 ssif_info->rtc_us_timer = 0; 536 ssif_info->rtc_us_timer = 0;
537 ssif_info->multi_pos = 0;
530 538
531 rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, 539 rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
532 SSIF_IPMI_RESPONSE, 540 SSIF_IPMI_RESPONSE,
@@ -540,6 +548,46 @@ static void retry_timeout(unsigned long data)
540 } 548 }
541} 549}
542 550
551static void retry_timeout(unsigned long data)
552{
553 struct ssif_info *ssif_info = (void *) data;
554 unsigned long oflags, *flags;
555 bool waiting;
556
557 if (ssif_info->stopping)
558 return;
559
560 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
561 waiting = ssif_info->waiting_alert;
562 ssif_info->waiting_alert = false;
563 ipmi_ssif_unlock_cond(ssif_info, flags);
564
565 if (waiting)
566 start_get(ssif_info);
567}
568
569
570static void ssif_alert(struct i2c_client *client, unsigned int data)
571{
572 struct ssif_info *ssif_info = i2c_get_clientdata(client);
573 unsigned long oflags, *flags;
574 bool do_get = false;
575
576 ssif_inc_stat(ssif_info, alerts);
577
578 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
579 if (ssif_info->waiting_alert) {
580 ssif_info->waiting_alert = false;
581 del_timer(&ssif_info->retry_timer);
582 do_get = true;
583 } else if (ssif_info->curr_msg) {
584 ssif_info->got_alert = true;
585 }
586 ipmi_ssif_unlock_cond(ssif_info, flags);
587 if (do_get)
588 start_get(ssif_info);
589}
590
543static int start_resend(struct ssif_info *ssif_info); 591static int start_resend(struct ssif_info *ssif_info);
544 592
545static void msg_done_handler(struct ssif_info *ssif_info, int result, 593static void msg_done_handler(struct ssif_info *ssif_info, int result,
@@ -559,9 +607,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
559 if (ssif_info->retries_left > 0) { 607 if (ssif_info->retries_left > 0) {
560 ssif_inc_stat(ssif_info, receive_retries); 608 ssif_inc_stat(ssif_info, receive_retries);
561 609
610 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
611 ssif_info->waiting_alert = true;
612 ssif_info->rtc_us_timer = SSIF_MSG_USEC;
562 mod_timer(&ssif_info->retry_timer, 613 mod_timer(&ssif_info->retry_timer,
563 jiffies + SSIF_MSG_JIFFIES); 614 jiffies + SSIF_MSG_JIFFIES);
564 ssif_info->rtc_us_timer = SSIF_MSG_USEC; 615 ipmi_ssif_unlock_cond(ssif_info, flags);
565 return; 616 return;
566 } 617 }
567 618
@@ -581,9 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
581 ssif_inc_stat(ssif_info, received_message_parts); 632 ssif_inc_stat(ssif_info, received_message_parts);
582 633
583 /* Remove the multi-part read marker. */ 634 /* Remove the multi-part read marker. */
584 for (i = 0; i < (len-2); i++)
585 ssif_info->data[i] = data[i+2];
586 len -= 2; 635 len -= 2;
636 for (i = 0; i < len; i++)
637 ssif_info->data[i] = data[i+2];
587 ssif_info->multi_len = len; 638 ssif_info->multi_len = len;
588 ssif_info->multi_pos = 1; 639 ssif_info->multi_pos = 1;
589 640
@@ -610,9 +661,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
610 goto continue_op; 661 goto continue_op;
611 } 662 }
612 663
613 blocknum = data[ssif_info->multi_len]; 664 blocknum = data[0];
614 665
615 if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) { 666 if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
616 /* Received message too big, abort the operation. */ 667 /* Received message too big, abort the operation. */
617 result = -E2BIG; 668 result = -E2BIG;
618 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) 669 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -622,15 +673,15 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
622 } 673 }
623 674
624 /* Remove the blocknum from the data. */ 675 /* Remove the blocknum from the data. */
625 for (i = 0; i < (len-1); i++)
626 ssif_info->data[i+ssif_info->multi_len] = data[i+1];
627 len--; 676 len--;
677 for (i = 0; i < len; i++)
678 ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
628 ssif_info->multi_len += len; 679 ssif_info->multi_len += len;
629 if (blocknum == 0xff) { 680 if (blocknum == 0xff) {
630 /* End of read */ 681 /* End of read */
631 len = ssif_info->multi_len; 682 len = ssif_info->multi_len;
632 data = ssif_info->data; 683 data = ssif_info->data;
633 } else if ((blocknum+1) != ssif_info->multi_pos) { 684 } else if (blocknum + 1 != ssif_info->multi_pos) {
634 /* 685 /*
635 * Out of sequence block, just abort. Block 686 * Out of sequence block, just abort. Block
636 * numbers start at zero for the second block, 687 * numbers start at zero for the second block,
@@ -650,7 +701,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
650 if (rv < 0) { 701 if (rv < 0) {
651 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) 702 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
652 pr_info(PFX 703 pr_info(PFX
653 "Error from i2c_non_blocking_op(2)\n"); 704 "Error from ssif_i2c_send\n");
654 705
655 result = -EIO; 706 result = -EIO;
656 } else 707 } else
@@ -830,7 +881,11 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
830 } 881 }
831 882
832 if (ssif_info->multi_data) { 883 if (ssif_info->multi_data) {
833 /* In the middle of a multi-data write. */ 884 /*
885 * In the middle of a multi-data write. See the comment
886 * in the SSIF_MULTI_n_PART case in the probe function
887 * for details on the intricacies of this.
888 */
834 int left; 889 int left;
835 890
836 ssif_inc_stat(ssif_info, sent_messages_parts); 891 ssif_inc_stat(ssif_info, sent_messages_parts);
@@ -864,15 +919,32 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
864 msg_done_handler(ssif_info, -EIO, NULL, 0); 919 msg_done_handler(ssif_info, -EIO, NULL, 0);
865 } 920 }
866 } else { 921 } else {
922 unsigned long oflags, *flags;
923 bool got_alert;
924
867 ssif_inc_stat(ssif_info, sent_messages); 925 ssif_inc_stat(ssif_info, sent_messages);
868 ssif_inc_stat(ssif_info, sent_messages_parts); 926 ssif_inc_stat(ssif_info, sent_messages_parts);
869 927
870 /* Wait a jiffie then request the next message */ 928 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
871 ssif_info->retries_left = SSIF_RECV_RETRIES; 929 got_alert = ssif_info->got_alert;
872 ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; 930 if (got_alert) {
873 mod_timer(&ssif_info->retry_timer, 931 ssif_info->got_alert = false;
874 jiffies + SSIF_MSG_PART_JIFFIES); 932 ssif_info->waiting_alert = false;
875 return; 933 }
934
935 if (got_alert) {
936 ipmi_ssif_unlock_cond(ssif_info, flags);
937 /* The alert already happened, try now. */
938 retry_timeout((unsigned long) ssif_info);
939 } else {
940 /* Wait a jiffie then request the next message */
941 ssif_info->waiting_alert = true;
942 ssif_info->retries_left = SSIF_RECV_RETRIES;
943 ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
944 mod_timer(&ssif_info->retry_timer,
945 jiffies + SSIF_MSG_PART_JIFFIES);
946 ipmi_ssif_unlock_cond(ssif_info, flags);
947 }
876 } 948 }
877} 949}
878 950
@@ -881,6 +953,8 @@ static int start_resend(struct ssif_info *ssif_info)
881 int rv; 953 int rv;
882 int command; 954 int command;
883 955
956 ssif_info->got_alert = false;
957
884 if (ssif_info->data_len > 32) { 958 if (ssif_info->data_len > 32) {
885 command = SSIF_IPMI_MULTI_PART_REQUEST_START; 959 command = SSIF_IPMI_MULTI_PART_REQUEST_START;
886 ssif_info->multi_data = ssif_info->data; 960 ssif_info->multi_data = ssif_info->data;
@@ -915,7 +989,7 @@ static int start_send(struct ssif_info *ssif_info,
915 return -E2BIG; 989 return -E2BIG;
916 990
917 ssif_info->retries_left = SSIF_SEND_RETRIES; 991 ssif_info->retries_left = SSIF_SEND_RETRIES;
918 memcpy(ssif_info->data+1, data, len); 992 memcpy(ssif_info->data + 1, data, len);
919 ssif_info->data_len = len; 993 ssif_info->data_len = len;
920 return start_resend(ssif_info); 994 return start_resend(ssif_info);
921} 995}
@@ -1200,7 +1274,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
1200{ 1274{
1201 seq_puts(m, "ssif\n"); 1275 seq_puts(m, "ssif\n");
1202 1276
1203 return seq_has_overflowed(m); 1277 return 0;
1204} 1278}
1205 1279
1206static int smi_type_proc_open(struct inode *inode, struct file *file) 1280static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -1243,6 +1317,8 @@ static int smi_stats_proc_show(struct seq_file *m, void *v)
1243 ssif_get_stat(ssif_info, events)); 1317 ssif_get_stat(ssif_info, events));
1244 seq_printf(m, "watchdog_pretimeouts: %u\n", 1318 seq_printf(m, "watchdog_pretimeouts: %u\n",
1245 ssif_get_stat(ssif_info, watchdog_pretimeouts)); 1319 ssif_get_stat(ssif_info, watchdog_pretimeouts));
1320 seq_printf(m, "alerts: %u\n",
1321 ssif_get_stat(ssif_info, alerts));
1246 return 0; 1322 return 0;
1247} 1323}
1248 1324
@@ -1258,6 +1334,23 @@ static const struct file_operations smi_stats_proc_ops = {
1258 .release = single_release, 1334 .release = single_release,
1259}; 1335};
1260 1336
1337static int strcmp_nospace(char *s1, char *s2)
1338{
1339 while (*s1 && *s2) {
1340 while (isspace(*s1))
1341 s1++;
1342 while (isspace(*s2))
1343 s2++;
1344 if (*s1 > *s2)
1345 return 1;
1346 if (*s1 < *s2)
1347 return -1;
1348 s1++;
1349 s2++;
1350 }
1351 return 0;
1352}
1353
1261static struct ssif_addr_info *ssif_info_find(unsigned short addr, 1354static struct ssif_addr_info *ssif_info_find(unsigned short addr,
1262 char *adapter_name, 1355 char *adapter_name,
1263 bool match_null_name) 1356 bool match_null_name)
@@ -1272,8 +1365,10 @@ restart:
1272 /* One is NULL and one is not */ 1365 /* One is NULL and one is not */
1273 continue; 1366 continue;
1274 } 1367 }
1275 if (strcmp(info->adapter_name, adapter_name)) 1368 if (adapter_name &&
1276 /* Names to not match */ 1369 strcmp_nospace(info->adapter_name,
1370 adapter_name))
1371 /* Names do not match */
1277 continue; 1372 continue;
1278 } 1373 }
1279 found = info; 1374 found = info;
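
strcmp_nospace() above lets a hardcoded adapter name from the module parameters match a registered I2C adapter name even when the two differ only in whitespace. A small userspace rendition of the same comparison, with made-up adapter strings, behaves like this:

#include <ctype.h>
#include <stdio.h>

/* Same whitespace-skipping comparison as the helper added above. */
static int strcmp_nospace(const char *s1, const char *s2)
{
	while (*s1 && *s2) {
		while (isspace((unsigned char)*s1))
			s1++;
		while (isspace((unsigned char)*s2))
			s2++;
		if (*s1 > *s2)
			return 1;
		if (*s1 < *s2)
			return -1;
		s1++;
		s2++;
	}
	return 0;
}

int main(void)
{
	/* Hypothetical adapter names; the spacing differs but they compare equal. */
	printf("%d\n", strcmp_nospace("SMBus I801 adapter", " SMBus  I801 adapter "));
	return 0;
}
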
@@ -1306,6 +1401,12 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
1306 return false; 1401 return false;
1307} 1402}
1308 1403
1404/*
1405 * Global enables we care about.
1406 */
1407#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
1408 IPMI_BMC_EVT_MSG_INTR)
1409
1309static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) 1410static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1310{ 1411{
1311 unsigned char msg[3]; 1412 unsigned char msg[3];
@@ -1391,13 +1492,33 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1391 break; 1492 break;
1392 1493
1393 case SSIF_MULTI_2_PART: 1494 case SSIF_MULTI_2_PART:
1394 if (ssif_info->max_xmit_msg_size > 64) 1495 if (ssif_info->max_xmit_msg_size > 63)
1395 ssif_info->max_xmit_msg_size = 64; 1496 ssif_info->max_xmit_msg_size = 63;
1396 if (ssif_info->max_recv_msg_size > 62) 1497 if (ssif_info->max_recv_msg_size > 62)
1397 ssif_info->max_recv_msg_size = 62; 1498 ssif_info->max_recv_msg_size = 62;
1398 break; 1499 break;
1399 1500
1400 case SSIF_MULTI_n_PART: 1501 case SSIF_MULTI_n_PART:
1502 /*
1503 * The specification is rather confusing at
1504 * this point, but I think I understand what
1505 * is meant. At least I have a workable
1506 * solution. With multi-part messages, you
1507 * cannot send a message that is a multiple of
1508 * 32-bytes in length, because the start and
1509 * middle messages are 32-bytes and the end
1510 * message must be at least one byte. You
1511 * can't fudge on an extra byte, that would
1512 * screw up things like fru data writes. So
1513 * we limit the length to 63 bytes. That way
1514 * a 32-byte message gets sent as a single
1515 * part. A larger message will be a 32-byte
1516 * start and the next message is always going
1517 * to be 1-31 bytes in length. Not ideal, but
1518 * it should work.
1519 */
1520 if (ssif_info->max_xmit_msg_size > 63)
1521 ssif_info->max_xmit_msg_size = 63;
1401 break; 1522 break;
1402 1523
1403 default: 1524 default:
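
The 63-byte cap in the SSIF_MULTI_n_PART case follows directly from the framing rules spelled out in the comment: start and middle writes carry exactly 32 bytes and the end write must carry at least one byte (and fewer than 32), so a request whose length is an exact multiple of 32 cannot be framed at all. Capping the transmit size at 63 keeps a 32-byte request in a single write and guarantees anything longer splits into a 32-byte start plus a 1-31 byte end. A standalone sketch of that rule (illustrative only, not driver code):

#include <stdio.h>

/* Split an SSIF request of len bytes into multi-part writes:
 * start/middle parts are exactly 32 bytes, the end part must be 1..31.
 * Returns 0 if the length can be framed, -1 if it cannot. */
static int ssif_frame(int len)
{
	if (len <= 32) {
		printf("len %2d: single write\n", len);
		return 0;
	}
	if (len % 32 == 0) {
		printf("len %2d: cannot frame (end part would be 0 bytes)\n", len);
		return -1;
	}
	printf("len %2d: %d full part(s) + %d-byte end part\n",
	       len, len / 32, len % 32);
	return 0;
}

int main(void)
{
	ssif_frame(32);		/* fits in one write */
	ssif_frame(63);		/* 1 x 32 + 31-byte end */
	ssif_frame(64);		/* exact multiple of 32: not framable */
	return 0;
}
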
@@ -1407,7 +1528,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1407 } else { 1528 } else {
1408 no_support: 1529 no_support:
1409 /* Assume no multi-part or PEC support */ 1530 /* Assume no multi-part or PEC support */
1410 pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", 1531 pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
1411 rv, len, resp[2]); 1532 rv, len, resp[2]);
1412 1533
1413 ssif_info->max_xmit_msg_size = 32; 1534 ssif_info->max_xmit_msg_size = 32;
@@ -1436,6 +1557,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1436 goto found; 1557 goto found;
1437 } 1558 }
1438 1559
1560 ssif_info->global_enables = resp[3];
1561
1439 if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { 1562 if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
1440 ssif_info->has_event_buffer = true; 1563 ssif_info->has_event_buffer = true;
1441 /* buffer is already enabled, nothing to do. */ 1564 /* buffer is already enabled, nothing to do. */
@@ -1444,18 +1567,37 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1444 1567
1445 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1568 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1446 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 1569 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1447 msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; 1570 msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
1448 rv = do_cmd(client, 3, msg, &len, resp); 1571 rv = do_cmd(client, 3, msg, &len, resp);
1449 if (rv || (len < 2)) { 1572 if (rv || (len < 2)) {
1450 pr_warn(PFX "Error getting global enables: %d %d %2.2x\n", 1573 pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
1451 rv, len, resp[2]); 1574 rv, len, resp[2]);
1452 rv = 0; /* Not fatal */ 1575 rv = 0; /* Not fatal */
1453 goto found; 1576 goto found;
1454 } 1577 }
1455 1578
1456 if (resp[2] == 0) 1579 if (resp[2] == 0) {
1457 /* A successful return means the event buffer is supported. */ 1580 /* A successful return means the event buffer is supported. */
1458 ssif_info->has_event_buffer = true; 1581 ssif_info->has_event_buffer = true;
1582 ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
1583 }
1584
1585 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1586 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1587 msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
1588 rv = do_cmd(client, 3, msg, &len, resp);
1589 if (rv || (len < 2)) {
1590 pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
1591 rv, len, resp[2]);
1592 rv = 0; /* Not fatal */
1593 goto found;
1594 }
1595
1596 if (resp[2] == 0) {
1597 /* A successful return means the alert is supported. */
1598 ssif_info->supports_alert = true;
1599 ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR;
1600 }
1459 1601
1460 found: 1602 found:
1461 ssif_info->intf_num = atomic_inc_return(&next_intf); 1603 ssif_info->intf_num = atomic_inc_return(&next_intf);
@@ -1813,6 +1955,7 @@ static struct i2c_driver ssif_i2c_driver = {
1813 }, 1955 },
1814 .probe = ssif_probe, 1956 .probe = ssif_probe,
1815 .remove = ssif_remove, 1957 .remove = ssif_remove,
1958 .alert = ssif_alert,
1816 .id_table = ssif_id, 1959 .id_table = ssif_id,
1817 .detect = ssif_detect 1960 .detect = ssif_detect
1818}; 1961};
@@ -1832,7 +1975,7 @@ static int init_ipmi_ssif(void)
1832 rv = new_ssif_client(addr[i], adapter_name[i], 1975 rv = new_ssif_client(addr[i], adapter_name[i],
1833 dbg[i], slave_addrs[i], 1976 dbg[i], slave_addrs[i],
1834 SI_HARDCODED); 1977 SI_HARDCODED);
1835 if (!rv) 1978 if (rv)
1836 pr_err(PFX 1979 pr_err(PFX
1837 "Couldn't add hardcoded device at addr 0x%x\n", 1980 "Couldn't add hardcoded device at addr 0x%x\n",
1838 addr[i]); 1981 addr[i]);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fd7ac13f2574..bda2cb06dc7a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -437,6 +437,7 @@ config IMG_MDC_DMA
437 437
438config XGENE_DMA 438config XGENE_DMA
439 tristate "APM X-Gene DMA support" 439 tristate "APM X-Gene DMA support"
440 depends on ARCH_XGENE || COMPILE_TEST
440 select DMA_ENGINE 441 select DMA_ENGINE
441 select DMA_ENGINE_RAID 442 select DMA_ENGINE_RAID
442 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 443 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 0e035a8cf401..2890d744bb1b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -571,11 +571,15 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
571 571
572 chan = private_candidate(&mask, device, NULL, NULL); 572 chan = private_candidate(&mask, device, NULL, NULL);
573 if (chan) { 573 if (chan) {
574 dma_cap_set(DMA_PRIVATE, device->cap_mask);
575 device->privatecnt++;
574 err = dma_chan_get(chan); 576 err = dma_chan_get(chan);
575 if (err) { 577 if (err) {
576 pr_debug("%s: failed to get %s: (%d)\n", 578 pr_debug("%s: failed to get %s: (%d)\n",
577 __func__, dma_chan_name(chan), err); 579 __func__, dma_chan_name(chan), err);
578 chan = NULL; 580 chan = NULL;
581 if (--device->privatecnt == 0)
582 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
579 } 583 }
580 } 584 }
581 585
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index f705798ce3eb..ebd8a5f398b0 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -673,6 +673,7 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
673 * Power management 673 * Power management
674 */ 674 */
675 675
676#ifdef CONFIG_PM
676static int usb_dmac_runtime_suspend(struct device *dev) 677static int usb_dmac_runtime_suspend(struct device *dev)
677{ 678{
678 struct usb_dmac *dmac = dev_get_drvdata(dev); 679 struct usb_dmac *dmac = dev_get_drvdata(dev);
@@ -690,6 +691,7 @@ static int usb_dmac_runtime_resume(struct device *dev)
690 691
691 return usb_dmac_init(dmac); 692 return usb_dmac_init(dmac);
692} 693}
694#endif /* CONFIG_PM */
693 695
694static const struct dev_pm_ops usb_dmac_pm = { 696static const struct dev_pm_ops usb_dmac_pm = {
695 SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, 697 SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 87b8e3b900d2..5c55227a34c8 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -120,7 +120,8 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
120 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 120 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
121 if (!entry) { 121 if (!entry) {
122 kset_unregister(map_kset); 122 kset_unregister(map_kset);
123 return entry; 123 map_kset = NULL;
124 return ERR_PTR(-ENOMEM);
124 } 125 }
125 126
126 memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, 127 memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
@@ -132,6 +133,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
132 if (ret) { 133 if (ret) {
133 kobject_put(&entry->kobj); 134 kobject_put(&entry->kobj);
134 kset_unregister(map_kset); 135 kset_unregister(map_kset);
136 map_kset = NULL;
135 return ERR_PTR(ret); 137 return ERR_PTR(ret);
136 } 138 }
137 139
@@ -195,8 +197,6 @@ out_add_entry:
195 entry = *(map_entries + j); 197 entry = *(map_entries + j);
196 kobject_put(&entry->kobj); 198 kobject_put(&entry->kobj);
197 } 199 }
198 if (map_kset)
199 kset_unregister(map_kset);
200out: 200out:
201 return ret; 201 return ret;
202} 202}
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index cd1d5bf48f36..b232397ad7ec 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1054,38 +1054,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
1054 dev_err(bank->dev, "Could not get gpio dbck\n"); 1054 dev_err(bank->dev, "Could not get gpio dbck\n");
1055} 1055}
1056 1056
1057static void
1058omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
1059 unsigned int num)
1060{
1061 struct irq_chip_generic *gc;
1062 struct irq_chip_type *ct;
1063
1064 gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
1065 handle_simple_irq);
1066 if (!gc) {
1067 dev_err(bank->dev, "Memory alloc failed for gc\n");
1068 return;
1069 }
1070
1071 ct = gc->chip_types;
1072
1073 /* NOTE: No ack required, reading IRQ status clears it. */
1074 ct->chip.irq_mask = irq_gc_mask_set_bit;
1075 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
1076 ct->chip.irq_set_type = omap_gpio_irq_type;
1077
1078 if (bank->regs->wkup_en)
1079 ct->chip.irq_set_wake = omap_gpio_wake_enable;
1080
1081 ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
1082 irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
1083 IRQ_NOREQUEST | IRQ_NOPROBE, 0);
1084}
1085
1086static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) 1057static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1087{ 1058{
1088 int j;
1089 static int gpio; 1059 static int gpio;
1090 int irq_base = 0; 1060 int irq_base = 0;
1091 int ret; 1061 int ret;
@@ -1132,6 +1102,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1132 } 1102 }
1133#endif 1103#endif
1134 1104
1105 /* MPUIO is a bit different, reading IRQ status clears it */
1106 if (bank->is_mpuio) {
1107 irqc->irq_ack = dummy_irq_chip.irq_ack;
1108 irqc->irq_mask = irq_gc_mask_set_bit;
1109 irqc->irq_unmask = irq_gc_mask_clr_bit;
1110 if (!bank->regs->wkup_en)
1111 irqc->irq_set_wake = NULL;
1112 }
1113
1135 ret = gpiochip_irqchip_add(&bank->chip, irqc, 1114 ret = gpiochip_irqchip_add(&bank->chip, irqc,
1136 irq_base, omap_gpio_irq_handler, 1115 irq_base, omap_gpio_irq_handler,
1137 IRQ_TYPE_NONE); 1116 IRQ_TYPE_NONE);
@@ -1145,15 +1124,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1145 gpiochip_set_chained_irqchip(&bank->chip, irqc, 1124 gpiochip_set_chained_irqchip(&bank->chip, irqc,
1146 bank->irq, omap_gpio_irq_handler); 1125 bank->irq, omap_gpio_irq_handler);
1147 1126
1148 for (j = 0; j < bank->width; j++) {
1149 int irq = irq_find_mapping(bank->chip.irqdomain, j);
1150 if (bank->is_mpuio) {
1151 omap_mpuio_alloc_gc(bank, irq, bank->width);
1152 irq_set_chip_and_handler(irq, NULL, NULL);
1153 set_irq_flags(irq, 0);
1154 }
1155 }
1156
1157 return 0; 1127 return 0;
1158} 1128}
1159 1129
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d2303d50f561..725d16138b74 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -550,7 +550,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
550 550
551 length = min(agpio->pin_table_length, (u16)(pin_index + bits)); 551 length = min(agpio->pin_table_length, (u16)(pin_index + bits));
552 for (i = pin_index; i < length; ++i) { 552 for (i = pin_index; i < length; ++i) {
553 unsigned pin = agpio->pin_table[i]; 553 int pin = agpio->pin_table[i];
554 struct acpi_gpio_connection *conn; 554 struct acpi_gpio_connection *conn;
555 struct gpio_desc *desc; 555 struct gpio_desc *desc;
556 bool found; 556 bool found;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 7722ed53bd65..af3bc7a8033b 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -551,6 +551,7 @@ static struct class gpio_class = {
551 */ 551 */
552int gpiod_export(struct gpio_desc *desc, bool direction_may_change) 552int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
553{ 553{
554 struct gpio_chip *chip;
554 unsigned long flags; 555 unsigned long flags;
555 int status; 556 int status;
556 const char *ioname = NULL; 557 const char *ioname = NULL;
@@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
568 return -EINVAL; 569 return -EINVAL;
569 } 570 }
570 571
572 chip = desc->chip;
573
571 mutex_lock(&sysfs_lock); 574 mutex_lock(&sysfs_lock);
572 575
576 /* check if chip is being removed */
577 if (!chip || !chip->exported) {
578 status = -ENODEV;
579 goto fail_unlock;
580 }
581
573 spin_lock_irqsave(&gpio_lock, flags); 582 spin_lock_irqsave(&gpio_lock, flags);
574 if (!test_bit(FLAG_REQUESTED, &desc->flags) || 583 if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
575 test_bit(FLAG_EXPORT, &desc->flags)) { 584 test_bit(FLAG_EXPORT, &desc->flags)) {
@@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip)
783{ 792{
784 int status; 793 int status;
785 struct device *dev; 794 struct device *dev;
795 struct gpio_desc *desc;
796 unsigned int i;
786 797
787 mutex_lock(&sysfs_lock); 798 mutex_lock(&sysfs_lock);
788 dev = class_find_device(&gpio_class, NULL, chip, match_export); 799 dev = class_find_device(&gpio_class, NULL, chip, match_export);
789 if (dev) { 800 if (dev) {
790 put_device(dev); 801 put_device(dev);
791 device_unregister(dev); 802 device_unregister(dev);
803 /* prevent further gpiod exports */
792 chip->exported = false; 804 chip->exported = false;
793 status = 0; 805 status = 0;
794 } else 806 } else
@@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip)
797 809
798 if (status) 810 if (status)
799 chip_dbg(chip, "%s: status %d\n", __func__, status); 811 chip_dbg(chip, "%s: status %d\n", __func__, status);
812
813 /* unregister gpiod class devices owned by sysfs */
814 for (i = 0; i < chip->ngpio; i++) {
815 desc = &chip->desc[i];
816 if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
817 gpiod_free(desc);
818 }
800} 819}
801 820
802static int __init gpiolib_sysfs_init(void) 821static int __init gpiolib_sysfs_init(void)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 69af73f15310..596ee5cd3b84 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -430,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
430 430
431 BUG_ON(!dqm || !qpd); 431 BUG_ON(!dqm || !qpd);
432 432
433 BUG_ON(!list_empty(&qpd->queues_list)); 433 pr_debug("In func %s\n", __func__);
434 434
435 pr_debug("kfd: In func %s\n", __func__); 435 pr_debug("qpd->queues_list is %s\n",
436 list_empty(&qpd->queues_list) ? "empty" : "not empty");
436 437
437 retval = 0; 438 retval = 0;
438 mutex_lock(&dqm->lock); 439 mutex_lock(&dqm->lock);
@@ -882,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
882 return -ENOMEM; 883 return -ENOMEM;
883 } 884 }
884 885
886 init_sdma_vm(dqm, q, qpd);
887
885 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, 888 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
886 &q->gart_mqd_addr, &q->properties); 889 &q->gart_mqd_addr, &q->properties);
887 if (retval != 0) 890 if (retval != 0)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 661c6605d31b..e469c4b2e8cc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -728,9 +728,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
728 sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute", 728 sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
729 dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz( 729 dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
730 dev->gpu->kgd)); 730 dev->gpu->kgd));
731
731 sysfs_show_64bit_prop(buffer, "local_mem_size", 732 sysfs_show_64bit_prop(buffer, "local_mem_size",
732 dev->gpu->kfd2kgd->get_vmem_size( 733 (unsigned long long int) 0);
733 dev->gpu->kgd));
734 734
735 sysfs_show_32bit_prop(buffer, "fw_version", 735 sysfs_show_32bit_prop(buffer, "fw_version",
736 dev->gpu->kfd2kgd->get_fw_version( 736 dev->gpu->kfd2kgd->get_fw_version(
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c8a34476570a..af9662e58272 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
131 131
132 /* Reinitialize corresponding vblank timestamp if high-precision query 132 /* Reinitialize corresponding vblank timestamp if high-precision query
133 * available. Skip this step if query unsupported or failed. Will 133 * available. Skip this step if query unsupported or failed. Will
134 * reinitialize delayed at next vblank interrupt in that case. 134 * reinitialize delayed at next vblank interrupt in that case and
135 * assign 0 for now, to mark the vblanktimestamp as invalid.
135 */ 136 */
136 if (rc) { 137 tslot = atomic_read(&vblank->count) + diff;
137 tslot = atomic_read(&vblank->count) + diff; 138 vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
138 vblanktimestamp(dev, crtc, tslot) = t_vblank;
139 }
140 139
141 smp_mb__before_atomic(); 140 smp_mb__before_atomic();
142 atomic_add(diff, &vblank->count); 141 atomic_add(diff, &vblank->count);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3da1af46625c..773d1d24e604 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6074,6 +6074,8 @@ enum skl_disp_power_wells {
6074#define GTFIFOCTL 0x120008 6074#define GTFIFOCTL 0x120008
6075#define GT_FIFO_FREE_ENTRIES_MASK 0x7f 6075#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
6076#define GT_FIFO_NUM_RESERVED_ENTRIES 20 6076#define GT_FIFO_NUM_RESERVED_ENTRIES 20
6077#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
6078#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
6077 6079
6078#define HSW_IDICR 0x9008 6080#define HSW_IDICR 0x9008
6079#define IDIHASHMSK(x) (((x) & 0x3f) << 16) 6081#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d547d9c8dda2..d0f3cbc87474 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13635,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
13635}; 13635};
13636 13636
13637static struct intel_quirk intel_quirks[] = { 13637static struct intel_quirk intel_quirks[] = {
13638 /* HP Mini needs pipe A force quirk (LP: #322104) */
13639 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
13640
13641 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 13638 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
13642 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 13639 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
13643 13640
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d0237102c27e..f27346e907b1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1348,7 +1348,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1348 1348
1349 pipe_config->has_dp_encoder = true; 1349 pipe_config->has_dp_encoder = true;
1350 pipe_config->has_drrs = false; 1350 pipe_config->has_drrs = false;
1351 pipe_config->has_audio = intel_dp->has_audio; 1351 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1352 1352
1353 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1353 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1354 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 1354 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -2211,8 +2211,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
2211 int dotclock; 2211 int dotclock;
2212 2212
2213 tmp = I915_READ(intel_dp->output_reg); 2213 tmp = I915_READ(intel_dp->output_reg);
2214 if (tmp & DP_AUDIO_OUTPUT_ENABLE) 2214
2215 pipe_config->has_audio = true; 2215 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2216 2216
2217 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { 2217 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
2218 if (tmp & DP_SYNC_HS_HIGH) 2218 if (tmp & DP_SYNC_HS_HIGH)
@@ -3812,7 +3812,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3812 if (val == 0) 3812 if (val == 0)
3813 break; 3813 break;
3814 3814
3815 intel_dp->sink_rates[i] = val * 200; 3815 /* Value read is in kHz while drm clock is saved in deca-kHz */
3816 intel_dp->sink_rates[i] = (val * 200) / 10;
3816 } 3817 }
3817 intel_dp->num_sink_rates = i; 3818 intel_dp->num_sink_rates = i;
3818 } 3819 }
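The comment added above pins down the unit mismatch: the DPCD supported-link-rate entries are 16-bit values in units of 200 kHz, while the driver keeps clocks in 10 kHz (deca-kHz) units, hence the extra division by 10. A standalone sketch of the conversion (function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* DPCD SUPPORTED_LINK_RATES entries are in units of 200 kHz; convert to
 * the 10 kHz (deca-kHz) units used for drm clocks. */
static unsigned int dpcd_rate_to_deca_khz(uint16_t val)
{
	return (val * 200) / 10;
}

int main(void)
{
	/* 2700 * 200 kHz = 540000 kHz (HBR2) -> 54000 deca-kHz */
	printf("%u\n", dpcd_rate_to_deca_khz(2700));
	return 0;
}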
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 5abda1d2c018..fbcc7dff0d63 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -813,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
813static const struct dmi_system_id intel_dual_link_lvds[] = { 813static const struct dmi_system_id intel_dual_link_lvds[] = {
814 { 814 {
815 .callback = intel_dual_link_lvds_callback, 815 .callback = intel_dual_link_lvds_callback,
816 .ident = "Apple MacBook Pro (Core i5/i7 Series)", 816 .ident = "Apple MacBook Pro 15\" (2010)",
817 .matches = {
818 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
819 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
820 },
821 },
822 {
823 .callback = intel_dual_link_lvds_callback,
824 .ident = "Apple MacBook Pro 15\" (2011)",
817 .matches = { 825 .matches = {
818 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 826 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
819 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), 827 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
820 }, 828 },
821 }, 829 },
830 {
831 .callback = intel_dual_link_lvds_callback,
832 .ident = "Apple MacBook Pro 15\" (2012)",
833 .matches = {
834 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
835 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
836 },
837 },
822 { } /* terminating entry */ 838 { } /* terminating entry */
823}; 839};
824 840
@@ -848,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
848 if (i915.lvds_channel_mode > 0) 864 if (i915.lvds_channel_mode > 0)
849 return i915.lvds_channel_mode == 2; 865 return i915.lvds_channel_mode == 2;
850 866
867 /* single channel LVDS is limited to 112 MHz */
868 if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
869 > 112999)
870 return true;
871
851 if (dmi_check_system(intel_dual_link_lvds)) 872 if (dmi_check_system(intel_dual_link_lvds))
852 return true; 873 return true;
853 874
@@ -1111,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev)
1111out: 1132out:
1112 mutex_unlock(&dev->mode_config.mutex); 1133 mutex_unlock(&dev->mode_config.mutex);
1113 1134
1135 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
1136
1114 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); 1137 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
1115 DRM_DEBUG_KMS("detected %s-link lvds configuration\n", 1138 DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
1116 lvds_encoder->is_dual_link ? "dual" : "single"); 1139 lvds_encoder->is_dual_link ? "dual" : "single");
@@ -1125,7 +1148,6 @@ out:
1125 } 1148 }
1126 drm_connector_register(connector); 1149 drm_connector_register(connector);
1127 1150
1128 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
1129 intel_panel_setup_backlight(connector, INVALID_PIPE); 1151 intel_panel_setup_backlight(connector, INVALID_PIPE);
1130 1152
1131 return; 1153 return;
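The new check encodes the hardware limit that a single LVDS channel tops out around 112 MHz, so any panel whose fixed-mode pixel clock exceeds that must be driven dual-link even without a DMI quirk entry. A standalone sketch of the resulting decision order (threshold taken from the hunk; the module parameter and DMI steps are reduced to plain arguments):

#include <stdbool.h>
#include <stdio.h>

#define SINGLE_LINK_LVDS_MAX_KHZ 112999	/* single channel LVDS limit (~112 MHz) */

/* forced: 0 = no override, 1 = force single link, 2 = force dual link */
static bool is_dual_link_lvds(int forced, int fixed_mode_clock_khz,
			      bool dmi_says_dual)
{
	if (forced > 0)
		return forced == 2;
	if (fixed_mode_clock_khz > SINGLE_LINK_LVDS_MAX_KHZ)
		return true;
	return dmi_says_dual;
}

int main(void)
{
	/* 1: a 138.7 MHz panel needs dual link regardless of DMI data */
	printf("%d\n", is_dual_link_lvds(0, 138700, false));
	return 0;
}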
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ab5cc94588e1..ff2a74651dd4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -360,6 +360,14 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev,
360 __raw_i915_write32(dev_priv, GTFIFODBG, 360 __raw_i915_write32(dev_priv, GTFIFODBG,
361 __raw_i915_read32(dev_priv, GTFIFODBG)); 361 __raw_i915_read32(dev_priv, GTFIFODBG));
362 362
363 /* WaDisableShadowRegForCpd:chv */
364 if (IS_CHERRYVIEW(dev)) {
365 __raw_i915_write32(dev_priv, GTFIFOCTL,
366 __raw_i915_read32(dev_priv, GTFIFOCTL) |
367 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
368 GT_FIFO_CTL_RC6_POLICY_STALL);
369 }
370
363 intel_uncore_forcewake_reset(dev, restore_forcewake); 371 intel_uncore_forcewake_reset(dev, restore_forcewake);
364} 372}
365 373
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dac78ad24b31..42b2ea3fdcf3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
580 else 580 else
581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
582 582
583 /* if there is no audio, set MINM_OVER_MAXP */
584 if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
585 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
583 if (rdev->family < CHIP_RV770) 586 if (rdev->family < CHIP_RV770)
584 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 587 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
585 /* use frac fb div on APUs */ 588 /* use frac fb div on APUs */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index f57c1ab617bc..dd39f434b4a7 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1761,17 +1761,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1761 struct drm_device *dev = encoder->dev; 1761 struct drm_device *dev = encoder->dev;
1762 struct radeon_device *rdev = dev->dev_private; 1762 struct radeon_device *rdev = dev->dev_private;
1763 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1763 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1764 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1765 int encoder_mode = atombios_get_encoder_mode(encoder); 1764 int encoder_mode = atombios_get_encoder_mode(encoder);
1766 1765
1767 DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", 1766 DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
1768 radeon_encoder->encoder_id, mode, radeon_encoder->devices, 1767 radeon_encoder->encoder_id, mode, radeon_encoder->devices,
1769 radeon_encoder->active_device); 1768 radeon_encoder->active_device);
1770 1769
1771 if (connector && (radeon_audio != 0) && 1770 if ((radeon_audio != 0) &&
1772 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || 1771 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
1773 (ENCODER_MODE_IS_DP(encoder_mode) && 1772 ENCODER_MODE_IS_DP(encoder_mode)))
1774 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
1775 radeon_audio_dpms(encoder, mode); 1773 radeon_audio_dpms(encoder, mode);
1776 1774
1777 switch (radeon_encoder->encoder_id) { 1775 switch (radeon_encoder->encoder_id) {
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 3adc2afe32aa..68fd9fc677e3 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
295 WREG32(DCCG_AUDIO_DTO1_MODULE, clock); 295 WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
296 } 296 }
297} 297}
298
299void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
300{
301 struct drm_device *dev = encoder->dev;
302 struct radeon_device *rdev = dev->dev_private;
303 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
304 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
305
306 if (!dig || !dig->afmt)
307 return;
308
309 if (enable) {
310 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
311 EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
312 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
313 EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
314 EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
315 EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
316 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
317 } else {
318 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
319 }
320
321 dig->afmt->enabled = enable;
322}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index c18d4ecbd95d..0926739c9fa7 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
219 WREG32(AFMT_AVI_INFO3 + offset, 219 WREG32(AFMT_AVI_INFO3 + offset,
220 frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); 220 frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
221 221
222 WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
223 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
224 HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
225
226 WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, 222 WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
227 HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ 223 HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
228 ~HDMI_AVI_INFO_LINE_MASK); 224 ~HDMI_AVI_INFO_LINE_MASK);
229} 225}
230 226
231void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, 227void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
@@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
370 WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, 366 WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
371 AFMT_AUDIO_CHANNEL_ENABLE(0xff)); 367 AFMT_AUDIO_CHANNEL_ENABLE(0xff));
372 368
369 WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
370 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
371 HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
372
373 /* allow 60958 channel status and send audio packets fields to be updated */ 373 /* allow 60958 channel status and send audio packets fields to be updated */
374 WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, 374 WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
375 AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); 375 AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
376} 376}
377 377
378 378
@@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
398 return; 398 return;
399 399
400 if (enable) { 400 if (enable) {
401 WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, 401 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
402 HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
403
404 WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
405 HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
406 HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
407 402
408 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 403 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
409 HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ 404 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
410 HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ 405 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
406 HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
407 HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
408 HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
409 WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
410 AFMT_AUDIO_SAMPLE_SEND);
411 } else {
412 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
413 HDMI_AVI_INFO_SEND | /* enable AVI info frames */
414 HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
415 WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
416 ~AFMT_AUDIO_SAMPLE_SEND);
417 }
411 } else { 418 } else {
419 WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
420 ~AFMT_AUDIO_SAMPLE_SEND);
412 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); 421 WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
413 } 422 }
414 423
@@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
424 struct radeon_device *rdev = dev->dev_private; 433 struct radeon_device *rdev = dev->dev_private;
425 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 434 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
426 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 435 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
436 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
427 437
428 if (!dig || !dig->afmt) 438 if (!dig || !dig->afmt)
429 return; 439 return;
430 440
431 if (enable) { 441 if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
432 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 442 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
433 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 443 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
434 struct radeon_connector_atom_dig *dig_connector; 444 struct radeon_connector_atom_dig *dig_connector;
435 uint32_t val; 445 uint32_t val;
436 446
447 WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
448 AFMT_AUDIO_SAMPLE_SEND);
449
437 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, 450 WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
438 EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); 451 EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
439 452
440 if (radeon_connector->con_priv) { 453 if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
441 dig_connector = radeon_connector->con_priv; 454 dig_connector = radeon_connector->con_priv;
442 val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); 455 val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
443 val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); 456 val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
@@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
457 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ 470 EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
458 } else { 471 } else {
459 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); 472 WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
473 WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
474 ~AFMT_AUDIO_SAMPLE_SEND);
460 } 475 }
461 476
462 dig->afmt->enabled = enable; 477 dig->afmt->enabled = enable;
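Both the HDMI and DP paths above lean on the read-modify-write helpers WREG32_OR and WREG32_AND to set or clear AFMT_AUDIO_SAMPLE_SEND without disturbing the other bits in the packet-control register, and they only set it when the sink's EDID reports audio support. A standalone sketch of that idiom (the MMIO register reduced to a plain variable, bit position illustrative):

#include <stdint.h>
#include <stdio.h>

#define AFMT_AUDIO_SAMPLE_SEND_BIT (1u << 0)	/* illustrative position */

static uint32_t reg;	/* stand-in for the MMIO packet-control register */

static void reg_or(uint32_t mask)  { reg |= mask; }	/* WREG32_OR analogue */
static void reg_and(uint32_t mask) { reg &= mask; }	/* WREG32_AND analogue */

int main(void)
{
	reg = 0xff00;				/* unrelated bits already set */
	reg_or(AFMT_AUDIO_SAMPLE_SEND_BIT);	/* sink has audio: start sending */
	reg_and(~AFMT_AUDIO_SAMPLE_SEND_BIT);	/* no audio / disable: stop */
	printf("0x%x\n", reg);			/* 0xff00: other bits untouched */
	return 0;
}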
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index dd6606b8e23c..e85894ade95c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
228 WREG32(HDMI0_AVI_INFO3 + offset, 228 WREG32(HDMI0_AVI_INFO3 + offset,
229 frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); 229 frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
230 230
231 WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
232 HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
233
231 WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, 234 WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
232 HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ 235 HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
233 HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ 236 HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
234 237
235 WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
236 HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
237} 238}
238 239
239/* 240/*
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d2abe481954f..46eb0fa75a61 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1673,7 +1673,6 @@ struct radeon_uvd {
1673 struct radeon_bo *vcpu_bo; 1673 struct radeon_bo *vcpu_bo;
1674 void *cpu_addr; 1674 void *cpu_addr;
1675 uint64_t gpu_addr; 1675 uint64_t gpu_addr;
1676 void *saved_bo;
1677 atomic_t handles[RADEON_MAX_UVD_HANDLES]; 1676 atomic_t handles[RADEON_MAX_UVD_HANDLES];
1678 struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; 1677 struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
1679 unsigned img_size[RADEON_MAX_UVD_HANDLES]; 1678 unsigned img_size[RADEON_MAX_UVD_HANDLES];
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index fafd8ce4d58f..8dbf5083c4ff 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = {
1202static struct radeon_asic_ring rv770_uvd_ring = { 1202static struct radeon_asic_ring rv770_uvd_ring = {
1203 .ib_execute = &uvd_v1_0_ib_execute, 1203 .ib_execute = &uvd_v1_0_ib_execute,
1204 .emit_fence = &uvd_v2_2_fence_emit, 1204 .emit_fence = &uvd_v2_2_fence_emit,
1205 .emit_semaphore = &uvd_v1_0_semaphore_emit, 1205 .emit_semaphore = &uvd_v2_2_semaphore_emit,
1206 .cs_parse = &radeon_uvd_cs_parse, 1206 .cs_parse = &radeon_uvd_cs_parse,
1207 .ring_test = &uvd_v1_0_ring_test, 1207 .ring_test = &uvd_v1_0_ring_test,
1208 .ib_test = &uvd_v1_0_ib_test, 1208 .ib_test = &uvd_v1_0_ib_test,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index cf0a90bb61ca..a3ca8cd305c5 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
949int uvd_v2_2_resume(struct radeon_device *rdev); 949int uvd_v2_2_resume(struct radeon_device *rdev);
950void uvd_v2_2_fence_emit(struct radeon_device *rdev, 950void uvd_v2_2_fence_emit(struct radeon_device *rdev,
951 struct radeon_fence *fence); 951 struct radeon_fence *fence);
952bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
953 struct radeon_ring *ring,
954 struct radeon_semaphore *semaphore,
955 bool emit_wait);
952 956
953/* uvd v3.1 */ 957/* uvd v3.1 */
954bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, 958bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 48d49e651a30..dcb779647c57 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
102void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); 102void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
103void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); 103void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
104void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); 104void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
105void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
106 105
107static const u32 pin_offsets[7] = 106static const u32 pin_offsets[7] =
108{ 107{
@@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
240 .set_avi_packet = evergreen_set_avi_packet, 239 .set_avi_packet = evergreen_set_avi_packet,
241 .set_audio_packet = dce4_set_audio_packet, 240 .set_audio_packet = dce4_set_audio_packet,
242 .mode_set = radeon_audio_dp_mode_set, 241 .mode_set = radeon_audio_dp_mode_set,
243 .dpms = dce6_dp_enable, 242 .dpms = evergreen_dp_enable,
244}; 243};
245 244
246static void radeon_audio_interface_init(struct radeon_device *rdev) 245static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -461,30 +460,37 @@ void radeon_audio_detect(struct drm_connector *connector,
461 if (!connector || !connector->encoder) 460 if (!connector || !connector->encoder)
462 return; 461 return;
463 462
463 if (!radeon_encoder_is_digital(connector->encoder))
464 return;
465
464 rdev = connector->encoder->dev->dev_private; 466 rdev = connector->encoder->dev->dev_private;
467
468 if (!radeon_audio_chipset_supported(rdev))
469 return;
470
465 radeon_encoder = to_radeon_encoder(connector->encoder); 471 radeon_encoder = to_radeon_encoder(connector->encoder);
466 dig = radeon_encoder->enc_priv; 472 dig = radeon_encoder->enc_priv;
467 473
468 if (status == connector_status_connected) { 474 if (!dig->afmt)
469 struct radeon_connector *radeon_connector; 475 return;
470 int sink_type;
471
472 if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
473 radeon_encoder->audio = NULL;
474 return;
475 }
476 476
477 radeon_connector = to_radeon_connector(connector); 477 if (status == connector_status_connected) {
478 sink_type = radeon_dp_getsinktype(radeon_connector); 478 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
479 479
480 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && 480 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
481 sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) 481 radeon_dp_getsinktype(radeon_connector) ==
482 CONNECTOR_OBJECT_ID_DISPLAYPORT)
482 radeon_encoder->audio = rdev->audio.dp_funcs; 483 radeon_encoder->audio = rdev->audio.dp_funcs;
483 else 484 else
484 radeon_encoder->audio = rdev->audio.hdmi_funcs; 485 radeon_encoder->audio = rdev->audio.hdmi_funcs;
485 486
486 dig->afmt->pin = radeon_audio_get_pin(connector->encoder); 487 dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
487 radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 488 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
489 radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
490 } else {
491 radeon_audio_enable(rdev, dig->afmt->pin, 0);
492 dig->afmt->pin = NULL;
493 }
488 } else { 494 } else {
489 radeon_audio_enable(rdev, dig->afmt->pin, 0); 495 radeon_audio_enable(rdev, dig->afmt->pin, 0);
490 dig->afmt->pin = NULL; 496 dig->afmt->pin = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..d17d251dbd4f 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,10 @@ out:
1379 /* updated in get modes as well since we need to know if it's analog or digital */ 1379 /* updated in get modes as well since we need to know if it's analog or digital */
1380 radeon_connector_update_scratch_regs(connector, ret); 1380 radeon_connector_update_scratch_regs(connector, ret);
1381 1381
1382 if (radeon_audio != 0) 1382 if (radeon_audio != 0) {
1383 radeon_connector_get_edid(connector);
1383 radeon_audio_detect(connector, ret); 1384 radeon_audio_detect(connector, ret);
1385 }
1384 1386
1385exit: 1387exit:
1386 pm_runtime_mark_last_busy(connector->dev->dev); 1388 pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1719,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1717 1719
1718 radeon_connector_update_scratch_regs(connector, ret); 1720 radeon_connector_update_scratch_regs(connector, ret);
1719 1721
1720 if (radeon_audio != 0) 1722 if (radeon_audio != 0) {
1723 radeon_connector_get_edid(connector);
1721 radeon_audio_detect(connector, ret); 1724 radeon_audio_detect(connector, ret);
1725 }
1722 1726
1723out: 1727out:
1724 pm_runtime_mark_last_busy(connector->dev->dev); 1728 pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 4d0f96cc3da4..ab39b85e0f76 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
88 p->dma_reloc_idx = 0; 88 p->dma_reloc_idx = 0;
89 /* FIXME: we assume that each relocs use 4 dwords */ 89 /* FIXME: we assume that each relocs use 4 dwords */
90 p->nrelocs = chunk->length_dw / 4; 90 p->nrelocs = chunk->length_dw / 4;
91 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL); 91 p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
92 if (p->relocs == NULL) { 92 if (p->relocs == NULL) {
93 return -ENOMEM; 93 return -ENOMEM;
94 } 94 }
@@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
428 } 428 }
429 } 429 }
430 kfree(parser->track); 430 kfree(parser->track);
431 kfree(parser->relocs); 431 drm_free_large(parser->relocs);
432 drm_free_large(parser->vm_bos); 432 drm_free_large(parser->vm_bos);
433 for (i = 0; i < parser->nchunks; i++) 433 for (i = 0; i < parser->nchunks; i++)
434 drm_free_large(parser->chunks[i].kdata); 434 drm_free_large(parser->chunks[i].kdata);
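The switch to drm_calloc_large()/drm_free_large() is about allocation size: the relocation array grows with the command stream, and arrays beyond a page are better served by vmalloc than by a physically contiguous kcalloc. Roughly the shape of the helper, reconstructed from memory of drm_mem_util.h rather than quoted (kernel context assumed):

/* Small requests stay on kcalloc(); anything larger than a page falls back
 * to a zeroed vmalloc, since the array only needs to be virtually contiguous. */
static inline void *calloc_large_sketch(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;				/* overflow check */
	if (nmemb * size <= PAGE_SIZE)
		return kcalloc(nmemb, size, GFP_KERNEL);
	return vzalloc(nmemb * size);
}

The matching free helper checks is_vmalloc_addr() to decide between kfree() and vfree(), which is why the plain kfree(parser->relocs) had to change along with the allocation.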
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 01701376b239..eef006c48584 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -135,28 +135,31 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
135 while (it) { 135 while (it) {
136 struct radeon_mn_node *node; 136 struct radeon_mn_node *node;
137 struct radeon_bo *bo; 137 struct radeon_bo *bo;
138 int r; 138 long r;
139 139
140 node = container_of(it, struct radeon_mn_node, it); 140 node = container_of(it, struct radeon_mn_node, it);
141 it = interval_tree_iter_next(it, start, end); 141 it = interval_tree_iter_next(it, start, end);
142 142
143 list_for_each_entry(bo, &node->bos, mn_list) { 143 list_for_each_entry(bo, &node->bos, mn_list) {
144 144
145 if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
146 continue;
147
145 r = radeon_bo_reserve(bo, true); 148 r = radeon_bo_reserve(bo, true);
146 if (r) { 149 if (r) {
147 DRM_ERROR("(%d) failed to reserve user bo\n", r); 150 DRM_ERROR("(%ld) failed to reserve user bo\n", r);
148 continue; 151 continue;
149 } 152 }
150 153
151 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, 154 r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
152 true, false, MAX_SCHEDULE_TIMEOUT); 155 true, false, MAX_SCHEDULE_TIMEOUT);
153 if (r) 156 if (r <= 0)
154 DRM_ERROR("(%d) failed to wait for user bo\n", r); 157 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
155 158
156 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); 159 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
157 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 160 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
158 if (r) 161 if (r)
159 DRM_ERROR("(%d) failed to validate user bo\n", r); 162 DRM_ERROR("(%ld) failed to validate user bo\n", r);
160 163
161 radeon_bo_unreserve(bo); 164 radeon_bo_unreserve(bo);
162 } 165 }
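The type change from int to long matters because reservation_object_wait_timeout_rcu() returns a long with three distinct cases: a negative errno, 0 on timeout, and the remaining timeout (> 0) on success, so the old "if (r)" treated a successful wait as a failure. A standalone sketch of the corrected check (the fence wait reduced to a stub that just echoes those cases):

#include <stdio.h>

/* Mirrors the return convention: < 0 error, 0 timed out, > 0 time remaining. */
static long fake_wait(long result) { return result; }

static void check(long outcome)
{
	long r = fake_wait(outcome);

	if (r <= 0)
		printf("(%ld) failed to wait for user bo\n", r);
	else
		printf("wait ok, %ld remaining\n", r);
}

int main(void)
{
	check(-512);	/* interrupted */
	check(0);	/* timed out */
	check(100);	/* success */
	return 0;
}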
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b292aca0f342..edafd3c2b170 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
591{ 591{
592 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); 592 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
593 struct radeon_ttm_tt *gtt = (void *)ttm; 593 struct radeon_ttm_tt *gtt = (void *)ttm;
594 struct scatterlist *sg; 594 struct sg_page_iter sg_iter;
595 int i;
596 595
597 int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); 596 int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
598 enum dma_data_direction direction = write ? 597 enum dma_data_direction direction = write ?
@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
605 /* free the sg table and pages again */ 604 /* free the sg table and pages again */
606 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); 605 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
607 606
608 for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { 607 for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
609 struct page *page = sg_page(sg); 608 struct page *page = sg_page_iter_page(&sg_iter);
610
611 if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) 609 if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
612 set_page_dirty(page); 610 set_page_dirty(page);
613 611
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index c10b2aec6450..6edcb5485092 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev)
204 204
205int radeon_uvd_suspend(struct radeon_device *rdev) 205int radeon_uvd_suspend(struct radeon_device *rdev)
206{ 206{
207 unsigned size; 207 int i, r;
208 void *ptr;
209 int i;
210 208
211 if (rdev->uvd.vcpu_bo == NULL) 209 if (rdev->uvd.vcpu_bo == NULL)
212 return 0; 210 return 0;
213 211
214 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) 212 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
215 if (atomic_read(&rdev->uvd.handles[i])) 213 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
216 break; 214 if (handle != 0) {
215 struct radeon_fence *fence;
217 216
218 if (i == RADEON_MAX_UVD_HANDLES) 217 radeon_uvd_note_usage(rdev);
219 return 0;
220 218
221 size = radeon_bo_size(rdev->uvd.vcpu_bo); 219 r = radeon_uvd_get_destroy_msg(rdev,
222 size -= rdev->uvd_fw->size; 220 R600_RING_TYPE_UVD_INDEX, handle, &fence);
221 if (r) {
222 DRM_ERROR("Error destroying UVD (%d)!\n", r);
223 continue;
224 }
223 225
224 ptr = rdev->uvd.cpu_addr; 226 radeon_fence_wait(fence, false);
225 ptr += rdev->uvd_fw->size; 227 radeon_fence_unref(&fence);
226 228
227 rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); 229 rdev->uvd.filp[i] = NULL;
228 memcpy(rdev->uvd.saved_bo, ptr, size); 230 atomic_set(&rdev->uvd.handles[i], 0);
231 }
232 }
229 233
230 return 0; 234 return 0;
231} 235}
@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
246 ptr = rdev->uvd.cpu_addr; 250 ptr = rdev->uvd.cpu_addr;
247 ptr += rdev->uvd_fw->size; 251 ptr += rdev->uvd_fw->size;
248 252
249 if (rdev->uvd.saved_bo != NULL) { 253 memset(ptr, 0, size);
250 memcpy(ptr, rdev->uvd.saved_bo, size);
251 kfree(rdev->uvd.saved_bo);
252 rdev->uvd.saved_bo = NULL;
253 } else
254 memset(ptr, 0, size);
255 254
256 return 0; 255 return 0;
257} 256}
@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
396 return 0; 395 return 0;
397} 396}
398 397
398static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
399 unsigned stream_type)
400{
401 switch (stream_type) {
402 case 0: /* H264 */
403 case 1: /* VC1 */
404 /* always supported */
405 return 0;
406
407 case 3: /* MPEG2 */
408 case 4: /* MPEG4 */
409 /* only since UVD 3 */
410 if (p->rdev->family >= CHIP_PALM)
411 return 0;
412
413 /* fall through */
414 default:
415 DRM_ERROR("UVD codec not supported by hardware %d!\n",
416 stream_type);
417 return -EINVAL;
418 }
419}
420
399static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, 421static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
400 unsigned offset, unsigned buf_sizes[]) 422 unsigned offset, unsigned buf_sizes[])
401{ 423{
@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
436 return -EINVAL; 458 return -EINVAL;
437 } 459 }
438 460
439 if (msg_type == 1) { 461 switch (msg_type) {
440 /* it's a decode msg, calc buffer sizes */ 462 case 0:
441 r = radeon_uvd_cs_msg_decode(msg, buf_sizes); 463 /* it's a create msg, calc image size (width * height) */
442 /* calc image size (width * height) */ 464 img_size = msg[7] * msg[8];
443 img_size = msg[6] * msg[7]; 465
466 r = radeon_uvd_validate_codec(p, msg[4]);
444 radeon_bo_kunmap(bo); 467 radeon_bo_kunmap(bo);
445 if (r) 468 if (r)
446 return r; 469 return r;
447 470
448 } else if (msg_type == 2) { 471 /* try to alloc a new handle */
472 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
473 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
474 DRM_ERROR("Handle 0x%x already in use!\n", handle);
475 return -EINVAL;
476 }
477
478 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
479 p->rdev->uvd.filp[i] = p->filp;
480 p->rdev->uvd.img_size[i] = img_size;
481 return 0;
482 }
483 }
484
485 DRM_ERROR("No more free UVD handles!\n");
486 return -EINVAL;
487
488 case 1:
489 /* it's a decode msg, validate codec and calc buffer sizes */
490 r = radeon_uvd_validate_codec(p, msg[4]);
491 if (!r)
492 r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
493 radeon_bo_kunmap(bo);
494 if (r)
495 return r;
496
497 /* validate the handle */
498 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
499 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
500 if (p->rdev->uvd.filp[i] != p->filp) {
501 DRM_ERROR("UVD handle collision detected!\n");
502 return -EINVAL;
503 }
504 return 0;
505 }
506 }
507
508 DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
509 return -ENOENT;
510
511 case 2:
449 /* it's a destroy msg, free the handle */ 512 /* it's a destroy msg, free the handle */
450 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) 513 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
451 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); 514 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
452 radeon_bo_kunmap(bo); 515 radeon_bo_kunmap(bo);
453 return 0; 516 return 0;
454 } else {
455 /* it's a create msg, calc image size (width * height) */
456 img_size = msg[7] * msg[8];
457 radeon_bo_kunmap(bo);
458 517
459 if (msg_type != 0) { 518 default:
460 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
461 return -EINVAL;
462 }
463
464 /* it's a create msg, no special handling needed */
465 }
466
467 /* create or decode, validate the handle */
468 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
469 if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
470 return 0;
471 }
472 519
473 /* handle not found try to alloc a new one */ 520 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
474 for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { 521 return -EINVAL;
475 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
476 p->rdev->uvd.filp[i] = p->filp;
477 p->rdev->uvd.img_size[i] = img_size;
478 return 0;
479 }
480 } 522 }
481 523
482 DRM_ERROR("No more free UVD handles!\n"); 524 BUG();
483 return -EINVAL; 525 return -EINVAL;
484} 526}
485 527
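Both the create and destroy paths rely on compare-and-swap over the fixed handle table: a free slot holds 0, so exchanging 0 for the handle claims it race-free, and exchanging the handle back to 0 releases it only if that slot still owns it. A standalone sketch of the idiom using compiler builtins in place of the kernel's atomic_cmpxchg() (table size and names illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 10

static uint32_t handles[MAX_HANDLES];	/* 0 means "slot free" */

/* Claim a free slot for @handle; returns the slot index or -1 if full. */
static int alloc_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;

		if (__atomic_compare_exchange_n(&handles[i], &expected, handle,
						0, __ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return i;
	}
	return -1;
}

/* Release @handle wherever it lives; a no-op if it was never allocated. */
static void free_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = handle;

		__atomic_compare_exchange_n(&handles[i], &expected, 0, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	}
}

int main(void)
{
	printf("claimed slot %d\n", alloc_handle(0x42));
	free_handle(0x42);
	return 0;
}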
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 24f849f888bb..0de5711ac508 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
493 * 493 *
494 * @p: parser context 494 * @p: parser context
495 * @handle: handle to validate 495 * @handle: handle to validate
496 * @allocated: allocated a new handle?
496 * 497 *
497 * Validates the handle and return the found session index or -EINVAL 498 * Validates the handle and return the found session index or -EINVAL
498 * we we don't have another free session index. 499 * we we don't have another free session index.
499 */ 500 */
500int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) 501static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
502 uint32_t handle, bool *allocated)
501{ 503{
502 unsigned i; 504 unsigned i;
503 505
506 *allocated = false;
507
504 /* validate the handle */ 508 /* validate the handle */
505 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { 509 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
506 if (atomic_read(&p->rdev->vce.handles[i]) == handle) 510 if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
511 if (p->rdev->vce.filp[i] != p->filp) {
512 DRM_ERROR("VCE handle collision detected!\n");
513 return -EINVAL;
514 }
507 return i; 515 return i;
516 }
508 } 517 }
509 518
510 /* handle not found try to alloc a new one */ 519 /* handle not found try to alloc a new one */
@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
512 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { 521 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
513 p->rdev->vce.filp[i] = p->filp; 522 p->rdev->vce.filp[i] = p->filp;
514 p->rdev->vce.img_size[i] = 0; 523 p->rdev->vce.img_size[i] = 0;
524 *allocated = true;
515 return i; 525 return i;
516 } 526 }
517 } 527 }
@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
529int radeon_vce_cs_parse(struct radeon_cs_parser *p) 539int radeon_vce_cs_parse(struct radeon_cs_parser *p)
530{ 540{
531 int session_idx = -1; 541 int session_idx = -1;
532 bool destroyed = false; 542 bool destroyed = false, created = false, allocated = false;
533 uint32_t tmp, handle = 0; 543 uint32_t tmp, handle = 0;
534 uint32_t *size = &tmp; 544 uint32_t *size = &tmp;
535 int i, r; 545 int i, r = 0;
536 546
537 while (p->idx < p->chunk_ib->length_dw) { 547 while (p->idx < p->chunk_ib->length_dw) {
538 uint32_t len = radeon_get_ib_value(p, p->idx); 548 uint32_t len = radeon_get_ib_value(p, p->idx);
@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
540 550
541 if ((len < 8) || (len & 3)) { 551 if ((len < 8) || (len & 3)) {
542 DRM_ERROR("invalid VCE command length (%d)!\n", len); 552 DRM_ERROR("invalid VCE command length (%d)!\n", len);
543 return -EINVAL; 553 r = -EINVAL;
554 goto out;
544 } 555 }
545 556
546 if (destroyed) { 557 if (destroyed) {
547 DRM_ERROR("No other command allowed after destroy!\n"); 558 DRM_ERROR("No other command allowed after destroy!\n");
548 return -EINVAL; 559 r = -EINVAL;
560 goto out;
549 } 561 }
550 562
551 switch (cmd) { 563 switch (cmd) {
552 case 0x00000001: // session 564 case 0x00000001: // session
553 handle = radeon_get_ib_value(p, p->idx + 2); 565 handle = radeon_get_ib_value(p, p->idx + 2);
554 session_idx = radeon_vce_validate_handle(p, handle); 566 session_idx = radeon_vce_validate_handle(p, handle,
567 &allocated);
555 if (session_idx < 0) 568 if (session_idx < 0)
556 return session_idx; 569 return session_idx;
557 size = &p->rdev->vce.img_size[session_idx]; 570 size = &p->rdev->vce.img_size[session_idx];
@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
561 break; 574 break;
562 575
563 case 0x01000001: // create 576 case 0x01000001: // create
577 created = true;
578 if (!allocated) {
579 DRM_ERROR("Handle already in use!\n");
580 r = -EINVAL;
581 goto out;
582 }
583
564 *size = radeon_get_ib_value(p, p->idx + 8) * 584 *size = radeon_get_ib_value(p, p->idx + 8) *
565 radeon_get_ib_value(p, p->idx + 10) * 585 radeon_get_ib_value(p, p->idx + 10) *
566 8 * 3 / 2; 586 8 * 3 / 2;
@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
578 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, 598 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
579 *size); 599 *size);
580 if (r) 600 if (r)
581 return r; 601 goto out;
582 602
583 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, 603 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
584 *size / 3); 604 *size / 3);
585 if (r) 605 if (r)
586 return r; 606 goto out;
587 break; 607 break;
588 608
589 case 0x02000001: // destroy 609 case 0x02000001: // destroy
@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
594 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, 614 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
595 *size * 2); 615 *size * 2);
596 if (r) 616 if (r)
597 return r; 617 goto out;
598 break; 618 break;
599 619
600 case 0x05000004: // video bitstream buffer 620 case 0x05000004: // video bitstream buffer
@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
602 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, 622 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
603 tmp); 623 tmp);
604 if (r) 624 if (r)
605 return r; 625 goto out;
606 break; 626 break;
607 627
608 case 0x05000005: // feedback buffer 628 case 0x05000005: // feedback buffer
609 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, 629 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
610 4096); 630 4096);
611 if (r) 631 if (r)
612 return r; 632 goto out;
613 break; 633 break;
614 634
615 default: 635 default:
616 DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); 636 DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
617 return -EINVAL; 637 r = -EINVAL;
638 goto out;
618 } 639 }
619 640
620 if (session_idx == -1) { 641 if (session_idx == -1) {
621 DRM_ERROR("no session command at start of IB\n"); 642 DRM_ERROR("no session command at start of IB\n");
622 return -EINVAL; 643 r = -EINVAL;
644 goto out;
623 } 645 }
624 646
625 p->idx += len / 4; 647 p->idx += len / 4;
626 } 648 }
627 649
628 if (destroyed) { 650 if (allocated && !created) {
629 /* IB contains a destroy msg, free the handle */ 651 DRM_ERROR("New session without create command!\n");
652 r = -ENOENT;
653 }
654
655out:
656 if ((!r && destroyed) || (r && allocated)) {
657 /*
658 * IB contains a destroy msg or we have allocated an
659 * handle and got an error, anyway free the handle
660 */
630 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) 661 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
631 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); 662 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
632 } 663 }
633 664
634 return 0; 665 return r;
635} 666}
636 667
637/** 668/**
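Replacing the early returns with a single out: label lets the parser undo a half-finished session: a handle claimed by the session command is released again whenever parsing fails, and on success it is released only if the stream also carried a destroy command. A standalone sketch of that cleanup-on-error shape (commands reduced to integers, the handle table to one flag):

#include <stdbool.h>
#include <stdio.h>

static int parse(const int *cmds, int n, bool *handle_live)
{
	bool allocated = false, created = false, destroyed = false;
	int r = 0;

	for (int i = 0; i < n; ++i) {
		switch (cmds[i]) {
		case 1: allocated = true; *handle_live = true; break;	/* session */
		case 2: created = true; break;				/* create */
		case 3: destroyed = true; break;			/* destroy */
		default: r = -22; goto out;				/* -EINVAL */
		}
	}
	if (allocated && !created)
		r = -2;		/* -ENOENT: new session without a create command */
out:
	if ((!r && destroyed) || (r && allocated))
		*handle_live = false;	/* free the handle in either case */
	return r;
}

int main(void)
{
	bool live = false;
	int bad[] = { 1, 99 };	/* session, then an invalid command */
	int r = parse(bad, 2, &live);

	printf("r=%d handle live=%d\n", r, live);	/* r=-22 live=0 */
	return 0;
}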
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2a5a4a9e772d..de42fc4a22b8 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
473 } 473 }
474 474
475 mutex_lock(&vm->mutex); 475 mutex_lock(&vm->mutex);
476 soffset /= RADEON_GPU_PAGE_SIZE;
477 eoffset /= RADEON_GPU_PAGE_SIZE;
478 if (soffset || eoffset) {
479 struct interval_tree_node *it;
480 it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
481 if (it && it != &bo_va->it) {
482 struct radeon_bo_va *tmp;
483 tmp = container_of(it, struct radeon_bo_va, it);
484 /* bo and tmp overlap, invalid offset */
485 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
486 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
487 soffset, tmp->bo, tmp->it.start, tmp->it.last);
488 mutex_unlock(&vm->mutex);
489 return -EINVAL;
490 }
491 }
492
476 if (bo_va->it.start || bo_va->it.last) { 493 if (bo_va->it.start || bo_va->it.last) {
477 if (bo_va->addr) { 494 if (bo_va->addr) {
478 /* add a clone of the bo_va to clear the old address */ 495 /* add a clone of the bo_va to clear the old address */
@@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
490 spin_lock(&vm->status_lock); 507 spin_lock(&vm->status_lock);
491 list_add(&tmp->vm_status, &vm->freed); 508 list_add(&tmp->vm_status, &vm->freed);
492 spin_unlock(&vm->status_lock); 509 spin_unlock(&vm->status_lock);
510
511 bo_va->addr = 0;
493 } 512 }
494 513
495 interval_tree_remove(&bo_va->it, &vm->va); 514 interval_tree_remove(&bo_va->it, &vm->va);
@@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
497 bo_va->it.last = 0; 516 bo_va->it.last = 0;
498 } 517 }
499 518
500 soffset /= RADEON_GPU_PAGE_SIZE;
501 eoffset /= RADEON_GPU_PAGE_SIZE;
502 if (soffset || eoffset) { 519 if (soffset || eoffset) {
503 struct interval_tree_node *it;
504 it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
505 if (it) {
506 struct radeon_bo_va *tmp;
507 tmp = container_of(it, struct radeon_bo_va, it);
508 /* bo and tmp overlap, invalid offset */
509 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
510 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
511 soffset, tmp->bo, tmp->it.start, tmp->it.last);
512 mutex_unlock(&vm->mutex);
513 return -EINVAL;
514 }
515 bo_va->it.start = soffset; 520 bo_va->it.start = soffset;
516 bo_va->it.last = eoffset - 1; 521 bo_va->it.last = eoffset - 1;
517 interval_tree_insert(&bo_va->it, &vm->va); 522 interval_tree_insert(&bo_va->it, &vm->va);
@@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
1107 list_del(&bo_va->bo_list); 1112 list_del(&bo_va->bo_list);
1108 1113
1109 mutex_lock(&vm->mutex); 1114 mutex_lock(&vm->mutex);
1110 interval_tree_remove(&bo_va->it, &vm->va); 1115 if (bo_va->it.start || bo_va->it.last)
1116 interval_tree_remove(&bo_va->it, &vm->va);
1111 spin_lock(&vm->status_lock); 1117 spin_lock(&vm->status_lock);
1112 list_del(&bo_va->vm_status); 1118 list_del(&bo_va->vm_status);
1113 1119
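Moving the conflict test ahead of the teardown means an invalid request can no longer clobber the existing mapping: interval_tree_iter_first() is asked for any node overlapping [soffset, eoffset - 1], and the extra "it != &bo_va->it" comparison still allows a buffer to be re-mapped over its own old range. A standalone sketch of the overlap test on plain inclusive ranges (the interval tree replaced by a linear scan for brevity):

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, last; };	/* inclusive, like bo_va->it */

/* Return true if [start, last] overlaps any range other than @self. */
static bool range_conflicts(const struct range *ranges, int n, int self,
			    unsigned long start, unsigned long last)
{
	for (int i = 0; i < n; ++i) {
		if (i == self)
			continue;	/* re-mapping over our own range is fine */
		if (start <= ranges[i].last && ranges[i].start <= last)
			return true;
	}
	return false;
}

int main(void)
{
	struct range va[] = { { 0x100, 0x1ff }, { 0x400, 0x4ff } };

	printf("%d\n", range_conflicts(va, 2, 0, 0x180, 0x220));	/* 0: only overlaps itself */
	printf("%d\n", range_conflicts(va, 2, 0, 0x3f0, 0x40f));	/* 1: hits the second range */
	return 0;
}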
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 3cf1e2921545..9ef2064b1c9c 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -989,6 +989,9 @@
989 ((n) & 0x3FFF) << 16) 989 ((n) & 0x3FFF) << 16)
990 990
991/* UVD */ 991/* UVD */
992#define UVD_SEMA_ADDR_LOW 0xef00
993#define UVD_SEMA_ADDR_HIGH 0xef04
994#define UVD_SEMA_CMD 0xef08
992#define UVD_GPCOM_VCPU_CMD 0xef0c 995#define UVD_GPCOM_VCPU_CMD 0xef0c
993#define UVD_GPCOM_VCPU_DATA0 0xef10 996#define UVD_GPCOM_VCPU_DATA0 0xef10
994#define UVD_GPCOM_VCPU_DATA1 0xef14 997#define UVD_GPCOM_VCPU_DATA1 0xef14
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index b35bccfeef79..ff8b83f5e929 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2924,6 +2924,7 @@ struct si_dpm_quirk {
2924static struct si_dpm_quirk si_dpm_quirk_list[] = { 2924static struct si_dpm_quirk si_dpm_quirk_list[] = {
2925 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ 2925 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
2926 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2926 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2927 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2927 { 0, 0, 0, 0 }, 2928 { 0, 0, 0, 0 },
2928}; 2929};
2929 2930
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index e72b3cb59358..c6b1cbca47fc 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
466 struct radeon_semaphore *semaphore, 466 struct radeon_semaphore *semaphore,
467 bool emit_wait) 467 bool emit_wait)
468{ 468{
469 uint64_t addr = semaphore->gpu_addr; 469 /* disable semaphores for UVD V1 hardware */
470 470 return false;
471 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
472 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
473
474 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
475 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
476
477 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
478 radeon_ring_write(ring, emit_wait ? 1 : 0);
479
480 return true;
481} 471}
482 472
483/** 473/**
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 89193519f8a1..7ed778cec7c6 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
60} 60}
61 61
62/** 62/**
63 * uvd_v2_2_semaphore_emit - emit semaphore command
64 *
65 * @rdev: radeon_device pointer
66 * @ring: radeon_ring pointer
67 * @semaphore: semaphore to emit commands for
68 * @emit_wait: true if we should emit a wait command
69 *
70 * Emit a semaphore command (either wait or signal) to the UVD ring.
71 */
72bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
73 struct radeon_ring *ring,
74 struct radeon_semaphore *semaphore,
75 bool emit_wait)
76{
77 uint64_t addr = semaphore->gpu_addr;
78
79 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
80 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
81
82 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
83 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
84
85 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
86 radeon_ring_write(ring, emit_wait ? 1 : 0);
87
88 return true;
89}
90
91/**
63 * uvd_v2_2_resume - memory controller programming 92 * uvd_v2_2_resume - memory controller programming
64 * 93 *
65 * @rdev: radeon_device pointer 94 * @rdev: radeon_device pointer
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ccb0ce073ef2..4557f335a8a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1409,7 +1409,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1409 struct vop *vop; 1409 struct vop *vop;
1410 struct resource *res; 1410 struct resource *res;
1411 size_t alloc_size; 1411 size_t alloc_size;
1412 int ret; 1412 int ret, irq;
1413 1413
1414 of_id = of_match_device(vop_driver_dt_match, dev); 1414 of_id = of_match_device(vop_driver_dt_match, dev);
1415 vop_data = of_id->data; 1415 vop_data = of_id->data;
@@ -1445,11 +1445,12 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1445 return ret; 1445 return ret;
1446 } 1446 }
1447 1447
1448 vop->irq = platform_get_irq(pdev, 0); 1448 irq = platform_get_irq(pdev, 0);
1449 if (vop->irq < 0) { 1449 if (irq < 0) {
1450 dev_err(dev, "cannot find irq for vop\n"); 1450 dev_err(dev, "cannot find irq for vop\n");
1451 return vop->irq; 1451 return irq;
1452 } 1452 }
1453 vop->irq = (unsigned int)irq;
1453 1454
1454 spin_lock_init(&vop->reg_lock); 1455 spin_lock_init(&vop->reg_lock);
1455 spin_lock_init(&vop->irq_lock); 1456 spin_lock_init(&vop->irq_lock);
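The indirection through a local int exists because platform_get_irq() returns a negative errno on failure; assigning it straight into the driver's unsigned irq field before the check would turn the error into a huge bogus IRQ number. A standalone sketch of the pattern (platform_get_irq() replaced by a stub):

#include <stdio.h>

struct fake_vop { unsigned int irq; };

/* Stand-in for platform_get_irq(): negative errno on failure. */
static int stub_get_irq(int fail) { return fail ? -6 /* -ENXIO */ : 37; }

static int bind(struct fake_vop *vop, int fail)
{
	int irq = stub_get_irq(fail);	/* keep it signed for the error check */

	if (irq < 0) {
		fprintf(stderr, "cannot find irq for vop\n");
		return irq;		/* propagate the errno */
	}
	vop->irq = (unsigned int)irq;	/* only now store into the unsigned field */
	return 0;
}

int main(void)
{
	struct fake_vop vop;
	int ok = bind(&vop, 0);
	int err = bind(&vop, 1);

	printf("ok=%d irq=%u err=%d\n", ok, vop.irq, err);
	return 0;
}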
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 1833abd7d3aa..bfad15a913a0 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -173,7 +173,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
173 drm->irq_enabled = true; 173 drm->irq_enabled = true;
174 174
175 /* syncpoints are used for full 32-bit hardware VBLANK counters */ 175 /* syncpoints are used for full 32-bit hardware VBLANK counters */
176 drm->vblank_disable_immediate = true;
177 drm->max_vblank_count = 0xffffffff; 176 drm->max_vblank_count = 0xffffffff;
178 177
179 err = drm_vblank_init(drm, drm->mode_config.num_crtc); 178 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index a04c49f2a011..39ea67f9b066 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -643,15 +643,6 @@ config BLK_DEV_TC86C001
643 help 643 help
644 This driver adds support for Toshiba TC86C001 GOKU-S chip. 644 This driver adds support for Toshiba TC86C001 GOKU-S chip.
645 645
646config BLK_DEV_CELLEB
647 tristate "Toshiba's Cell Reference Set IDE support"
648 depends on PPC_CELLEB
649 select BLK_DEV_IDEDMA_PCI
650 help
651 This driver provides support for the on-board IDE controller on
652 Toshiba Cell Reference Board.
653 If unsure, say Y.
654
655endif 646endif
656 647
657# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF 648# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index a04ee82f1c8f..2a8c417d4081 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
38obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o 38obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
39obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o 39obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
40obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o 40obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
41obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o
42obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o 41obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
43obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o 42obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
44obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o 43obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
deleted file mode 100644
index 2a2d188b5d5b..000000000000
--- a/drivers/ide/scc_pata.c
+++ /dev/null
@@ -1,887 +0,0 @@
1/*
2 * Support for IDE interfaces on Celleb platform
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This code is based on drivers/ide/pci/siimage.c:
7 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
8 * Copyright (C) 2003 Red Hat
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 */
24
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/delay.h>
29#include <linux/ide.h>
30#include <linux/init.h>
31
32#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
33
34#define SCC_PATA_NAME "scc IDE"
35
36#define TDVHSEL_MASTER 0x00000001
37#define TDVHSEL_SLAVE 0x00000004
38
39#define MODE_JCUSFEN 0x00000080
40
41#define CCKCTRL_ATARESET 0x00040000
42#define CCKCTRL_BUFCNT 0x00020000
43#define CCKCTRL_CRST 0x00010000
44#define CCKCTRL_OCLKEN 0x00000100
45#define CCKCTRL_ATACLKOEN 0x00000002
46#define CCKCTRL_LCLKEN 0x00000001
47
48#define QCHCD_IOS_SS 0x00000001
49
50#define QCHSD_STPDIAG 0x00020000
51
52#define INTMASK_MSK 0xD1000012
53#define INTSTS_SERROR 0x80000000
54#define INTSTS_PRERR 0x40000000
55#define INTSTS_RERR 0x10000000
56#define INTSTS_ICERR 0x01000000
57#define INTSTS_BMSINT 0x00000010
58#define INTSTS_BMHE 0x00000008
59#define INTSTS_IOIRQS 0x00000004
60#define INTSTS_INTRQ 0x00000002
61#define INTSTS_ACTEINT 0x00000001
62
63#define ECMODE_VALUE 0x01
64
65static struct scc_ports {
66 unsigned long ctl, dma;
67 struct ide_host *host; /* for removing port from system */
68} scc_ports[MAX_HWIFS];
69
70/* PIO transfer mode table */
71/* JCHST */
72static unsigned long JCHSTtbl[2][7] = {
73 {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
74 {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
75};
76
77/* JCHHT */
78static unsigned long JCHHTtbl[2][7] = {
79 {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
80 {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
81};
82
83/* JCHCT */
84static unsigned long JCHCTtbl[2][7] = {
85 {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
86 {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
87};
88
89
90/* DMA transfer mode table */
91/* JCHDCTM/JCHDCTS */
92static unsigned long JCHDCTxtbl[2][7] = {
93 {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
94 {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
95};
96
97/* JCSTWTM/JCSTWTS */
98static unsigned long JCSTWTxtbl[2][7] = {
99 {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
100 {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
101};
102
103/* JCTSS */
104static unsigned long JCTSStbl[2][7] = {
105 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
106 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
107};
108
109/* JCENVT */
110static unsigned long JCENVTtbl[2][7] = {
111 {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
112 {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
113};
114
115/* JCACTSELS/JCACTSELM */
116static unsigned long JCACTSELtbl[2][7] = {
117 {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
118 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
119};
120
121
122static u8 scc_ide_inb(unsigned long port)
123{
124 u32 data = in_be32((void*)port);
125 return (u8)data;
126}
127
128static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
129{
130 out_be32((void *)hwif->io_ports.command_addr, cmd);
131 eieio();
132 in_be32((void *)(hwif->dma_base + 0x01c));
133 eieio();
134}
135
136static u8 scc_read_status(ide_hwif_t *hwif)
137{
138 return (u8)in_be32((void *)hwif->io_ports.status_addr);
139}
140
141static u8 scc_read_altstatus(ide_hwif_t *hwif)
142{
143 return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
144}
145
146static u8 scc_dma_sff_read_status(ide_hwif_t *hwif)
147{
148 return (u8)in_be32((void *)(hwif->dma_base + 4));
149}
150
151static void scc_write_devctl(ide_hwif_t *hwif, u8 ctl)
152{
153 out_be32((void *)hwif->io_ports.ctl_addr, ctl);
154 eieio();
155 in_be32((void *)(hwif->dma_base + 0x01c));
156 eieio();
157}
158
159static void scc_ide_insw(unsigned long port, void *addr, u32 count)
160{
161 u16 *ptr = (u16 *)addr;
162 while (count--) {
163 *ptr++ = le16_to_cpu(in_be32((void*)port));
164 }
165}
166
167static void scc_ide_insl(unsigned long port, void *addr, u32 count)
168{
169 u16 *ptr = (u16 *)addr;
170 while (count--) {
171 *ptr++ = le16_to_cpu(in_be32((void*)port));
172 *ptr++ = le16_to_cpu(in_be32((void*)port));
173 }
174}
175
176static void scc_ide_outb(u8 addr, unsigned long port)
177{
178 out_be32((void*)port, addr);
179}
180
181static void
182scc_ide_outsw(unsigned long port, void *addr, u32 count)
183{
184 u16 *ptr = (u16 *)addr;
185 while (count--) {
186 out_be32((void*)port, cpu_to_le16(*ptr++));
187 }
188}
189
190static void
191scc_ide_outsl(unsigned long port, void *addr, u32 count)
192{
193 u16 *ptr = (u16 *)addr;
194 while (count--) {
195 out_be32((void*)port, cpu_to_le16(*ptr++));
196 out_be32((void*)port, cpu_to_le16(*ptr++));
197 }
198}
199
200/**
201 * scc_set_pio_mode - set host controller for PIO mode
202 * @hwif: port
203 * @drive: drive
204 *
205 * Load the timing settings for this device mode into the
206 * controller.
207 */
208
209static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
210{
211 struct scc_ports *ports = ide_get_hwifdata(hwif);
212 unsigned long ctl_base = ports->ctl;
213 unsigned long cckctrl_port = ctl_base + 0xff0;
214 unsigned long piosht_port = ctl_base + 0x000;
215 unsigned long pioct_port = ctl_base + 0x004;
216 unsigned long reg;
217 int offset;
218 const u8 pio = drive->pio_mode - XFER_PIO_0;
219
220 reg = in_be32((void __iomem *)cckctrl_port);
221 if (reg & CCKCTRL_ATACLKOEN) {
222 offset = 1; /* 133MHz */
223 } else {
224 offset = 0; /* 100MHz */
225 }
226 reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
227 out_be32((void __iomem *)piosht_port, reg);
228 reg = JCHCTtbl[offset][pio];
229 out_be32((void __iomem *)pioct_port, reg);
230}
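/*
 * Example: with a 133MHz ATA clock and PIO mode 4, offset = 1 and pio = 4,
 * so PIOSHT is written with JCHSTtbl[1][4] << 16 | JCHHTtbl[1][4]
 * (0x03 << 16 | 0x03 = 0x00030003) and PIOCT with JCHCTtbl[1][4] (0x09).
 */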
231
232/**
233 * scc_set_dma_mode - set host controller for DMA mode
234 * @hwif: port
235 * @drive: drive
236 *
237 * Load the timing settings for this device mode into the
238 * controller.
239 */
240
241static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
242{
243 struct scc_ports *ports = ide_get_hwifdata(hwif);
244 unsigned long ctl_base = ports->ctl;
245 unsigned long cckctrl_port = ctl_base + 0xff0;
246 unsigned long mdmact_port = ctl_base + 0x008;
247 unsigned long mcrcst_port = ctl_base + 0x00c;
248 unsigned long sdmact_port = ctl_base + 0x010;
249 unsigned long scrcst_port = ctl_base + 0x014;
250 unsigned long udenvt_port = ctl_base + 0x018;
251 unsigned long tdvhsel_port = ctl_base + 0x020;
252 int is_slave = drive->dn & 1;
253 int offset, idx;
254 unsigned long reg;
255 unsigned long jcactsel;
256 const u8 speed = drive->dma_mode;
257
258 reg = in_be32((void __iomem *)cckctrl_port);
259 if (reg & CCKCTRL_ATACLKOEN) {
260 offset = 1; /* 133MHz */
261 } else {
262 offset = 0; /* 100MHz */
263 }
264
265 idx = speed - XFER_UDMA_0;
266
267 jcactsel = JCACTSELtbl[offset][idx];
268 if (is_slave) {
269 out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
270 out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
271 jcactsel = jcactsel << 2;
272 out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
273 } else {
274 out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
275 out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
276 out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
277 }
278 reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
279 out_be32((void __iomem *)udenvt_port, reg);
280}
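/*
 * The JCACTSEL value lands in bit 0 of TDVHSEL for the master device
 * (TDVHSEL_MASTER) and, shifted left by two, in bit 2 for the slave
 * (TDVHSEL_SLAVE); the read-modify-write above therefore only touches the
 * field of the device being programmed.  UDENVT combines JCTSS in the
 * upper 16 bits with JCENVT in the lower 16 bits.
 */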
281
282static void scc_dma_host_set(ide_drive_t *drive, int on)
283{
284 ide_hwif_t *hwif = drive->hwif;
285 u8 unit = drive->dn & 1;
286 u8 dma_stat = scc_dma_sff_read_status(hwif);
287
288 if (on)
289 dma_stat |= (1 << (5 + unit));
290 else
291 dma_stat &= ~(1 << (5 + unit));
292
293 scc_ide_outb(dma_stat, hwif->dma_base + 4);
294}
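/*
 * Bits 5 and 6 of the SFF bus-master status register are the conventional
 * "drive 0/1 DMA capable" flags; (1 << (5 + unit)) selects the flag for
 * the device in question.
 */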
295
296/**
297 * scc_dma_setup - begin a DMA phase
298 * @drive: target device
299 * @cmd: command
300 *
301 * Build an IDE DMA PRD (IDE-speak for a scatter-gather table)
302 * and then set up the DMA transfer registers.
303 *
304 * Returns 0 on success. If a PIO fallback is required then 1
305 * is returned.
306 */
307
308static int scc_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
309{
310 ide_hwif_t *hwif = drive->hwif;
311 u32 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
312 u8 dma_stat;
313
314	/* if we cannot build a DMA table, fall back to PIO */
315 if (ide_build_dmatable(drive, cmd) == 0)
316 return 1;
317
318 /* PRD table */
319 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
320
321 /* specify r/w */
322 out_be32((void __iomem *)hwif->dma_base, rw);
323
324 /* read DMA status for INTR & ERROR flags */
325 dma_stat = scc_dma_sff_read_status(hwif);
326
327 /* clear INTR & ERROR flags */
328 out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
329
330 return 0;
331}
332
333static void scc_dma_start(ide_drive_t *drive)
334{
335 ide_hwif_t *hwif = drive->hwif;
336 u8 dma_cmd = scc_ide_inb(hwif->dma_base);
337
338 /* start DMA */
339 scc_ide_outb(dma_cmd | 1, hwif->dma_base);
340}
341
342static int __scc_dma_end(ide_drive_t *drive)
343{
344 ide_hwif_t *hwif = drive->hwif;
345 u8 dma_stat, dma_cmd;
346
347 /* get DMA command mode */
348 dma_cmd = scc_ide_inb(hwif->dma_base);
349 /* stop DMA */
350 scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
351 /* get DMA status */
352 dma_stat = scc_dma_sff_read_status(hwif);
353 /* clear the INTR & ERROR bits */
354 scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
355 /* verify good DMA status */
356 return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
357}
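/*
 * Return value: the low three bits of the SFF DMA status are active (0x1),
 * error (0x2) and interrupt (0x4), so a clean completion leaves exactly
 * 0x4 set; any other combination is reported back as 0x10 | dma_stat.
 */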
358
359/**
360 * scc_dma_end - Stop DMA
361 * @drive: IDE drive
362 *
363 * Check and clear the INT status register.
364 * Then call __scc_dma_end().
365 */
366
367static int scc_dma_end(ide_drive_t *drive)
368{
369 ide_hwif_t *hwif = drive->hwif;
370 void __iomem *dma_base = (void __iomem *)hwif->dma_base;
371 unsigned long intsts_port = hwif->dma_base + 0x014;
372 u32 reg;
373 int dma_stat, data_loss = 0;
374 static int retry = 0;
375
376 /* errata A308 workaround: Step5 (check data loss) */
377	/* We don't check non-ide_disk devices because they are limited to UDMA4 */
378 if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
379 & ATA_ERR) &&
380 drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
381 reg = in_be32((void __iomem *)intsts_port);
382 if (!(reg & INTSTS_ACTEINT)) {
383 printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
384 drive->name);
385 data_loss = 1;
386 if (retry++) {
387 struct request *rq = hwif->rq;
388 ide_drive_t *drive;
389 int i;
390
391				/* ERROR_RESET and drive->crc_count are needed
392				 * to reduce the DMA transfer mode during the retry process.
393				 */
394 if (rq)
395 rq->errors |= ERROR_RESET;
396
397 ide_port_for_each_dev(i, drive, hwif)
398 drive->crc_count++;
399 }
400 }
401 }
402
403 while (1) {
404 reg = in_be32((void __iomem *)intsts_port);
405
406 if (reg & INTSTS_SERROR) {
407 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
408 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
409
410 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
411 continue;
412 }
413
414 if (reg & INTSTS_PRERR) {
415 u32 maea0, maec0;
416 unsigned long ctl_base = hwif->config_data;
417
418 maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
419 maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));
420
421 printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);
422
423 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
424
425 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
426 continue;
427 }
428
429 if (reg & INTSTS_RERR) {
430 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
431 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
432
433 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
434 continue;
435 }
436
437 if (reg & INTSTS_ICERR) {
438 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
439
440 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
441 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
442 continue;
443 }
444
445 if (reg & INTSTS_BMSINT) {
446 printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
447 out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);
448
449 ide_do_reset(drive);
450 continue;
451 }
452
453 if (reg & INTSTS_BMHE) {
454 out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
455 continue;
456 }
457
458 if (reg & INTSTS_ACTEINT) {
459 out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
460 continue;
461 }
462
463 if (reg & INTSTS_IOIRQS) {
464 out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
465 continue;
466 }
467 break;
468 }
469
470 dma_stat = __scc_dma_end(drive);
471 if (data_loss)
472 dma_stat |= 2; /* emulate DMA error (to retry command) */
473 return dma_stat;
474}
475
476/* returns 1 if dma irq issued, 0 otherwise */
477static int scc_dma_test_irq(ide_drive_t *drive)
478{
479 ide_hwif_t *hwif = drive->hwif;
480 u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
481
482 /* SCC errata A252,A308 workaround: Step4 */
483 if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
484 & ATA_ERR) &&
485 (int_stat & INTSTS_INTRQ))
486 return 1;
487
488 /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
489 if (int_stat & INTSTS_IOIRQS)
490 return 1;
491
492 return 0;
493}
494
495static u8 scc_udma_filter(ide_drive_t *drive)
496{
497 ide_hwif_t *hwif = drive->hwif;
498 u8 mask = hwif->ultra_mask;
499
500	/* errata A308 workaround: limit non-ide_disk drives to UDMA4 */
501 if ((drive->media != ide_disk) && (mask & 0xE0)) {
502 printk(KERN_INFO "%s: limit %s to UDMA4\n",
503 SCC_PATA_NAME, drive->name);
504 mask = ATA_UDMA4;
505 }
506
507 return mask;
508}
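/*
 * The (mask & 0xE0) test corresponds to UDMA5-7 in the ultra mask, so the
 * message is only printed when the port would otherwise offer a non-disk
 * (drive->media != ide_disk) device a mode faster than UDMA4.
 */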
509
510/**
511 * setup_mmio_scc - map CTRL/BMID region
512 * @dev: PCI device we are configuring
513 * @name: device name
514 *
515 */
516
517static int setup_mmio_scc(struct pci_dev *dev, const char *name)
518{
519 void __iomem *ctl_addr;
520 void __iomem *dma_addr;
521 int i, ret;
522
523 for (i = 0; i < MAX_HWIFS; i++) {
524 if (scc_ports[i].ctl == 0)
525 break;
526 }
527 if (i >= MAX_HWIFS)
528 return -ENOMEM;
529
530 ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
531 if (ret < 0) {
532 printk(KERN_ERR "%s: can't reserve resources\n", name);
533 return ret;
534 }
535
536 ctl_addr = pci_ioremap_bar(dev, 0);
537 if (!ctl_addr)
538 goto fail_0;
539
540 dma_addr = pci_ioremap_bar(dev, 1);
541 if (!dma_addr)
542 goto fail_1;
543
544 pci_set_master(dev);
545 scc_ports[i].ctl = (unsigned long)ctl_addr;
546 scc_ports[i].dma = (unsigned long)dma_addr;
547 pci_set_drvdata(dev, (void *) &scc_ports[i]);
548
549 return 1;
550
551 fail_1:
552 iounmap(ctl_addr);
553 fail_0:
554 return -ENOMEM;
555}
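/*
 * BAR 0 holds the CTRL register block and BAR 1 the bus-master (BMID)
 * block; a non-zero ->ctl marks the scc_ports[] slot as in use.  Success
 * is signalled with a positive return value (1), and init_setup_scc()
 * only treats rc < 0 as an error.
 */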
556
557static int scc_ide_setup_pci_device(struct pci_dev *dev,
558 const struct ide_port_info *d)
559{
560 struct scc_ports *ports = pci_get_drvdata(dev);
561 struct ide_host *host;
562 struct ide_hw hw, *hws[] = { &hw };
563 int i, rc;
564
565 memset(&hw, 0, sizeof(hw));
566 for (i = 0; i <= 8; i++)
567 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
568 hw.irq = dev->irq;
569 hw.dev = &dev->dev;
570
571 rc = ide_host_add(d, hws, 1, &host);
572 if (rc)
573 return rc;
574
575 ports->host = host;
576
577 return 0;
578}
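/*
 * The nine task-file registers (data through status/command, plus the
 * control register) are mapped from dma_base + 0x20 onwards at four-byte
 * strides, each accessed as a 32-bit big-endian word by the scc_ide_*
 * accessors above.
 */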
579
580/**
581 * init_setup_scc - set up an SCC PATA Controller
582 * @dev: PCI device
583 * @d: IDE port info
584 *
585 * Perform the initial set up for this device.
586 */
587
588static int init_setup_scc(struct pci_dev *dev, const struct ide_port_info *d)
589{
590 unsigned long ctl_base;
591 unsigned long dma_base;
592 unsigned long cckctrl_port;
593 unsigned long intmask_port;
594 unsigned long mode_port;
595 unsigned long ecmode_port;
596 u32 reg = 0;
597 struct scc_ports *ports;
598 int rc;
599
600 rc = pci_enable_device(dev);
601 if (rc)
602 goto end;
603
604 rc = setup_mmio_scc(dev, d->name);
605 if (rc < 0)
606 goto end;
607
608 ports = pci_get_drvdata(dev);
609 ctl_base = ports->ctl;
610 dma_base = ports->dma;
611 cckctrl_port = ctl_base + 0xff0;
612 intmask_port = dma_base + 0x010;
613 mode_port = ctl_base + 0x024;
614 ecmode_port = ctl_base + 0xf00;
615
616 /* controller initialization */
617 reg = 0;
618 out_be32((void*)cckctrl_port, reg);
619 reg |= CCKCTRL_ATACLKOEN;
620 out_be32((void*)cckctrl_port, reg);
621 reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
622 out_be32((void*)cckctrl_port, reg);
623 reg |= CCKCTRL_CRST;
624 out_be32((void*)cckctrl_port, reg);
625
626 for (;;) {
627 reg = in_be32((void*)cckctrl_port);
628 if (reg & CCKCTRL_CRST)
629 break;
630 udelay(5000);
631 }
632
633 reg |= CCKCTRL_ATARESET;
634 out_be32((void*)cckctrl_port, reg);
635
636 out_be32((void*)ecmode_port, ECMODE_VALUE);
637 out_be32((void*)mode_port, MODE_JCUSFEN);
638 out_be32((void*)intmask_port, INTMASK_MSK);
639
640 rc = scc_ide_setup_pci_device(dev, d);
641
642 end:
643 return rc;
644}
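/*
 * Bring-up sequence, as performed by the writes above: clear CCKCTRL, set
 * CCKCTRL_ATACLKOEN, add LCLKEN/OCLKEN, add CRST and poll until CRST reads
 * back set, then set ATARESET, and finally program ECMODE, MODE (JCUSFEN)
 * and the interrupt mask.
 */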
645
646static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
647{
648 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
649
650 if (valid & IDE_VALID_FEATURE)
651 scc_ide_outb(tf->feature, io_ports->feature_addr);
652 if (valid & IDE_VALID_NSECT)
653 scc_ide_outb(tf->nsect, io_ports->nsect_addr);
654 if (valid & IDE_VALID_LBAL)
655 scc_ide_outb(tf->lbal, io_ports->lbal_addr);
656 if (valid & IDE_VALID_LBAM)
657 scc_ide_outb(tf->lbam, io_ports->lbam_addr);
658 if (valid & IDE_VALID_LBAH)
659 scc_ide_outb(tf->lbah, io_ports->lbah_addr);
660 if (valid & IDE_VALID_DEVICE)
661 scc_ide_outb(tf->device, io_ports->device_addr);
662}
663
664static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
665{
666 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
667
668 if (valid & IDE_VALID_ERROR)
669 tf->error = scc_ide_inb(io_ports->feature_addr);
670 if (valid & IDE_VALID_NSECT)
671 tf->nsect = scc_ide_inb(io_ports->nsect_addr);
672 if (valid & IDE_VALID_LBAL)
673 tf->lbal = scc_ide_inb(io_ports->lbal_addr);
674 if (valid & IDE_VALID_LBAM)
675 tf->lbam = scc_ide_inb(io_ports->lbam_addr);
676 if (valid & IDE_VALID_LBAH)
677 tf->lbah = scc_ide_inb(io_ports->lbah_addr);
678 if (valid & IDE_VALID_DEVICE)
679 tf->device = scc_ide_inb(io_ports->device_addr);
680}
681
682static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
683 void *buf, unsigned int len)
684{
685 unsigned long data_addr = drive->hwif->io_ports.data_addr;
686
687 len++;
688
689 if (drive->io_32bit) {
690 scc_ide_insl(data_addr, buf, len / 4);
691
692 if ((len & 3) >= 2)
693 scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
694 } else
695 scc_ide_insw(data_addr, buf, len / 2);
696}
697
698static void scc_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
699 void *buf, unsigned int len)
700{
701 unsigned long data_addr = drive->hwif->io_ports.data_addr;
702
703 len++;
704
705 if (drive->io_32bit) {
706 scc_ide_outsl(data_addr, buf, len / 4);
707
708 if ((len & 3) >= 2)
709 scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
710 } else
711 scc_ide_outsw(data_addr, buf, len / 2);
712}
713
714/**
715 * init_mmio_iops_scc - set up the iops for MMIO
716 * @hwif: interface to set up
717 *
718 */
719
720static void init_mmio_iops_scc(ide_hwif_t *hwif)
721{
722 struct pci_dev *dev = to_pci_dev(hwif->dev);
723 struct scc_ports *ports = pci_get_drvdata(dev);
724 unsigned long dma_base = ports->dma;
725
726 ide_set_hwifdata(hwif, ports);
727
728 hwif->dma_base = dma_base;
729 hwif->config_data = ports->ctl;
730}
731
732/**
733 * init_iops_scc - set up iops
734 * @hwif: interface to set up
735 *
736 * Do the basic setup for the SCC hardware interface
737 * and then do the MMIO setup.
738 */
739
740static void init_iops_scc(ide_hwif_t *hwif)
741{
742 struct pci_dev *dev = to_pci_dev(hwif->dev);
743
744 hwif->hwif_data = NULL;
745 if (pci_get_drvdata(dev) == NULL)
746 return;
747 init_mmio_iops_scc(hwif);
748}
749
750static int scc_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
751{
752 return ide_allocate_dma_engine(hwif);
753}
754
755static u8 scc_cable_detect(ide_hwif_t *hwif)
756{
757 return ATA_CBL_PATA80;
758}
759
760/**
761 * init_hwif_scc - set up hwif
762 * @hwif: interface to set up
763 *
764 * We do the basic setup of the interface structure. The SCC
765 * requires several custom handlers so we override the default
766 * ide DMA handlers appropriately.
767 */
768
769static void init_hwif_scc(ide_hwif_t *hwif)
770{
771 /* PTERADD */
772 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
773
774 if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
775 hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
776 else
777 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
778}
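/*
 * PTERADD (offset 0x018 in the bus-master block) is loaded with the bus
 * address of the PRD table; the UDMA ceiling follows the clock selection
 * read back from CCKCTRL (UDMA6 at 133MHz, UDMA5 at 100MHz).
 */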
779
780static const struct ide_tp_ops scc_tp_ops = {
781 .exec_command = scc_exec_command,
782 .read_status = scc_read_status,
783 .read_altstatus = scc_read_altstatus,
784 .write_devctl = scc_write_devctl,
785
786 .dev_select = ide_dev_select,
787 .tf_load = scc_tf_load,
788 .tf_read = scc_tf_read,
789
790 .input_data = scc_input_data,
791 .output_data = scc_output_data,
792};
793
794static const struct ide_port_ops scc_port_ops = {
795 .set_pio_mode = scc_set_pio_mode,
796 .set_dma_mode = scc_set_dma_mode,
797 .udma_filter = scc_udma_filter,
798 .cable_detect = scc_cable_detect,
799};
800
801static const struct ide_dma_ops scc_dma_ops = {
802 .dma_host_set = scc_dma_host_set,
803 .dma_setup = scc_dma_setup,
804 .dma_start = scc_dma_start,
805 .dma_end = scc_dma_end,
806 .dma_test_irq = scc_dma_test_irq,
807 .dma_lost_irq = ide_dma_lost_irq,
808 .dma_timer_expiry = ide_dma_sff_timer_expiry,
809 .dma_sff_read_status = scc_dma_sff_read_status,
810};
811
812static const struct ide_port_info scc_chipset = {
813 .name = "sccIDE",
814 .init_iops = init_iops_scc,
815 .init_dma = scc_init_dma,
816 .init_hwif = init_hwif_scc,
817 .tp_ops = &scc_tp_ops,
818 .port_ops = &scc_port_ops,
819 .dma_ops = &scc_dma_ops,
820 .host_flags = IDE_HFLAG_SINGLE,
821 .irq_flags = IRQF_SHARED,
822 .pio_mask = ATA_PIO4,
823 .chipset = ide_pci,
824};
825
826/**
827 * scc_init_one - pci layer discovery entry
828 * @dev: PCI device
829 * @id: ident table entry
830 *
831 * Called by the PCI code when it finds an SCC PATA controller.
832 * We then use the IDE PCI generic helper to do most of the work.
833 */
834
835static int scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
836{
837 return init_setup_scc(dev, &scc_chipset);
838}
839
840/**
841 * scc_remove - pci layer remove entry
842 * @dev: PCI device
843 *
844 * Called by the PCI code when it removes an SCC PATA controller.
845 */
846
847static void scc_remove(struct pci_dev *dev)
848{
849 struct scc_ports *ports = pci_get_drvdata(dev);
850 struct ide_host *host = ports->host;
851
852 ide_host_remove(host);
853
854 iounmap((void*)ports->dma);
855 iounmap((void*)ports->ctl);
856 pci_release_selected_regions(dev, (1 << 2) - 1);
857 memset(ports, 0, sizeof(*ports));
858}
859
860static const struct pci_device_id scc_pci_tbl[] = {
861 { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
862 { 0, },
863};
864MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
865
866static struct pci_driver scc_pci_driver = {
867 .name = "SCC IDE",
868 .id_table = scc_pci_tbl,
869 .probe = scc_init_one,
870 .remove = scc_remove,
871};
872
873static int __init scc_ide_init(void)
874{
875 return ide_pci_register_driver(&scc_pci_driver);
876}
877
878static void __exit scc_ide_exit(void)
879{
880 pci_unregister_driver(&scc_pci_driver);
881}
882
883module_init(scc_ide_init);
884module_exit(scc_ide_exit);
885
886MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
887MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f80da50d84a5..38339d220d7f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
472 } sgid_addr, dgid_addr; 472 } sgid_addr, dgid_addr;
473 473
474 474
475 ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid); 475 rdma_gid2ip(&sgid_addr._sockaddr, sgid);
476 if (ret) 476 rdma_gid2ip(&dgid_addr._sockaddr, dgid);
477 return ret;
478
479 ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
480 if (ret)
481 return ret;
482 477
483 memset(&dev_addr, 0, sizeof(dev_addr)); 478 memset(&dev_addr, 0, sizeof(dev_addr));
484 479
@@ -512,10 +507,8 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
512 struct sockaddr_in6 _sockaddr_in6; 507 struct sockaddr_in6 _sockaddr_in6;
513 } gid_addr; 508 } gid_addr;
514 509
515 ret = rdma_gid2ip(&gid_addr._sockaddr, sgid); 510 rdma_gid2ip(&gid_addr._sockaddr, sgid);
516 511
517 if (ret)
518 return ret;
519 memset(&dev_addr, 0, sizeof(dev_addr)); 512 memset(&dev_addr, 0, sizeof(dev_addr));
520 ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id); 513 ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
521 if (ret) 514 if (ret)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e28a494e2a3a..0c1419105ff0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
437 return cm_id_priv; 437 return cm_id_priv;
438} 438}
439 439
440static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask) 440static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
441{ 441{
442 int i; 442 int i;
443 443
444 for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++) 444 for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
445 ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] & 445 dst[i] = src[i] & mask[i];
446 ((unsigned long *) mask)[i];
447} 446}
448 447
449static int cm_compare_data(struct ib_cm_compare_data *src_data, 448static int cm_compare_data(struct ib_cm_compare_data *src_data,
450 struct ib_cm_compare_data *dst_data) 449 struct ib_cm_compare_data *dst_data)
451{ 450{
452 u8 src[IB_CM_COMPARE_SIZE]; 451 u32 src[IB_CM_COMPARE_SIZE];
453 u8 dst[IB_CM_COMPARE_SIZE]; 452 u32 dst[IB_CM_COMPARE_SIZE];
454 453
455 if (!src_data || !dst_data) 454 if (!src_data || !dst_data)
456 return 0; 455 return 0;
457 456
458 cm_mask_copy(src, src_data->data, dst_data->mask); 457 cm_mask_copy(src, src_data->data, dst_data->mask);
459 cm_mask_copy(dst, dst_data->data, src_data->mask); 458 cm_mask_copy(dst, dst_data->data, src_data->mask);
460 return memcmp(src, dst, IB_CM_COMPARE_SIZE); 459 return memcmp(src, dst, sizeof(src));
461} 460}
462 461
463static int cm_compare_private_data(u8 *private_data, 462static int cm_compare_private_data(u32 *private_data,
464 struct ib_cm_compare_data *dst_data) 463 struct ib_cm_compare_data *dst_data)
465{ 464{
466 u8 src[IB_CM_COMPARE_SIZE]; 465 u32 src[IB_CM_COMPARE_SIZE];
467 466
468 if (!dst_data) 467 if (!dst_data)
469 return 0; 468 return 0;
470 469
471 cm_mask_copy(src, private_data, dst_data->mask); 470 cm_mask_copy(src, private_data, dst_data->mask);
472 return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE); 471 return memcmp(src, dst_data->data, sizeof(src));
473} 472}
474 473
475/* 474/*
@@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
538 537
539static struct cm_id_private * cm_find_listen(struct ib_device *device, 538static struct cm_id_private * cm_find_listen(struct ib_device *device,
540 __be64 service_id, 539 __be64 service_id,
541 u8 *private_data) 540 u32 *private_data)
542{ 541{
543 struct rb_node *node = cm.listen_service_table.rb_node; 542 struct rb_node *node = cm.listen_service_table.rb_node;
544 struct cm_id_private *cm_id_priv; 543 struct cm_id_private *cm_id_priv;
@@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
953 cm_mask_copy(cm_id_priv->compare_data->data, 952 cm_mask_copy(cm_id_priv->compare_data->data,
954 compare_data->data, compare_data->mask); 953 compare_data->data, compare_data->mask);
955 memcpy(cm_id_priv->compare_data->mask, compare_data->mask, 954 memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
956 IB_CM_COMPARE_SIZE); 955 sizeof(compare_data->mask));
957 } 956 }
958 957
959 cm_id->state = IB_CM_LISTEN; 958 cm_id->state = IB_CM_LISTEN;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index be068f47e47e..8b76f0ef965e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -103,7 +103,7 @@ struct cm_req_msg {
103 /* local ACK timeout:5, rsvd:3 */ 103 /* local ACK timeout:5, rsvd:3 */
104 u8 alt_offset139; 104 u8 alt_offset139;
105 105
106 u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE]; 106 u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
107 107
108} __attribute__ ((packed)); 108} __attribute__ ((packed));
109 109
@@ -801,7 +801,7 @@ struct cm_sidr_req_msg {
801 __be16 rsvd; 801 __be16 rsvd;
802 __be64 service_id; 802 __be64 service_id;
803 803
804 u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE]; 804 u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
805} __attribute__ ((packed)); 805} __attribute__ ((packed));
806 806
807struct cm_sidr_rep_msg { 807struct cm_sidr_rep_msg {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d570030d899c..06441a43c3aa 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
859 memcpy(&ib->sib_addr, &path->dgid, 16); 859 memcpy(&ib->sib_addr, &path->dgid, 16);
860} 860}
861 861
862static __be16 ss_get_port(const struct sockaddr_storage *ss)
863{
864 if (ss->ss_family == AF_INET)
865 return ((struct sockaddr_in *)ss)->sin_port;
866 else if (ss->ss_family == AF_INET6)
867 return ((struct sockaddr_in6 *)ss)->sin6_port;
868 BUG();
869}
870
862static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, 871static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
863 struct cma_hdr *hdr) 872 struct cma_hdr *hdr)
864{ 873{
865 struct sockaddr_in *listen4, *ip4; 874 struct sockaddr_in *ip4;
866 875
867 listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
868 ip4 = (struct sockaddr_in *) &id->route.addr.src_addr; 876 ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
869 ip4->sin_family = listen4->sin_family; 877 ip4->sin_family = AF_INET;
870 ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; 878 ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
871 ip4->sin_port = listen4->sin_port; 879 ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
872 880
873 ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr; 881 ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
874 ip4->sin_family = listen4->sin_family; 882 ip4->sin_family = AF_INET;
875 ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; 883 ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
876 ip4->sin_port = hdr->port; 884 ip4->sin_port = hdr->port;
877} 885}
@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
879static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, 887static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
880 struct cma_hdr *hdr) 888 struct cma_hdr *hdr)
881{ 889{
882 struct sockaddr_in6 *listen6, *ip6; 890 struct sockaddr_in6 *ip6;
883 891
884 listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
885 ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr; 892 ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
886 ip6->sin6_family = listen6->sin6_family; 893 ip6->sin6_family = AF_INET6;
887 ip6->sin6_addr = hdr->dst_addr.ip6; 894 ip6->sin6_addr = hdr->dst_addr.ip6;
888 ip6->sin6_port = listen6->sin6_port; 895 ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
889 896
890 ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr; 897 ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
891 ip6->sin6_family = listen6->sin6_family; 898 ip6->sin6_family = AF_INET6;
892 ip6->sin6_addr = hdr->src_addr.ip6; 899 ip6->sin6_addr = hdr->src_addr.ip6;
893 ip6->sin6_port = hdr->port; 900 ip6->sin6_port = hdr->port;
894} 901}
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index b85ddbc979e0..e6ffa2e66c1a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -33,7 +33,7 @@
33 33
34#include "iwpm_util.h" 34#include "iwpm_util.h"
35 35
36static const char iwpm_ulib_name[] = "iWarpPortMapperUser"; 36static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
37static int iwpm_ulib_version = 3; 37static int iwpm_ulib_version = 3;
38static int iwpm_user_pid = IWPM_PID_UNDEFINED; 38static int iwpm_user_pid = IWPM_PID_UNDEFINED;
39static atomic_t echo_nlmsg_seq; 39static atomic_t echo_nlmsg_seq;
@@ -468,7 +468,8 @@ add_mapping_response_exit:
468} 468}
469EXPORT_SYMBOL(iwpm_add_mapping_cb); 469EXPORT_SYMBOL(iwpm_add_mapping_cb);
470 470
471/* netlink attribute policy for the response to add and query mapping request */ 471/* netlink attribute policy for the response to add and query mapping request
472 * and response with remote address info */
472static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = { 473static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
473 [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 }, 474 [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 },
474 [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) }, 475 [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) },
@@ -559,6 +560,76 @@ query_mapping_response_exit:
559} 560}
560EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb); 561EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
561 562
563/*
564 * iwpm_remote_info_cb - Process a port mapper message, containing
565 * the remote connecting peer address info
566 */
567int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
568{
569 struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
570 struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
571 struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
572 struct iwpm_remote_info *rem_info;
573 const char *msg_type;
574 u8 nl_client;
575 int ret = -EINVAL;
576
577 msg_type = "Remote Mapping info";
578 if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
579 resp_query_policy, nltb, msg_type))
580 return ret;
581
582 nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
583 if (!iwpm_valid_client(nl_client)) {
584 pr_info("%s: Invalid port mapper client = %d\n",
585 __func__, nl_client);
586 return ret;
587 }
588 atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
589
590 local_sockaddr = (struct sockaddr_storage *)
591 nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
592 remote_sockaddr = (struct sockaddr_storage *)
593 nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
594 mapped_loc_sockaddr = (struct sockaddr_storage *)
595 nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
596 mapped_rem_sockaddr = (struct sockaddr_storage *)
597 nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);
598
599 if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
600 mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
601 pr_info("%s: Sockaddr family doesn't match the requested one\n",
602 __func__);
603 return ret;
604 }
605 rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
606 if (!rem_info) {
607 pr_err("%s: Unable to allocate a remote info\n", __func__);
608 ret = -ENOMEM;
609 return ret;
610 }
611 memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr,
612 sizeof(struct sockaddr_storage));
613 memcpy(&rem_info->remote_sockaddr, remote_sockaddr,
614 sizeof(struct sockaddr_storage));
615 memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr,
616 sizeof(struct sockaddr_storage));
617 rem_info->nl_client = nl_client;
618
619 iwpm_add_remote_info(rem_info);
620
621 iwpm_print_sockaddr(local_sockaddr,
622 "remote_info: Local sockaddr:");
623 iwpm_print_sockaddr(mapped_loc_sockaddr,
624 "remote_info: Mapped local sockaddr:");
625 iwpm_print_sockaddr(remote_sockaddr,
626 "remote_info: Remote sockaddr:");
627 iwpm_print_sockaddr(mapped_rem_sockaddr,
628 "remote_info: Mapped remote sockaddr:");
629 return ret;
630}
631EXPORT_SYMBOL(iwpm_remote_info_cb);
632
562/* netlink attribute policy for the received request for mapping info */ 633/* netlink attribute policy for the received request for mapping info */
563static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = { 634static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
564 [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING, 635 [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 69e9f84c1605..a626795bf9c7 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -33,8 +33,10 @@
33 33
34#include "iwpm_util.h" 34#include "iwpm_util.h"
35 35
36#define IWPM_HASH_BUCKET_SIZE 512 36#define IWPM_MAPINFO_HASH_SIZE 512
37#define IWPM_HASH_BUCKET_MASK (IWPM_HASH_BUCKET_SIZE - 1) 37#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1)
38#define IWPM_REMINFO_HASH_SIZE 64
39#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1)
38 40
39static LIST_HEAD(iwpm_nlmsg_req_list); 41static LIST_HEAD(iwpm_nlmsg_req_list);
40static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); 42static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
42static struct hlist_head *iwpm_hash_bucket; 44static struct hlist_head *iwpm_hash_bucket;
43static DEFINE_SPINLOCK(iwpm_mapinfo_lock); 45static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
44 46
47static struct hlist_head *iwpm_reminfo_bucket;
48static DEFINE_SPINLOCK(iwpm_reminfo_lock);
49
45static DEFINE_MUTEX(iwpm_admin_lock); 50static DEFINE_MUTEX(iwpm_admin_lock);
46static struct iwpm_admin_data iwpm_admin; 51static struct iwpm_admin_data iwpm_admin;
47 52
48int iwpm_init(u8 nl_client) 53int iwpm_init(u8 nl_client)
49{ 54{
55 int ret = 0;
50 if (iwpm_valid_client(nl_client)) 56 if (iwpm_valid_client(nl_client))
51 return -EINVAL; 57 return -EINVAL;
52 mutex_lock(&iwpm_admin_lock); 58 mutex_lock(&iwpm_admin_lock);
53 if (atomic_read(&iwpm_admin.refcount) == 0) { 59 if (atomic_read(&iwpm_admin.refcount) == 0) {
54 iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE * 60 iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
55 sizeof(struct hlist_head), GFP_KERNEL); 61 sizeof(struct hlist_head), GFP_KERNEL);
56 if (!iwpm_hash_bucket) { 62 if (!iwpm_hash_bucket) {
57 mutex_unlock(&iwpm_admin_lock); 63 ret = -ENOMEM;
58 pr_err("%s Unable to create mapinfo hash table\n", __func__); 64 pr_err("%s Unable to create mapinfo hash table\n", __func__);
59 return -ENOMEM; 65 goto init_exit;
66 }
67 iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
68 sizeof(struct hlist_head), GFP_KERNEL);
69 if (!iwpm_reminfo_bucket) {
70 kfree(iwpm_hash_bucket);
71 ret = -ENOMEM;
72 pr_err("%s Unable to create reminfo hash table\n", __func__);
73 goto init_exit;
60 } 74 }
61 } 75 }
62 atomic_inc(&iwpm_admin.refcount); 76 atomic_inc(&iwpm_admin.refcount);
77init_exit:
63 mutex_unlock(&iwpm_admin_lock); 78 mutex_unlock(&iwpm_admin_lock);
64 iwpm_set_valid(nl_client, 1); 79 if (!ret) {
65 return 0; 80 iwpm_set_valid(nl_client, 1);
81 pr_debug("%s: Mapinfo and reminfo tables are created\n",
82 __func__);
83 }
84 return ret;
66} 85}
67EXPORT_SYMBOL(iwpm_init); 86EXPORT_SYMBOL(iwpm_init);
68 87
69static void free_hash_bucket(void); 88static void free_hash_bucket(void);
89static void free_reminfo_bucket(void);
70 90
71int iwpm_exit(u8 nl_client) 91int iwpm_exit(u8 nl_client)
72{ 92{
@@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client)
81 } 101 }
82 if (atomic_dec_and_test(&iwpm_admin.refcount)) { 102 if (atomic_dec_and_test(&iwpm_admin.refcount)) {
83 free_hash_bucket(); 103 free_hash_bucket();
84 pr_debug("%s: Mapinfo hash table is destroyed\n", __func__); 104 free_reminfo_bucket();
105 pr_debug("%s: Resources are destroyed\n", __func__);
85 } 106 }
86 mutex_unlock(&iwpm_admin_lock); 107 mutex_unlock(&iwpm_admin_lock);
87 iwpm_set_valid(nl_client, 0); 108 iwpm_set_valid(nl_client, 0);
@@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client)
89} 110}
90EXPORT_SYMBOL(iwpm_exit); 111EXPORT_SYMBOL(iwpm_exit);
91 112
92static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *, 113static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
93 struct sockaddr_storage *); 114 struct sockaddr_storage *);
94 115
95int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, 116int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
@@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
99 struct hlist_head *hash_bucket_head; 120 struct hlist_head *hash_bucket_head;
100 struct iwpm_mapping_info *map_info; 121 struct iwpm_mapping_info *map_info;
101 unsigned long flags; 122 unsigned long flags;
123 int ret = -EINVAL;
102 124
103 if (!iwpm_valid_client(nl_client)) 125 if (!iwpm_valid_client(nl_client))
104 return -EINVAL; 126 return ret;
105 map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL); 127 map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
106 if (!map_info) { 128 if (!map_info) {
107 pr_err("%s: Unable to allocate a mapping info\n", __func__); 129 pr_err("%s: Unable to allocate a mapping info\n", __func__);
@@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
115 137
116 spin_lock_irqsave(&iwpm_mapinfo_lock, flags); 138 spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
117 if (iwpm_hash_bucket) { 139 if (iwpm_hash_bucket) {
118 hash_bucket_head = get_hash_bucket_head( 140 hash_bucket_head = get_mapinfo_hash_bucket(
119 &map_info->local_sockaddr, 141 &map_info->local_sockaddr,
120 &map_info->mapped_sockaddr); 142 &map_info->mapped_sockaddr);
121 hlist_add_head(&map_info->hlist_node, hash_bucket_head); 143 if (hash_bucket_head) {
144 hlist_add_head(&map_info->hlist_node, hash_bucket_head);
145 ret = 0;
146 }
122 } 147 }
123 spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); 148 spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
124 return 0; 149 return ret;
125} 150}
126EXPORT_SYMBOL(iwpm_create_mapinfo); 151EXPORT_SYMBOL(iwpm_create_mapinfo);
127 152
@@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
136 161
137 spin_lock_irqsave(&iwpm_mapinfo_lock, flags); 162 spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
138 if (iwpm_hash_bucket) { 163 if (iwpm_hash_bucket) {
139 hash_bucket_head = get_hash_bucket_head( 164 hash_bucket_head = get_mapinfo_hash_bucket(
140 local_sockaddr, 165 local_sockaddr,
141 mapped_local_addr); 166 mapped_local_addr);
167 if (!hash_bucket_head)
168 goto remove_mapinfo_exit;
169
142 hlist_for_each_entry_safe(map_info, tmp_hlist_node, 170 hlist_for_each_entry_safe(map_info, tmp_hlist_node,
143 hash_bucket_head, hlist_node) { 171 hash_bucket_head, hlist_node) {
144 172
@@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
152 } 180 }
153 } 181 }
154 } 182 }
183remove_mapinfo_exit:
155 spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); 184 spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
156 return ret; 185 return ret;
157} 186}
@@ -166,7 +195,7 @@ static void free_hash_bucket(void)
166 195
167 /* remove all the mapinfo data from the list */ 196 /* remove all the mapinfo data from the list */
168 spin_lock_irqsave(&iwpm_mapinfo_lock, flags); 197 spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
169 for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { 198 for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
170 hlist_for_each_entry_safe(map_info, tmp_hlist_node, 199 hlist_for_each_entry_safe(map_info, tmp_hlist_node,
171 &iwpm_hash_bucket[i], hlist_node) { 200 &iwpm_hash_bucket[i], hlist_node) {
172 201
@@ -180,6 +209,96 @@ static void free_hash_bucket(void)
180 spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); 209 spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
181} 210}
182 211
212static void free_reminfo_bucket(void)
213{
214 struct hlist_node *tmp_hlist_node;
215 struct iwpm_remote_info *rem_info;
216 unsigned long flags;
217 int i;
218
219 /* remove all the remote info from the list */
220 spin_lock_irqsave(&iwpm_reminfo_lock, flags);
221 for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) {
222 hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
223 &iwpm_reminfo_bucket[i], hlist_node) {
224
225 hlist_del_init(&rem_info->hlist_node);
226 kfree(rem_info);
227 }
228 }
229 /* free the hash list */
230 kfree(iwpm_reminfo_bucket);
231 iwpm_reminfo_bucket = NULL;
232 spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
233}
234
235static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *,
236 struct sockaddr_storage *);
237
238void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
239{
240 struct hlist_head *hash_bucket_head;
241 unsigned long flags;
242
243 spin_lock_irqsave(&iwpm_reminfo_lock, flags);
244 if (iwpm_reminfo_bucket) {
245 hash_bucket_head = get_reminfo_hash_bucket(
246 &rem_info->mapped_loc_sockaddr,
247 &rem_info->mapped_rem_sockaddr);
248 if (hash_bucket_head)
249 hlist_add_head(&rem_info->hlist_node, hash_bucket_head);
250 }
251 spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
252}
253
254int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
255 struct sockaddr_storage *mapped_rem_addr,
256 struct sockaddr_storage *remote_addr,
257 u8 nl_client)
258{
259 struct hlist_node *tmp_hlist_node;
260 struct hlist_head *hash_bucket_head;
261 struct iwpm_remote_info *rem_info = NULL;
262 unsigned long flags;
263 int ret = -EINVAL;
264
265 if (!iwpm_valid_client(nl_client)) {
266 pr_info("%s: Invalid client = %d\n", __func__, nl_client);
267 return ret;
268 }
269 spin_lock_irqsave(&iwpm_reminfo_lock, flags);
270 if (iwpm_reminfo_bucket) {
271 hash_bucket_head = get_reminfo_hash_bucket(
272 mapped_loc_addr,
273 mapped_rem_addr);
274 if (!hash_bucket_head)
275 goto get_remote_info_exit;
276 hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
277 hash_bucket_head, hlist_node) {
278
279 if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr,
280 mapped_loc_addr) &&
281 !iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr,
282 mapped_rem_addr)) {
283
284 memcpy(remote_addr, &rem_info->remote_sockaddr,
285 sizeof(struct sockaddr_storage));
286 iwpm_print_sockaddr(remote_addr,
287 "get_remote_info: Remote sockaddr:");
288
289 hlist_del_init(&rem_info->hlist_node);
290 kfree(rem_info);
291 ret = 0;
292 break;
293 }
294 }
295 }
296get_remote_info_exit:
297 spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
298 return ret;
299}
300EXPORT_SYMBOL(iwpm_get_remote_info);
301
183struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq, 302struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
184 u8 nl_client, gfp_t gfp) 303 u8 nl_client, gfp_t gfp)
185{ 304{
@@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
409 return hash; 528 return hash;
410} 529}
411 530
412static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage 531static int get_hash_bucket(struct sockaddr_storage *a_sockaddr,
413 *local_sockaddr, 532 struct sockaddr_storage *b_sockaddr, u32 *hash)
414 struct sockaddr_storage
415 *mapped_sockaddr)
416{ 533{
417 u32 local_hash, mapped_hash, hash; 534 u32 a_hash, b_hash;
418 535
419 if (local_sockaddr->ss_family == AF_INET) { 536 if (a_sockaddr->ss_family == AF_INET) {
420 local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr); 537 a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr);
421 mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr); 538 b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr);
422 539
423 } else if (local_sockaddr->ss_family == AF_INET6) { 540 } else if (a_sockaddr->ss_family == AF_INET6) {
424 local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr); 541 a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr);
425 mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr); 542 b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr);
426 } else { 543 } else {
427 pr_err("%s: Invalid sockaddr family\n", __func__); 544 pr_err("%s: Invalid sockaddr family\n", __func__);
428 return NULL; 545 return -EINVAL;
429 } 546 }
430 547
431 if (local_hash == mapped_hash) /* if port mapper isn't available */ 548 if (a_hash == b_hash) /* if port mapper isn't available */
432 hash = local_hash; 549 *hash = a_hash;
433 else 550 else
434 hash = jhash_2words(local_hash, mapped_hash, 0); 551 *hash = jhash_2words(a_hash, b_hash, 0);
552 return 0;
553}
554
555static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage
556 *local_sockaddr, struct sockaddr_storage
557 *mapped_sockaddr)
558{
559 u32 hash;
560 int ret;
435 561
436 return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK]; 562 ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash);
563 if (ret)
564 return NULL;
565 return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK];
566}
567
568static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage
569 *mapped_loc_sockaddr, struct sockaddr_storage
570 *mapped_rem_sockaddr)
571{
572 u32 hash;
573 int ret;
574
575 ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash);
576 if (ret)
577 return NULL;
578 return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK];
437} 579}
438 580
439static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) 581static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
@@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
512 } 654 }
513 skb_num++; 655 skb_num++;
514 spin_lock_irqsave(&iwpm_mapinfo_lock, flags); 656 spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
515 for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { 657 for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
516 hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], 658 hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
517 hlist_node) { 659 hlist_node) {
518 if (map_info->nl_client != nl_client) 660 if (map_info->nl_client != nl_client)
@@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void)
595 737
596 spin_lock_irqsave(&iwpm_mapinfo_lock, flags); 738 spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
597 if (iwpm_hash_bucket) { 739 if (iwpm_hash_bucket) {
598 for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { 740 for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
599 if (!hlist_empty(&iwpm_hash_bucket[i])) { 741 if (!hlist_empty(&iwpm_hash_bucket[i])) {
600 full_bucket = 1; 742 full_bucket = 1;
601 break; 743 break;
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 9777c869a140..ee2d9ff095be 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -76,6 +76,14 @@ struct iwpm_mapping_info {
76 u8 nl_client; 76 u8 nl_client;
77}; 77};
78 78
79struct iwpm_remote_info {
80 struct hlist_node hlist_node;
81 struct sockaddr_storage remote_sockaddr;
82 struct sockaddr_storage mapped_loc_sockaddr;
83 struct sockaddr_storage mapped_rem_sockaddr;
84 u8 nl_client;
85};
86
79struct iwpm_admin_data { 87struct iwpm_admin_data {
80 atomic_t refcount; 88 atomic_t refcount;
81 atomic_t nlmsg_seq; 89 atomic_t nlmsg_seq;
@@ -128,6 +136,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
128int iwpm_get_nlmsg_seq(void); 136int iwpm_get_nlmsg_seq(void);
129 137
130/** 138/**
139 * iwpm_add_reminfo - Add remote address info of the connecting peer
140 * to the remote info hash table
141 * @reminfo: The remote info to be added
142 */
143void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
144
145/**
131 * iwpm_valid_client - Check if the port mapper client is valid 146 * iwpm_valid_client - Check if the port mapper client is valid
132 * @nl_client: The index of the netlink client 147 * @nl_client: The index of the netlink client
133 * 148 *
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8b8cc6fa0ab0..40becdb3196e 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
446 int remove_existing_mapping = 0; 446 int remove_existing_mapping = 0;
447 int ret = 0; 447 int ret = 0;
448 448
449 mutex_lock(&umem->odp_data->umem_mutex);
450 /* 449 /*
451 * Note: we avoid writing if seq is different from the initial seq, to 450 * Note: we avoid writing if seq is different from the initial seq, to
452 * handle case of a racing notifier. This check also allows us to bail 451 * handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
479 } 478 }
480 479
481out: 480out:
482 mutex_unlock(&umem->odp_data->umem_mutex);
483
484 /* On Demand Paging - avoid pinning the page */ 481 /* On Demand Paging - avoid pinning the page */
485 if (umem->context->invalidate_range || !stored_page) 482 if (umem->context->invalidate_range || !stored_page)
486 put_page(page); 483 put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
586 583
587 bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); 584 bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
588 user_virt += npages << PAGE_SHIFT; 585 user_virt += npages << PAGE_SHIFT;
586 mutex_lock(&umem->odp_data->umem_mutex);
589 for (j = 0; j < npages; ++j) { 587 for (j = 0; j < npages; ++j) {
590 ret = ib_umem_odp_map_dma_single_page( 588 ret = ib_umem_odp_map_dma_single_page(
591 umem, k, base_virt_addr, local_page_list[j], 589 umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
594 break; 592 break;
595 k++; 593 k++;
596 } 594 }
595 mutex_unlock(&umem->odp_data->umem_mutex);
597 596
598 if (ret < 0) { 597 if (ret < 0) {
599 /* Release left over pages when handling errors. */ 598 /* Release left over pages when handling errors. */
@@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
633 * faults from completion. We might be racing with other 632 * faults from completion. We might be racing with other
634 * invalidations, so we must make sure we free each page only 633 * invalidations, so we must make sure we free each page only
635 * once. */ 634 * once. */
635 mutex_lock(&umem->odp_data->umem_mutex);
636 for (addr = virt; addr < bound; addr += (u64)umem->page_size) { 636 for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
637 idx = (addr - ib_umem_start(umem)) / PAGE_SIZE; 637 idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
638 mutex_lock(&umem->odp_data->umem_mutex);
639 if (umem->odp_data->page_list[idx]) { 638 if (umem->odp_data->page_list[idx]) {
640 struct page *page = umem->odp_data->page_list[idx]; 639 struct page *page = umem->odp_data->page_list[idx];
641 struct page *head_page = compound_head(page);
642 dma_addr_t dma = umem->odp_data->dma_list[idx]; 640 dma_addr_t dma = umem->odp_data->dma_list[idx];
643 dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK; 641 dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
644 642
@@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
646 644
647 ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE, 645 ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
648 DMA_BIDIRECTIONAL); 646 DMA_BIDIRECTIONAL);
649 if (dma & ODP_WRITE_ALLOWED_BIT) 647 if (dma & ODP_WRITE_ALLOWED_BIT) {
648 struct page *head_page = compound_head(page);
650 /* 649 /*
651 * set_page_dirty prefers being called with 650 * set_page_dirty prefers being called with
652 * the page lock. However, MMU notifiers are 651 * the page lock. However, MMU notifiers are
@@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
657 * be removed. 656 * be removed.
658 */ 657 */
659 set_page_dirty(head_page); 658 set_page_dirty(head_page);
659 }
660 /* on demand pinning support */ 660 /* on demand pinning support */
661 if (!umem->context->invalidate_range) 661 if (!umem->context->invalidate_range)
662 put_page(page); 662 put_page(page);
663 umem->odp_data->page_list[idx] = NULL; 663 umem->odp_data->page_list[idx] = NULL;
664 umem->odp_data->dma_list[idx] = 0; 664 umem->odp_data->dma_list[idx] = 0;
665 } 665 }
666 mutex_unlock(&umem->odp_data->umem_mutex);
667 } 666 }
667 mutex_unlock(&umem->odp_data->umem_mutex);
668} 668}
669EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages); 669EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 57176ddd4c50..3ad8dc798f52 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
583 sizeof(ep->com.mapped_remote_addr)); 583 sizeof(ep->com.mapped_remote_addr));
584} 584}
585 585
586static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
587{
588 int ret;
589
590 print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
591 print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");
592
593 ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
594 &child_ep->com.mapped_remote_addr,
595 &child_ep->com.remote_addr, RDMA_NL_C4IW);
596 if (ret)
597 PDBG("Unable to find remote peer addr info - err %d\n", ret);
598
599 return ret;
600}
601
586static void best_mtu(const unsigned short *mtus, unsigned short mtu, 602static void best_mtu(const unsigned short *mtus, unsigned short mtu,
587 unsigned int *idx, int use_ts, int ipv6) 603 unsigned int *idx, int use_ts, int ipv6)
588{ 604{
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep)
675 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 691 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
676 opt2 |= T5_OPT_2_VALID_F; 692 opt2 |= T5_OPT_2_VALID_F;
677 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 693 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
678 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 694 opt2 |= T5_ISS_F;
679 } 695 }
680 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); 696 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
681 697
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2042 status, status2errno(status)); 2058 status, status2errno(status));
2043 2059
2044 if (is_neg_adv(status)) { 2060 if (is_neg_adv(status)) {
2045 dev_warn(&dev->rdev.lldi.pdev->dev, 2061 PDBG("%s Connection problems for atid %u status %u (%s)\n",
2046 "Connection problems for atid %u status %u (%s)\n", 2062 __func__, atid, status, neg_adv_str(status));
2047 atid, status, neg_adv_str(status)); 2063 ep->stats.connect_neg_adv++;
2064 mutex_lock(&dev->rdev.stats.lock);
2065 dev->rdev.stats.neg_adv++;
2066 mutex_unlock(&dev->rdev.stats.lock);
2048 return 0; 2067 return 0;
2049 } 2068 }
2050 2069
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2214 u32 isn = (prandom_u32() & ~7UL) - 1; 2233 u32 isn = (prandom_u32() & ~7UL) - 1;
2215 opt2 |= T5_OPT_2_VALID_F; 2234 opt2 |= T5_OPT_2_VALID_F;
2216 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 2235 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
2217 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 2236 opt2 |= T5_ISS_F;
2218 rpl5 = (void *)rpl; 2237 rpl5 = (void *)rpl;
2219 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); 2238 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2220 if (peer2peer) 2239 if (peer2peer)
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2352 state_set(&child_ep->com, CONNECTING); 2371 state_set(&child_ep->com, CONNECTING);
2353 child_ep->com.dev = dev; 2372 child_ep->com.dev = dev;
2354 child_ep->com.cm_id = NULL; 2373 child_ep->com.cm_id = NULL;
2374
2375 /*
2376 * The mapped_local and mapped_remote addresses get setup with
2377 * the actual 4-tuple. The local address will be based on the
2378 * actual local address of the connection, but on the port number
2379 * of the parent listening endpoint. The remote address is
2380 * setup based on a query to the IWPM since we don't know what it
2381 * originally was before mapping. If no mapping was done, then
2382 * mapped_remote == remote, and mapped_local == local.
2383 */
2355 if (iptype == 4) { 2384 if (iptype == 4) {
2356 struct sockaddr_in *sin = (struct sockaddr_in *) 2385 struct sockaddr_in *sin = (struct sockaddr_in *)
2357 &child_ep->com.local_addr; 2386 &child_ep->com.mapped_local_addr;
2387
2358 sin->sin_family = PF_INET; 2388 sin->sin_family = PF_INET;
2359 sin->sin_port = local_port; 2389 sin->sin_port = local_port;
2360 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2390 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2361 sin = (struct sockaddr_in *)&child_ep->com.remote_addr; 2391
2392 sin = (struct sockaddr_in *)&child_ep->com.local_addr;
2393 sin->sin_family = PF_INET;
2394 sin->sin_port = ((struct sockaddr_in *)
2395 &parent_ep->com.local_addr)->sin_port;
2396 sin->sin_addr.s_addr = *(__be32 *)local_ip;
2397
2398 sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
2362 sin->sin_family = PF_INET; 2399 sin->sin_family = PF_INET;
2363 sin->sin_port = peer_port; 2400 sin->sin_port = peer_port;
2364 sin->sin_addr.s_addr = *(__be32 *)peer_ip; 2401 sin->sin_addr.s_addr = *(__be32 *)peer_ip;
2365 } else { 2402 } else {
2366 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 2403 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
2367 &child_ep->com.local_addr; 2404 &child_ep->com.mapped_local_addr;
2405
2368 sin6->sin6_family = PF_INET6; 2406 sin6->sin6_family = PF_INET6;
2369 sin6->sin6_port = local_port; 2407 sin6->sin6_port = local_port;
2370 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2408 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2371 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; 2409
2410 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2411 sin6->sin6_family = PF_INET6;
2412 sin6->sin6_port = ((struct sockaddr_in6 *)
2413 &parent_ep->com.local_addr)->sin6_port;
2414 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
2415
2416 sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
2372 sin6->sin6_family = PF_INET6; 2417 sin6->sin6_family = PF_INET6;
2373 sin6->sin6_port = peer_port; 2418 sin6->sin6_port = peer_port;
2374 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); 2419 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2375 } 2420 }
2421 memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
2422 sizeof(child_ep->com.remote_addr));
2423 get_remote_addr(parent_ep, child_ep);
2424
2376 c4iw_get_ep(&parent_ep->com); 2425 c4iw_get_ep(&parent_ep->com);
2377 child_ep->parent_ep = parent_ep; 2426 child_ep->parent_ep = parent_ep;
2378 child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); 2427 child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2520 2569
2521 ep = lookup_tid(t, tid); 2570 ep = lookup_tid(t, tid);
2522 if (is_neg_adv(req->status)) { 2571 if (is_neg_adv(req->status)) {
2523 dev_warn(&dev->rdev.lldi.pdev->dev, 2572 PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
2524 "Negative advice on abort - tid %u status %d (%s)\n", 2573 __func__, ep->hwtid, req->status,
2525 ep->hwtid, req->status, neg_adv_str(req->status)); 2574 neg_adv_str(req->status));
2575 ep->stats.abort_neg_adv++;
2576 mutex_lock(&dev->rdev.stats.lock);
2577 dev->rdev.stats.neg_adv++;
2578 mutex_unlock(&dev->rdev.stats.lock);
2526 return 0; 2579 return 0;
2527 } 2580 }
2528 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2581 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3571 * TP will ignore any value > 0 for MSS index. 3624 * TP will ignore any value > 0 for MSS index.
3572 */ 3625 */
3573 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF)); 3626 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
3574 req->cookie = (unsigned long)skb; 3627 req->cookie = (uintptr_t)skb;
3575 3628
3576 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 3629 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
3577 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); 3630 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
3931 return 0; 3984 return 0;
3932 } 3985 }
3933 if (is_neg_adv(req->status)) { 3986 if (is_neg_adv(req->status)) {
3934 dev_warn(&dev->rdev.lldi.pdev->dev, 3987 PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
3935 "Negative advice on abort - tid %u status %d (%s)\n", 3988 __func__, ep->hwtid, req->status,
3936 ep->hwtid, req->status, neg_adv_str(req->status)); 3989 neg_adv_str(req->status));
3990 ep->stats.abort_neg_adv++;
3991 dev->rdev.stats.neg_adv++;
3937 kfree_skb(skb); 3992 kfree_skb(skb);
3938 return 0; 3993 return 0;
3939 } 3994 }
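
The child-endpoint address setup in the cm.c hunks above relies on the iWARP port mapper: the driver records the mapped 4-tuple it sees on the wire, then asks the port mapper for the peer's original address. Below is a condensed, illustrative sketch of that lookup; the iwpm_get_remote_info() call and its argument order are taken from the get_remote_addr() hunk above, while the function name and surrounding code are placeholders, not cxgb4 code.

#include <linux/printk.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/types.h>
#include <rdma/iw_portmapper.h>

/* Placeholder helper, not part of the driver: recover the peer's original
 * (pre-mapping) address for an accepted child connection. */
static int lookup_unmapped_peer(struct sockaddr_storage *mapped_local,
                                struct sockaddr_storage *mapped_remote,
                                struct sockaddr_storage *remote, u8 nl_client)
{
        int ret;

        /* Default, as in pass_accept_req() above: if no mapping was done,
         * the real peer address equals the mapped one. */
        memcpy(remote, mapped_remote, sizeof(*remote));

        /* Ask the user-space port mapper (e.g. the RDMA_NL_C4IW or
         * RDMA_NL_NES client) which original peer this mapped 4-tuple
         * corresponds to. */
        ret = iwpm_get_remote_info(mapped_local, mapped_remote, remote,
                                   nl_client);
        if (ret)
                pr_debug("no port-mapper record for this connection: %d\n", ret);
        return ret;
}
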
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ab7692ac2044..68ddb3710215 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
55 FW_RI_RES_WR_NRES_V(1) | 55 FW_RI_RES_WR_NRES_V(1) |
56 FW_WR_COMPL_F); 56 FW_WR_COMPL_F);
57 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); 57 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
58 res_wr->cookie = (unsigned long) &wr_wait; 58 res_wr->cookie = (uintptr_t)&wr_wait;
59 res = res_wr->res; 59 res = res_wr->res;
60 res->u.cq.restype = FW_RI_RES_TYPE_CQ; 60 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
61 res->u.cq.op = FW_RI_RES_OP_RESET; 61 res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
125 FW_RI_RES_WR_NRES_V(1) | 125 FW_RI_RES_WR_NRES_V(1) |
126 FW_WR_COMPL_F); 126 FW_WR_COMPL_F);
127 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); 127 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
128 res_wr->cookie = (unsigned long) &wr_wait; 128 res_wr->cookie = (uintptr_t)&wr_wait;
129 res = res_wr->res; 129 res = res_wr->res;
130 res->u.cq.restype = FW_RI_RES_TYPE_CQ; 130 res->u.cq.restype = FW_RI_RES_TYPE_CQ;
131 res->u.cq.op = FW_RI_RES_OP_WRITE; 131 res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
156 goto err4; 156 goto err4;
157 157
158 cq->gen = 1; 158 cq->gen = 1;
159 cq->gts = rdev->lldi.gts_reg;
160 cq->rdev = rdev; 159 cq->rdev = rdev;
161 if (user) { 160 if (user) {
162 cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) + 161 u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;
163 (cq->cqid << rdev->cqshift); 162
164 cq->ugts &= PAGE_MASK; 163 cq->ugts = (u64)rdev->bar2_pa + off;
164 } else if (is_t4(rdev->lldi.adapter_type)) {
165 cq->gts = rdev->lldi.gts_reg;
166 cq->qid_mask = -1U;
167 } else {
168 u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;
169
170 cq->gts = rdev->bar2_kva + off;
171 cq->qid_mask = rdev->qpmask;
165 } 172 }
166 return 0; 173 return 0;
167err4: 174err4:
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
970 } 977 }
971 PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n", 978 PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
972 __func__, chp->cq.cqid, chp, chp->cq.size, 979 __func__, chp->cq.cqid, chp, chp->cq.size,
973 chp->cq.memsize, 980 chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
974 (unsigned long long) chp->cq.dma_addr);
975 return &chp->ibcq; 981 return &chp->ibcq;
976err5: 982err5:
977 kfree(mm2); 983 kfree(mm2);
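
The user and kernel doorbell addresses computed in create_cq() above are plain shift-and-mask arithmetic on the queue id. A small stand-alone example with made-up values follows; cqid and cqshift are hypothetical, not taken from any adapter, and the trailing +12 for the kernel GTS pointer mirrors the hunk above.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        uint32_t cqid = 0x123;          /* hypothetical queue id */
        unsigned long cqshift = 7;      /* hypothetical shift value */

        /* Same math as create_cq(): shift the id, then round down to the
         * containing BAR2 page. */
        unsigned long off = ((unsigned long)cqid << cqshift) & PAGE_MASK;

        printf("user GTS page offset : 0x%lx\n", off);        /* 0x9000 */
        printf("kernel GTS offset    : 0x%lx\n", off + 12);   /* +12 as above */
        return 0;
}
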
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8fb295e4a9ab..7e895d714b19 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
93 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, 93 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
94 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, 94 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
95 [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, 95 [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
96 [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
96 [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, 97 [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
97 [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} 98 [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
98}; 99};
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
151 int prev_ts_set = 0; 152 int prev_ts_set = 0;
152 int idx, end; 153 int idx, end;
153 154
154#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000) 155#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
155 156
156 idx = atomic_read(&dev->rdev.wr_log_idx) & 157 idx = atomic_read(&dev->rdev.wr_log_idx) &
157 (dev->rdev.wr_log_size - 1); 158 (dev->rdev.wr_log_size - 1);
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v)
489 dev->rdev.stats.act_ofld_conn_fails); 490 dev->rdev.stats.act_ofld_conn_fails);
490 seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", 491 seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
491 dev->rdev.stats.pas_ofld_conn_fails); 492 dev->rdev.stats.pas_ofld_conn_fails);
493 seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
492 seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird); 494 seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
493 return 0; 495 return 0;
494} 496}
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data)
560 cc = snprintf(epd->buf + epd->pos, space, 562 cc = snprintf(epd->buf + epd->pos, space,
561 "ep %p cm_id %p qp %p state %d flags 0x%lx " 563 "ep %p cm_id %p qp %p state %d flags 0x%lx "
562 "history 0x%lx hwtid %d atid %d " 564 "history 0x%lx hwtid %d atid %d "
565 "conn_na %u abort_na %u "
563 "%pI4:%d/%d <-> %pI4:%d/%d\n", 566 "%pI4:%d/%d <-> %pI4:%d/%d\n",
564 ep, ep->com.cm_id, ep->com.qp, 567 ep, ep->com.cm_id, ep->com.qp,
565 (int)ep->com.state, ep->com.flags, 568 (int)ep->com.state, ep->com.flags,
566 ep->com.history, ep->hwtid, ep->atid, 569 ep->com.history, ep->hwtid, ep->atid,
570 ep->stats.connect_neg_adv,
571 ep->stats.abort_neg_adv,
567 &lsin->sin_addr, ntohs(lsin->sin_port), 572 &lsin->sin_addr, ntohs(lsin->sin_port),
568 ntohs(mapped_lsin->sin_port), 573 ntohs(mapped_lsin->sin_port),
569 &rsin->sin_addr, ntohs(rsin->sin_port), 574 &rsin->sin_addr, ntohs(rsin->sin_port),
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data)
581 cc = snprintf(epd->buf + epd->pos, space, 586 cc = snprintf(epd->buf + epd->pos, space,
582 "ep %p cm_id %p qp %p state %d flags 0x%lx " 587 "ep %p cm_id %p qp %p state %d flags 0x%lx "
583 "history 0x%lx hwtid %d atid %d " 588 "history 0x%lx hwtid %d atid %d "
589 "conn_na %u abort_na %u "
584 "%pI6:%d/%d <-> %pI6:%d/%d\n", 590 "%pI6:%d/%d <-> %pI6:%d/%d\n",
585 ep, ep->com.cm_id, ep->com.qp, 591 ep, ep->com.cm_id, ep->com.qp,
586 (int)ep->com.state, ep->com.flags, 592 (int)ep->com.state, ep->com.flags,
587 ep->com.history, ep->hwtid, ep->atid, 593 ep->com.history, ep->hwtid, ep->atid,
594 ep->stats.connect_neg_adv,
595 ep->stats.abort_neg_adv,
588 &lsin6->sin6_addr, ntohs(lsin6->sin6_port), 596 &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
589 ntohs(mapped_lsin6->sin6_port), 597 ntohs(mapped_lsin6->sin6_port),
590 &rsin6->sin6_addr, ntohs(rsin6->sin6_port), 598 &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
@@ -765,6 +773,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
765 c4iw_init_dev_ucontext(rdev, &rdev->uctx); 773 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
766 774
767 /* 775 /*
776 * This implementation assumes udb_density == ucq_density! Eventually
777 * we might need to support this but for now fail the open. Also the
778 * cqid and qpid range must match for now.
779 */
780 if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
781 pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
782 pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
783 rdev->lldi.ucq_density);
784 err = -EINVAL;
785 goto err1;
786 }
787 if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
788 rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
789 pr_err(MOD "%s: unsupported qp and cq id ranges "
790 "qp start %u size %u cq start %u size %u\n",
791 pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
792 rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
793 rdev->lldi.vr->cq.size);
794 err = -EINVAL;
795 goto err1;
796 }
797
798 /*
768 * qpshift is the number of bits to shift the qpid left in order 799 * qpshift is the number of bits to shift the qpid left in order
769 * to get the correct address of the doorbell for that qp. 800 * to get the correct address of the doorbell for that qp.
770 */ 801 */
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
784 rdev->lldi.vr->qp.size, 815 rdev->lldi.vr->qp.size,
785 rdev->lldi.vr->cq.start, 816 rdev->lldi.vr->cq.start,
786 rdev->lldi.vr->cq.size); 817 rdev->lldi.vr->cq.size);
787 PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu " 818 PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
788 "qpmask 0x%x cqshift %lu cqmask 0x%x\n", 819 "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
789 (unsigned)pci_resource_len(rdev->lldi.pdev, 2), 820 (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
790 (u64)pci_resource_start(rdev->lldi.pdev, 2), 821 (void *)pci_resource_start(rdev->lldi.pdev, 2),
791 rdev->lldi.db_reg, 822 rdev->lldi.db_reg,
792 rdev->lldi.gts_reg, 823 rdev->lldi.gts_reg,
793 rdev->qpshift, rdev->qpmask, 824 rdev->qpshift, rdev->qpmask,
@@ -1355,7 +1386,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
1355 t4_sq_host_wq_pidx(&qp->wq), 1386 t4_sq_host_wq_pidx(&qp->wq),
1356 t4_sq_wq_size(&qp->wq)); 1387 t4_sq_wq_size(&qp->wq));
1357 if (ret) { 1388 if (ret) {
1358 pr_err(KERN_ERR MOD "%s: Fatal error - " 1389 pr_err(MOD "%s: Fatal error - "
1359 "DB overflow recovery failed - " 1390 "DB overflow recovery failed - "
1360 "error syncing SQ qid %u\n", 1391 "error syncing SQ qid %u\n",
1361 pci_name(ctx->lldi.pdev), qp->wq.sq.qid); 1392 pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
@@ -1371,7 +1402,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
1371 t4_rq_wq_size(&qp->wq)); 1402 t4_rq_wq_size(&qp->wq));
1372 1403
1373 if (ret) { 1404 if (ret) {
1374 pr_err(KERN_ERR MOD "%s: Fatal error - " 1405 pr_err(MOD "%s: Fatal error - "
1375 "DB overflow recovery failed - " 1406 "DB overflow recovery failed - "
1376 "error syncing RQ qid %u\n", 1407 "error syncing RQ qid %u\n",
1377 pci_name(ctx->lldi.pdev), qp->wq.rq.qid); 1408 pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d87e1650f643..97bb5550a6cf 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -137,6 +137,7 @@ struct c4iw_stats {
137 u64 tcam_full; 137 u64 tcam_full;
138 u64 act_ofld_conn_fails; 138 u64 act_ofld_conn_fails;
139 u64 pas_ofld_conn_fails; 139 u64 pas_ofld_conn_fails;
140 u64 neg_adv;
140}; 141};
141 142
142struct c4iw_hw_queue { 143struct c4iw_hw_queue {
@@ -814,6 +815,11 @@ struct c4iw_listen_ep {
814 int backlog; 815 int backlog;
815}; 816};
816 817
818struct c4iw_ep_stats {
819 unsigned connect_neg_adv;
820 unsigned abort_neg_adv;
821};
822
817struct c4iw_ep { 823struct c4iw_ep {
818 struct c4iw_ep_common com; 824 struct c4iw_ep_common com;
819 struct c4iw_ep *parent_ep; 825 struct c4iw_ep *parent_ep;
@@ -846,6 +852,7 @@ struct c4iw_ep {
846 unsigned int retry_count; 852 unsigned int retry_count;
847 int snd_win; 853 int snd_win;
848 int rcv_win; 854 int rcv_win;
855 struct c4iw_ep_stats stats;
849}; 856};
850 857
851static inline void print_addr(struct c4iw_ep_common *epc, const char *func, 858static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 3ef0cf9f5c44..cff815b91707 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
144 if (i == (num_wqe-1)) { 144 if (i == (num_wqe-1)) {
145 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | 145 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
146 FW_WR_COMPL_F); 146 FW_WR_COMPL_F);
147 req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; 147 req->wr.wr_lo = (__force __be64)&wr_wait;
148 } else 148 } else
149 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); 149 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
150 req->wr.wr_mid = cpu_to_be32( 150 req->wr.wr_mid = cpu_to_be32(
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
676 mhp->attr.zbva = 0; 676 mhp->attr.zbva = 0;
677 mhp->attr.va_fbo = 0; 677 mhp->attr.va_fbo = 0;
678 mhp->attr.page_size = 0; 678 mhp->attr.page_size = 0;
679 mhp->attr.len = ~0UL; 679 mhp->attr.len = ~0ULL;
680 mhp->attr.pbl_size = 0; 680 mhp->attr.pbl_size = 0;
681 681
682 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, 682 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
683 FW_RI_STAG_NSMR, mhp->attr.perms, 683 FW_RI_STAG_NSMR, mhp->attr.perms,
684 mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0); 684 mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
685 if (ret) 685 if (ret)
686 goto err1; 686 goto err1;
687 687
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 15cae5a31018..389ced335bc5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
275 FW_RI_RES_WR_NRES_V(2) | 275 FW_RI_RES_WR_NRES_V(2) |
276 FW_WR_COMPL_F); 276 FW_WR_COMPL_F);
277 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); 277 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
278 res_wr->cookie = (unsigned long) &wr_wait; 278 res_wr->cookie = (uintptr_t)&wr_wait;
279 res = res_wr->res; 279 res = res_wr->res;
280 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; 280 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
281 res->u.sqrq.op = FW_RI_RES_OP_WRITE; 281 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1209 wqe->flowid_len16 = cpu_to_be32( 1209 wqe->flowid_len16 = cpu_to_be32(
1210 FW_WR_FLOWID_V(ep->hwtid) | 1210 FW_WR_FLOWID_V(ep->hwtid) |
1211 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); 1211 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1212 wqe->cookie = (unsigned long) &ep->com.wr_wait; 1212 wqe->cookie = (uintptr_t)&ep->com.wr_wait;
1213 1213
1214 wqe->u.fini.type = FW_RI_TYPE_FINI; 1214 wqe->u.fini.type = FW_RI_TYPE_FINI;
1215 ret = c4iw_ofld_send(&rhp->rdev, skb); 1215 ret = c4iw_ofld_send(&rhp->rdev, skb);
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1279 FW_WR_FLOWID_V(qhp->ep->hwtid) | 1279 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1280 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); 1280 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1281 1281
1282 wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait; 1282 wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
1283 1283
1284 wqe->u.init.type = FW_RI_TYPE_INIT; 1284 wqe->u.init.type = FW_RI_TYPE_INIT;
1285 wqe->u.init.mpareqbit_p2ptype = 1285 wqe->u.init.mpareqbit_p2ptype =
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1766 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); 1766 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1767 insert_mmap(ucontext, mm2); 1767 insert_mmap(ucontext, mm2);
1768 mm3->key = uresp.sq_db_gts_key; 1768 mm3->key = uresp.sq_db_gts_key;
1769 mm3->addr = (__force unsigned long) qhp->wq.sq.udb; 1769 mm3->addr = (__force unsigned long)qhp->wq.sq.udb;
1770 mm3->len = PAGE_SIZE; 1770 mm3->len = PAGE_SIZE;
1771 insert_mmap(ucontext, mm3); 1771 insert_mmap(ucontext, mm3);
1772 mm4->key = uresp.rq_db_gts_key; 1772 mm4->key = uresp.rq_db_gts_key;
1773 mm4->addr = (__force unsigned long) qhp->wq.rq.udb; 1773 mm4->addr = (__force unsigned long)qhp->wq.rq.udb;
1774 mm4->len = PAGE_SIZE; 1774 mm4->len = PAGE_SIZE;
1775 insert_mmap(ucontext, mm4); 1775 insert_mmap(ucontext, mm4);
1776 if (mm5) { 1776 if (mm5) {
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 871cdcac7be2..7f2a6c244d25 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -539,6 +539,7 @@ struct t4_cq {
539 size_t memsize; 539 size_t memsize;
540 __be64 bits_type_ts; 540 __be64 bits_type_ts;
541 u32 cqid; 541 u32 cqid;
542 u32 qid_mask;
542 int vector; 543 int vector;
543 u16 size; /* including status page */ 544 u16 size; /* including status page */
544 u16 cidx; 545 u16 cidx;
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
563 set_bit(CQ_ARMED, &cq->flags); 564 set_bit(CQ_ARMED, &cq->flags);
564 while (cq->cidx_inc > CIDXINC_M) { 565 while (cq->cidx_inc > CIDXINC_M) {
565 val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | 566 val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
566 INGRESSQID_V(cq->cqid); 567 INGRESSQID_V(cq->cqid & cq->qid_mask);
567 writel(val, cq->gts); 568 writel(val, cq->gts);
568 cq->cidx_inc -= CIDXINC_M; 569 cq->cidx_inc -= CIDXINC_M;
569 } 570 }
570 val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | 571 val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
571 INGRESSQID_V(cq->cqid); 572 INGRESSQID_V(cq->cqid & cq->qid_mask);
572 writel(val, cq->gts); 573 writel(val, cq->gts);
573 cq->cidx_inc = 0; 574 cq->cidx_inc = 0;
574 return 0; 575 return 0;
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq)
601 u32 val; 602 u32 val;
602 603
603 val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | 604 val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
604 INGRESSQID_V(cq->cqid); 605 INGRESSQID_V(cq->cqid & cq->qid_mask);
605 writel(val, cq->gts); 606 writel(val, cq->gts);
606 cq->cidx_inc = 0; 607 cq->cidx_inc = 0;
607 } 608 }
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 5e53327fc647..343e8daf2270 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -848,6 +848,8 @@ enum { /* TCP congestion control algorithms */
848#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) 848#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
849#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) 849#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
850 850
851#define CONG_CNTRL_VALID (1 << 18) 851#define T5_ISS_S 18
852#define T5_ISS_V(x) ((x) << T5_ISS_S)
853#define T5_ISS_F T5_ISS_V(1U)
852 854
853#endif /* _T4FW_RI_API_H_ */ 855#endif /* _T4FW_RI_API_H_ */
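
The replacement of CONG_CNTRL_VALID with the T5_ISS_S/_V/_F trio above follows the same bit-field macro convention already visible in this header (_S is the shift, _V places a value, _F is the single-bit flag form). A tiny stand-alone check, with the macro definitions copied from the hunk above:

#include <stdio.h>

#define T5_ISS_S        18
#define T5_ISS_V(x)     ((x) << T5_ISS_S)
#define T5_ISS_F        T5_ISS_V(1U)

int main(void)
{
        unsigned int opt2 = 0;

        opt2 |= T5_ISS_F;       /* same effect as the old (1 << 18) flag */
        printf("T5_ISS_F = 0x%x\n", opt2);      /* prints 0x40000 */
        return 0;
}
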
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
index 120aedf9f989..cec181532924 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -77,7 +77,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
77 return -EINVAL; 77 return -EINVAL;
78 } 78 }
79 79
80 memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); 80 memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
81 81
82 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); 82 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
83 interface_id = be64_to_cpu(my_gid.global.interface_id); 83 interface_id = be64_to_cpu(my_gid.global.interface_id);
@@ -114,7 +114,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
114 return -EINVAL; 114 return -EINVAL;
115 } 115 }
116 116
117 memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); 117 memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
118 118
119 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); 119 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
120 interface_id = be64_to_cpu(my_gid.global.interface_id); 120 interface_id = be64_to_cpu(my_gid.global.interface_id);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 57070c529dfb..cc64400d41ac 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1569,8 +1569,7 @@ static void reset_gids_task(struct work_struct *work)
1569 MLX4_CMD_TIME_CLASS_B, 1569 MLX4_CMD_TIME_CLASS_B,
1570 MLX4_CMD_WRAPPED); 1570 MLX4_CMD_WRAPPED);
1571 if (err) 1571 if (err)
1572 pr_warn(KERN_WARNING 1572 pr_warn("set port %d command failed\n", gw->port);
1573 "set port %d command failed\n", gw->port);
1574 } 1573 }
1575 1574
1576 mlx4_free_cmd_mailbox(dev, mailbox); 1575 mlx4_free_cmd_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 4d7024b899cb..d35f62d4f4c5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1392,7 +1392,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1392 1392
1393 if (ah->ah_flags & IB_AH_GRH) { 1393 if (ah->ah_flags & IB_AH_GRH) {
1394 if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { 1394 if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
1395 pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n", 1395 pr_err("sgid_index (%u) too large. max is %d\n",
1396 ah->grh.sgid_index, gen->port[port - 1].gid_table_len); 1396 ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
1397 return -EINVAL; 1397 return -EINVAL;
1398 } 1398 }
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b2a6dc8ea99..9f9d5c563a61 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = {
116 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, 116 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
117 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, 117 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
118 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, 118 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
119 [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
119 [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, 120 [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
120 [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, 121 [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
121 [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} 122 [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6f09a72e78d7..72b43417cbe3 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic,
596 memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); 596 memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
597} 597}
598 598
599static void record_sockaddr_info(struct sockaddr_storage *addr_info,
600 nes_addr_t *ip_addr, u16 *port_num)
601{
602 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
603
604 if (in_addr->sin_family == AF_INET) {
605 *ip_addr = ntohl(in_addr->sin_addr.s_addr);
606 *port_num = ntohs(in_addr->sin_port);
607 }
608}
609
599/* 610/*
600 * nes_record_pm_msg - Save the received mapping info 611 * nes_record_pm_msg - Save the received mapping info
601 */ 612 */
602static void nes_record_pm_msg(struct nes_cm_info *cm_info, 613static void nes_record_pm_msg(struct nes_cm_info *cm_info,
603 struct iwpm_sa_data *pm_msg) 614 struct iwpm_sa_data *pm_msg)
604{ 615{
605 struct sockaddr_in *mapped_loc_addr = 616 record_sockaddr_info(&pm_msg->mapped_loc_addr,
606 (struct sockaddr_in *)&pm_msg->mapped_loc_addr; 617 &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
607 struct sockaddr_in *mapped_rem_addr = 618
608 (struct sockaddr_in *)&pm_msg->mapped_rem_addr; 619 record_sockaddr_info(&pm_msg->mapped_rem_addr,
609 620 &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
610 if (mapped_loc_addr->sin_family == AF_INET) { 621}
611 cm_info->mapped_loc_addr = 622
612 ntohl(mapped_loc_addr->sin_addr.s_addr); 623/*
613 cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port); 624 * nes_get_reminfo - Get the address info of the remote connecting peer
614 } 625 */
615 if (mapped_rem_addr->sin_family == AF_INET) { 626static int nes_get_remote_addr(struct nes_cm_node *cm_node)
616 cm_info->mapped_rem_addr = 627{
617 ntohl(mapped_rem_addr->sin_addr.s_addr); 628 struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
618 cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port); 629 struct sockaddr_storage remote_addr;
619 } 630 int ret;
631
632 nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
633 htons(cm_node->mapped_loc_port), &mapped_loc_addr);
634 nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
635 htons(cm_node->mapped_rem_port), &mapped_rem_addr);
636
637 ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
638 &remote_addr, RDMA_NL_NES);
639 if (ret)
640 nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
641 else
642 record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
643 &cm_node->rem_port);
644 return ret;
620} 645}
621 646
622/** 647/**
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1566 return NULL; 1591 return NULL;
1567 1592
1568 /* set our node specific transport info */ 1593 /* set our node specific transport info */
1569 cm_node->loc_addr = cm_info->loc_addr; 1594 if (listener) {
1595 cm_node->loc_addr = listener->loc_addr;
1596 cm_node->loc_port = listener->loc_port;
1597 } else {
1598 cm_node->loc_addr = cm_info->loc_addr;
1599 cm_node->loc_port = cm_info->loc_port;
1600 }
1570 cm_node->rem_addr = cm_info->rem_addr; 1601 cm_node->rem_addr = cm_info->rem_addr;
1571 cm_node->loc_port = cm_info->loc_port;
1572 cm_node->rem_port = cm_info->rem_port; 1602 cm_node->rem_port = cm_info->rem_port;
1573 1603
1574 cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; 1604 cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
2151 cm_node->state = NES_CM_STATE_ESTABLISHED; 2181 cm_node->state = NES_CM_STATE_ESTABLISHED;
2152 if (datasize) { 2182 if (datasize) {
2153 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; 2183 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2184 nes_get_remote_addr(cm_node);
2154 handle_rcv_mpa(cm_node, skb); 2185 handle_rcv_mpa(cm_node, skb);
2155 } else { /* rcvd ACK only */ 2186 } else { /* rcvd ACK only */
2156 dev_kfree_skb_any(skb); 2187 dev_kfree_skb_any(skb);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index ffd48bfc4923..7df16f74bb45 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -903,7 +903,7 @@ struct qib_devdata {
903 /* PCI Device ID (here for NodeInfo) */ 903 /* PCI Device ID (here for NodeInfo) */
904 u16 deviceid; 904 u16 deviceid;
905 /* for write combining settings */ 905 /* for write combining settings */
906 unsigned long wc_cookie; 906 int wc_cookie;
907 unsigned long wc_base; 907 unsigned long wc_base;
908 unsigned long wc_len; 908 unsigned long wc_len;
909 909
@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit);
1136extern u32 qib_cpulist_count; 1136extern u32 qib_cpulist_count;
1137extern unsigned long *qib_cpulist; 1137extern unsigned long *qib_cpulist;
1138 1138
1139extern unsigned qib_wc_pat;
1140extern unsigned qib_cc_table_size; 1139extern unsigned qib_cc_table_size;
1141int qib_init(struct qib_devdata *, int); 1140int qib_init(struct qib_devdata *, int);
1142int init_chip_wc_pat(struct qib_devdata *dd, u32); 1141int init_chip_wc_pat(struct qib_devdata *dd, u32);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 9ea6c440a00c..725881890c4a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
835 vma->vm_flags &= ~VM_MAYREAD; 835 vma->vm_flags &= ~VM_MAYREAD;
836 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 836 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
837 837
838 if (qib_wc_pat) 838 /* We used PAT if wc_cookie == 0 */
839 if (!dd->wc_cookie)
839 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 840 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
840 841
841 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, 842 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 0d2ba59af30a..4b927809d1a1 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd)
3315 qib_6120_config_ctxts(dd); 3315 qib_6120_config_ctxts(dd);
3316 qib_set_ctxtcnt(dd); 3316 qib_set_ctxtcnt(dd);
3317 3317
3318 if (qib_wc_pat) { 3318 ret = init_chip_wc_pat(dd, 0);
3319 ret = init_chip_wc_pat(dd, 0); 3319 if (ret)
3320 if (ret) 3320 goto bail;
3321 goto bail;
3322 }
3323 set_6120_baseaddrs(dd); /* set chip access pointers now */ 3321 set_6120_baseaddrs(dd); /* set chip access pointers now */
3324 3322
3325 ret = 0; 3323 ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 22affda8af88..00b2af211157 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
4126 qib_7220_config_ctxts(dd); 4126 qib_7220_config_ctxts(dd);
4127 qib_set_ctxtcnt(dd); /* needed for PAT setup */ 4127 qib_set_ctxtcnt(dd); /* needed for PAT setup */
4128 4128
4129 if (qib_wc_pat) { 4129 ret = init_chip_wc_pat(dd, 0);
4130 ret = init_chip_wc_pat(dd, 0); 4130 if (ret)
4131 if (ret) 4131 goto bail;
4132 goto bail;
4133 }
4134 set_7220_baseaddrs(dd); /* set chip access pointers now */ 4132 set_7220_baseaddrs(dd); /* set chip access pointers now */
4135 4133
4136 ret = 0; 4134 ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ef97b71c8f7d..f32b4628e991 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6429 unsigned features, pidx, sbufcnt; 6429 unsigned features, pidx, sbufcnt;
6430 int ret, mtu; 6430 int ret, mtu;
6431 u32 sbufs, updthresh; 6431 u32 sbufs, updthresh;
6432 resource_size_t vl15off;
6432 6433
6433 /* pport structs are contiguous, allocated after devdata */ 6434 /* pport structs are contiguous, allocated after devdata */
6434 ppd = (struct qib_pportdata *)(dd + 1); 6435 ppd = (struct qib_pportdata *)(dd + 1);
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6677 qib_7322_config_ctxts(dd); 6678 qib_7322_config_ctxts(dd);
6678 qib_set_ctxtcnt(dd); 6679 qib_set_ctxtcnt(dd);
6679 6680
6680 if (qib_wc_pat) { 6681 /*
6681 resource_size_t vl15off; 6682 * We do not set WC on the VL15 buffers to avoid
6682 /* 6683 * a rare problem with unaligned writes from
6683 * We do not set WC on the VL15 buffers to avoid 6684 * interrupt-flushed store buffers, so we need
6684 * a rare problem with unaligned writes from 6685 * to map those separately here. We can't solve
6685 * interrupt-flushed store buffers, so we need 6686 * this for the rarely used mtrr case.
6686 * to map those separately here. We can't solve 6687 */
6687 * this for the rarely used mtrr case. 6688 ret = init_chip_wc_pat(dd, 0);
6688 */ 6689 if (ret)
6689 ret = init_chip_wc_pat(dd, 0); 6690 goto bail;
6690 if (ret)
6691 goto bail;
6692 6691
6693 /* vl15 buffers start just after the 4k buffers */ 6692 /* vl15 buffers start just after the 4k buffers */
6694 vl15off = dd->physaddr + (dd->piobufbase >> 32) + 6693 vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6695 dd->piobcnt4k * dd->align4k; 6694 dd->piobcnt4k * dd->align4k;
6696 dd->piovl15base = ioremap_nocache(vl15off, 6695 dd->piovl15base = ioremap_nocache(vl15off,
6697 NUM_VL15_BUFS * dd->align4k); 6696 NUM_VL15_BUFS * dd->align4k);
6698 if (!dd->piovl15base) { 6697 if (!dd->piovl15base) {
6699 ret = -ENOMEM; 6698 ret = -ENOMEM;
6700 goto bail; 6699 goto bail;
6701 }
6702 } 6700 }
6701
6703 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ 6702 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6704 6703
6705 ret = 0; 6704 ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 2ee36953e234..7e00470adc30 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
91unsigned qib_cc_table_size; 91unsigned qib_cc_table_size;
92module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); 92module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
93MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); 93MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
94/*
95 * qib_wc_pat parameter:
96 * 0 is WC via MTRR
97 * 1 is WC via PAT
98 * If PAT initialization fails, code reverts back to MTRR
99 */
100unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
101module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
102MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
103 94
104static void verify_interrupt(unsigned long); 95static void verify_interrupt(unsigned long);
105 96
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
1377 spin_unlock(&dd->pport[pidx].cc_shadow_lock); 1368 spin_unlock(&dd->pport[pidx].cc_shadow_lock);
1378 } 1369 }
1379 1370
1380 if (!qib_wc_pat) 1371 qib_disable_wc(dd);
1381 qib_disable_wc(dd);
1382 1372
1383 if (dd->pioavailregs_dma) { 1373 if (dd->pioavailregs_dma) {
1384 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 1374 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1547 goto bail; 1537 goto bail;
1548 } 1538 }
1549 1539
1550 if (!qib_wc_pat) { 1540 ret = qib_enable_wc(dd);
1551 ret = qib_enable_wc(dd); 1541 if (ret) {
1552 if (ret) { 1542 qib_dev_err(dd,
1553 qib_dev_err(dd, 1543 "Write combining not enabled (err %d): performance may be poor\n",
1554 "Write combining not enabled (err %d): performance may be poor\n", 1544 -ret);
1555 -ret); 1545 ret = 0;
1556 ret = 0;
1557 }
1558 } 1546 }
1559 1547
1560 qib_verify_pioperf(dd); 1548 qib_verify_pioperf(dd);
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 81b225f2300a..edd0ddbd4481 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -116,21 +116,10 @@ int qib_enable_wc(struct qib_devdata *dd)
116 } 116 }
117 117
118 if (!ret) { 118 if (!ret) {
119 int cookie; 119 dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
120 120 if (dd->wc_cookie < 0)
121 cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0); 121 /* use error from routine */
122 if (cookie < 0) { 122 ret = dd->wc_cookie;
123 {
124 qib_devinfo(dd->pcidev,
125 "mtrr_add() WC for PIO bufs failed (%d)\n",
126 cookie);
127 ret = -EINVAL;
128 }
129 } else {
130 dd->wc_cookie = cookie;
131 dd->wc_base = (unsigned long) pioaddr;
132 dd->wc_len = (unsigned long) piolen;
133 }
134 } 123 }
135 124
136 return ret; 125 return ret;
@@ -142,18 +131,7 @@ int qib_enable_wc(struct qib_devdata *dd)
142 */ 131 */
143void qib_disable_wc(struct qib_devdata *dd) 132void qib_disable_wc(struct qib_devdata *dd)
144{ 133{
145 if (dd->wc_cookie) { 134 arch_phys_wc_del(dd->wc_cookie);
146 int r;
147
148 r = mtrr_del(dd->wc_cookie, dd->wc_base,
149 dd->wc_len);
150 if (r < 0)
151 qib_devinfo(dd->pcidev,
152 "mtrr_del(%lx, %lx, %lx) failed: %d\n",
153 dd->wc_cookie, dd->wc_base,
154 dd->wc_len, r);
155 dd->wc_cookie = 0; /* even on failure */
156 }
157} 135}
158 136
159/** 137/**
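
For context on the qib_enable_wc()/qib_disable_wc() rewrite above: arch_phys_wc_add() lets the core choose PAT or MTRR for write-combining, and arch_phys_wc_del() tolerates the non-MTRR cases, which is why the mtrr_add()/mtrr_del() bookkeeping could be dropped. A minimal sketch of the pattern, using a placeholder device structure rather than qib's devdata; the cookie meaning follows the "We used PAT if wc_cookie == 0" comment in the file_ops hunk above.

#include <linux/errno.h>
#include <linux/io.h>

struct my_dev {
        int wc_cookie;  /* 0: PAT in use, > 0: MTRR cookie, < 0: error */
};

static int my_enable_wc(struct my_dev *dd, unsigned long pioaddr,
                        unsigned long piolen)
{
        /* The core picks PAT or MTRR; no direct mtrr_add() call needed. */
        dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
        if (dd->wc_cookie < 0)
                return dd->wc_cookie;   /* "use error from routine", as above */
        return 0;
}

static void my_disable_wc(struct my_dev *dd)
{
        /* A cookie of 0 (the PAT case) is simply ignored here. */
        arch_phys_wc_del(dd->wc_cookie);
}
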
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959adb6c7d..cf32a778e7d0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
386 rx->rx_ring[i].mapping, 386 rx->rx_ring[i].mapping,
387 GFP_KERNEL)) { 387 GFP_KERNEL)) {
388 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); 388 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
389 ret = -ENOMEM; 389 ret = -ENOMEM;
390 goto err_count; 390 goto err_count;
391 } 391 }
392 ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); 392 ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
393 if (ret) { 393 if (ret) {
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index a1cbba9056fd..3465faf1809e 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
266 266
267static void put_pasid_state_wait(struct pasid_state *pasid_state) 267static void put_pasid_state_wait(struct pasid_state *pasid_state)
268{ 268{
269 atomic_dec(&pasid_state->count);
269 wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); 270 wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
270 free_pasid_state(pasid_state); 271 free_pasid_state(pasid_state);
271} 272}
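
The one-line amd_iommu_v2 change above is the usual drop-reference-then-wait teardown: the caller must release its own reference before waiting for the count to reach zero, otherwise the wait condition can never become true. A generic sketch of the pattern with placeholder names (my_state is not the driver's pasid_state):

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct my_state {
        atomic_t                count;
        wait_queue_head_t       wq;
};

static void my_put_state_wait(struct my_state *st)
{
        /* Drop the caller's reference first (the added atomic_dec above),
         * then wait until every remaining user has dropped theirs; those
         * users are expected to wake_up(&st->wq) after their final put. */
        atomic_dec(&st->count);
        wait_event(st->wq, !atomic_read(&st->count));
        kfree(st);
}
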
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 9f7e1d34a32b..66a803b9dd3a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -224,14 +224,7 @@
224#define RESUME_TERMINATE (1 << 0) 224#define RESUME_TERMINATE (1 << 0)
225 225
226#define TTBCR2_SEP_SHIFT 15 226#define TTBCR2_SEP_SHIFT 15
227#define TTBCR2_SEP_MASK 0x7 227#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
228
229#define TTBCR2_ADDR_32 0
230#define TTBCR2_ADDR_36 1
231#define TTBCR2_ADDR_40 2
232#define TTBCR2_ADDR_42 3
233#define TTBCR2_ADDR_44 4
234#define TTBCR2_ADDR_48 5
235 228
236#define TTBRn_HI_ASID_SHIFT 16 229#define TTBRn_HI_ASID_SHIFT 16
237 230
@@ -793,26 +786,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
793 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); 786 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
794 if (smmu->version > ARM_SMMU_V1) { 787 if (smmu->version > ARM_SMMU_V1) {
795 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; 788 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
796 switch (smmu->va_size) { 789 reg |= TTBCR2_SEP_UPSTREAM;
797 case 32:
798 reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
799 break;
800 case 36:
801 reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
802 break;
803 case 40:
804 reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
805 break;
806 case 42:
807 reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
808 break;
809 case 44:
810 reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
811 break;
812 case 48:
813 reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
814 break;
815 }
816 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); 790 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
817 } 791 }
818 } else { 792 } else {
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4015560bf486..cab214544237 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1004,20 +1004,18 @@ static int rk_iommu_remove(struct platform_device *pdev)
1004 return 0; 1004 return 0;
1005} 1005}
1006 1006
1007#ifdef CONFIG_OF
1008static const struct of_device_id rk_iommu_dt_ids[] = { 1007static const struct of_device_id rk_iommu_dt_ids[] = {
1009 { .compatible = "rockchip,iommu" }, 1008 { .compatible = "rockchip,iommu" },
1010 { /* sentinel */ } 1009 { /* sentinel */ }
1011}; 1010};
1012MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); 1011MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
1013#endif
1014 1012
1015static struct platform_driver rk_iommu_driver = { 1013static struct platform_driver rk_iommu_driver = {
1016 .probe = rk_iommu_probe, 1014 .probe = rk_iommu_probe,
1017 .remove = rk_iommu_remove, 1015 .remove = rk_iommu_remove,
1018 .driver = { 1016 .driver = {
1019 .name = "rk_iommu", 1017 .name = "rk_iommu",
1020 .of_match_table = of_match_ptr(rk_iommu_dt_ids), 1018 .of_match_table = rk_iommu_dt_ids,
1021 }, 1019 },
1022}; 1020};
1023 1021
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7b315e385ba3..01999d74bd3a 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -82,19 +82,6 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
82#define NR_GIC_CPU_IF 8 82#define NR_GIC_CPU_IF 8
83static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; 83static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
84 84
85/*
86 * Supported arch specific GIC irq extension.
87 * Default make them NULL.
88 */
89struct irq_chip gic_arch_extn = {
90 .irq_eoi = NULL,
91 .irq_mask = NULL,
92 .irq_unmask = NULL,
93 .irq_retrigger = NULL,
94 .irq_set_type = NULL,
95 .irq_set_wake = NULL,
96};
97
98#ifndef MAX_GIC_NR 85#ifndef MAX_GIC_NR
99#define MAX_GIC_NR 1 86#define MAX_GIC_NR 1
100#endif 87#endif
@@ -167,34 +154,16 @@ static int gic_peek_irq(struct irq_data *d, u32 offset)
167 154
168static void gic_mask_irq(struct irq_data *d) 155static void gic_mask_irq(struct irq_data *d)
169{ 156{
170 unsigned long flags;
171
172 raw_spin_lock_irqsave(&irq_controller_lock, flags);
173 gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR); 157 gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
174 if (gic_arch_extn.irq_mask)
175 gic_arch_extn.irq_mask(d);
176 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
177} 158}
178 159
179static void gic_unmask_irq(struct irq_data *d) 160static void gic_unmask_irq(struct irq_data *d)
180{ 161{
181 unsigned long flags;
182
183 raw_spin_lock_irqsave(&irq_controller_lock, flags);
184 if (gic_arch_extn.irq_unmask)
185 gic_arch_extn.irq_unmask(d);
186 gic_poke_irq(d, GIC_DIST_ENABLE_SET); 162 gic_poke_irq(d, GIC_DIST_ENABLE_SET);
187 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
188} 163}
189 164
190static void gic_eoi_irq(struct irq_data *d) 165static void gic_eoi_irq(struct irq_data *d)
191{ 166{
192 if (gic_arch_extn.irq_eoi) {
193 raw_spin_lock(&irq_controller_lock);
194 gic_arch_extn.irq_eoi(d);
195 raw_spin_unlock(&irq_controller_lock);
196 }
197
198 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); 167 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
199} 168}
200 169
@@ -251,8 +220,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
251{ 220{
252 void __iomem *base = gic_dist_base(d); 221 void __iomem *base = gic_dist_base(d);
253 unsigned int gicirq = gic_irq(d); 222 unsigned int gicirq = gic_irq(d);
254 unsigned long flags;
255 int ret;
256 223
257 /* Interrupt configuration for SGIs can't be changed */ 224 /* Interrupt configuration for SGIs can't be changed */
258 if (gicirq < 16) 225 if (gicirq < 16)
@@ -263,25 +230,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
263 type != IRQ_TYPE_EDGE_RISING) 230 type != IRQ_TYPE_EDGE_RISING)
264 return -EINVAL; 231 return -EINVAL;
265 232
266 raw_spin_lock_irqsave(&irq_controller_lock, flags); 233 return gic_configure_irq(gicirq, type, base, NULL);
267
268 if (gic_arch_extn.irq_set_type)
269 gic_arch_extn.irq_set_type(d, type);
270
271 ret = gic_configure_irq(gicirq, type, base, NULL);
272
273 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
274
275 return ret;
276}
277
278static int gic_retrigger(struct irq_data *d)
279{
280 if (gic_arch_extn.irq_retrigger)
281 return gic_arch_extn.irq_retrigger(d);
282
283 /* the genirq layer expects 0 if we can't retrigger in hardware */
284 return 0;
285} 234}
286 235
287#ifdef CONFIG_SMP 236#ifdef CONFIG_SMP
@@ -312,21 +261,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
312} 261}
313#endif 262#endif
314 263
315#ifdef CONFIG_PM
316static int gic_set_wake(struct irq_data *d, unsigned int on)
317{
318 int ret = -ENXIO;
319
320 if (gic_arch_extn.irq_set_wake)
321 ret = gic_arch_extn.irq_set_wake(d, on);
322
323 return ret;
324}
325
326#else
327#define gic_set_wake NULL
328#endif
329
330static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) 264static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
331{ 265{
332 u32 irqstat, irqnr; 266 u32 irqstat, irqnr;
@@ -385,11 +319,9 @@ static struct irq_chip gic_chip = {
385 .irq_unmask = gic_unmask_irq, 319 .irq_unmask = gic_unmask_irq,
386 .irq_eoi = gic_eoi_irq, 320 .irq_eoi = gic_eoi_irq,
387 .irq_set_type = gic_set_type, 321 .irq_set_type = gic_set_type,
388 .irq_retrigger = gic_retrigger,
389#ifdef CONFIG_SMP 322#ifdef CONFIG_SMP
390 .irq_set_affinity = gic_set_affinity, 323 .irq_set_affinity = gic_set_affinity,
391#endif 324#endif
392 .irq_set_wake = gic_set_wake,
393 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 325 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
394 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 326 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
395}; 327};
@@ -1055,7 +987,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
1055 set_handle_irq(gic_handle_irq); 987 set_handle_irq(gic_handle_irq);
1056 } 988 }
1057 989
1058 gic_chip.flags |= gic_arch_extn.flags;
1059 gic_dist_init(gic); 990 gic_dist_init(gic);
1060 gic_cpu_init(gic); 991 gic_cpu_init(gic);
1061 gic_pm_init(gic); 992 gic_pm_init(gic);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9eeea196328a..5503e43e5f28 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc,
925 925
926 switch (r) { 926 switch (r) {
927 /* async */ 927 /* async */
928 case -EINPROGRESS:
929 case -EBUSY: 928 case -EBUSY:
930 wait_for_completion(&ctx->restart); 929 wait_for_completion(&ctx->restart);
931 reinit_completion(&ctx->restart); 930 reinit_completion(&ctx->restart);
931 /* fall through*/
932 case -EINPROGRESS:
932 ctx->req = NULL; 933 ctx->req = NULL;
933 ctx->cc_sector++; 934 ctx->cc_sector++;
934 continue; 935 continue;
@@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
1345 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); 1346 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1346 struct crypt_config *cc = io->cc; 1347 struct crypt_config *cc = io->cc;
1347 1348
1348 if (error == -EINPROGRESS) 1349 if (error == -EINPROGRESS) {
1350 complete(&ctx->restart);
1349 return; 1351 return;
1352 }
1350 1353
1351 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) 1354 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1352 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); 1355 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
@@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
1357 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); 1360 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
1358 1361
1359 if (!atomic_dec_and_test(&ctx->cc_pending)) 1362 if (!atomic_dec_and_test(&ctx->cc_pending))
1360 goto done; 1363 return;
1361 1364
1362 if (bio_data_dir(io->base_bio) == READ) 1365 if (bio_data_dir(io->base_bio) == READ)
1363 kcryptd_crypt_read_done(io); 1366 kcryptd_crypt_read_done(io);
1364 else 1367 else
1365 kcryptd_crypt_write_io_submit(io, 1); 1368 kcryptd_crypt_write_io_submit(io, 1);
1366done:
1367 if (!completion_done(&ctx->restart))
1368 complete(&ctx->restart);
1369} 1369}
1370 1370
1371static void kcryptd_crypt(struct work_struct *work) 1371static void kcryptd_crypt(struct work_struct *work)
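
The dm-crypt reordering above matches the async crypto completion contract: -EINPROGRESS means the request is in flight and the callback will deliver the result, while -EBUSY means it was backlogged and the callback first fires with err == -EINPROGRESS once the request leaves the backlog, which is when the submitter may continue. A simplified submit loop illustrating that contract; submit() and the completion here are stand-ins, not dm-crypt code.

#include <linux/completion.h>
#include <linux/errno.h>

/* restart is completed from the async callback when it sees -EINPROGRESS,
 * which is what kcryptd_async_done() does after the change above. */
static int submit_one(struct completion *restart,
                      int (*submit)(void *req), void *req)
{
        switch (submit(req)) {
        case -EBUSY:
                /* Backlogged: wait until the request is actually picked up... */
                wait_for_completion(restart);
                reinit_completion(restart);
                /* ...then fall through and treat it as in flight. */
        case -EINPROGRESS:
                return 0;       /* result arrives via the async callback */
        case 0:
                return 0;       /* completed synchronously */
        default:
                return -EIO;    /* real code would propagate the error */
        }
}
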
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d4f31e195e26..593a02476c78 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4818,12 +4818,12 @@ static void md_free(struct kobject *ko)
4818 if (mddev->sysfs_state) 4818 if (mddev->sysfs_state)
4819 sysfs_put(mddev->sysfs_state); 4819 sysfs_put(mddev->sysfs_state);
4820 4820
4821 if (mddev->queue)
4822 blk_cleanup_queue(mddev->queue);
4821 if (mddev->gendisk) { 4823 if (mddev->gendisk) {
4822 del_gendisk(mddev->gendisk); 4824 del_gendisk(mddev->gendisk);
4823 put_disk(mddev->gendisk); 4825 put_disk(mddev->gendisk);
4824 } 4826 }
4825 if (mddev->queue)
4826 blk_cleanup_queue(mddev->queue);
4827 4827
4828 kfree(mddev); 4828 kfree(mddev);
4829} 4829}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2cb59a641cd2..6a68ef5246d4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -188,8 +188,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
188 } 188 }
189 dev[j] = rdev1; 189 dev[j] = rdev1;
190 190
191 disk_stack_limits(mddev->gendisk, rdev1->bdev, 191 if (mddev->queue)
192 rdev1->data_offset << 9); 192 disk_stack_limits(mddev->gendisk, rdev1->bdev,
193 rdev1->data_offset << 9);
193 194
194 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) 195 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
195 conf->has_merge_bvec = 1; 196 conf->has_merge_bvec = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 77dfd720aaa0..1ba97fdc6df1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1078,9 +1078,6 @@ again:
1078 pr_debug("skip op %ld on disc %d for sector %llu\n", 1078 pr_debug("skip op %ld on disc %d for sector %llu\n",
1079 bi->bi_rw, i, (unsigned long long)sh->sector); 1079 bi->bi_rw, i, (unsigned long long)sh->sector);
1080 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1080 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1081 if (sh->batch_head)
1082 set_bit(STRIPE_BATCH_ERR,
1083 &sh->batch_head->state);
1084 set_bit(STRIPE_HANDLE, &sh->state); 1081 set_bit(STRIPE_HANDLE, &sh->state);
1085 } 1082 }
1086 1083
@@ -1971,17 +1968,30 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1971 put_cpu(); 1968 put_cpu();
1972} 1969}
1973 1970
1971static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
1972{
1973 struct stripe_head *sh;
1974
1975 sh = kmem_cache_zalloc(sc, gfp);
1976 if (sh) {
1977 spin_lock_init(&sh->stripe_lock);
1978 spin_lock_init(&sh->batch_lock);
1979 INIT_LIST_HEAD(&sh->batch_list);
1980 INIT_LIST_HEAD(&sh->lru);
1981 atomic_set(&sh->count, 1);
1982 }
1983 return sh;
1984}
1974static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) 1985static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
1975{ 1986{
1976 struct stripe_head *sh; 1987 struct stripe_head *sh;
1977 sh = kmem_cache_zalloc(conf->slab_cache, gfp); 1988
1989 sh = alloc_stripe(conf->slab_cache, gfp);
1978 if (!sh) 1990 if (!sh)
1979 return 0; 1991 return 0;
1980 1992
1981 sh->raid_conf = conf; 1993 sh->raid_conf = conf;
1982 1994
1983 spin_lock_init(&sh->stripe_lock);
1984
1985 if (grow_buffers(sh, gfp)) { 1995 if (grow_buffers(sh, gfp)) {
1986 shrink_buffers(sh); 1996 shrink_buffers(sh);
1987 kmem_cache_free(conf->slab_cache, sh); 1997 kmem_cache_free(conf->slab_cache, sh);
@@ -1990,13 +2000,8 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
1990 sh->hash_lock_index = 2000 sh->hash_lock_index =
1991 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 2001 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
1992 /* we just created an active stripe so... */ 2002 /* we just created an active stripe so... */
1993 atomic_set(&sh->count, 1);
1994 atomic_inc(&conf->active_stripes); 2003 atomic_inc(&conf->active_stripes);
1995 INIT_LIST_HEAD(&sh->lru);
1996 2004
1997 spin_lock_init(&sh->batch_lock);
1998 INIT_LIST_HEAD(&sh->batch_list);
1999 sh->batch_head = NULL;
2000 release_stripe(sh); 2005 release_stripe(sh);
2001 conf->max_nr_stripes++; 2006 conf->max_nr_stripes++;
2002 return 1; 2007 return 1;
@@ -2060,6 +2065,35 @@ static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
2060 return ret; 2065 return ret;
2061} 2066}
2062 2067
2068static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
2069{
2070 unsigned long cpu;
2071 int err = 0;
2072
2073 mddev_suspend(conf->mddev);
2074 get_online_cpus();
2075 for_each_present_cpu(cpu) {
2076 struct raid5_percpu *percpu;
2077 struct flex_array *scribble;
2078
2079 percpu = per_cpu_ptr(conf->percpu, cpu);
2080 scribble = scribble_alloc(new_disks,
2081 new_sectors / STRIPE_SECTORS,
2082 GFP_NOIO);
2083
2084 if (scribble) {
2085 flex_array_free(percpu->scribble);
2086 percpu->scribble = scribble;
2087 } else {
2088 err = -ENOMEM;
2089 break;
2090 }
2091 }
2092 put_online_cpus();
2093 mddev_resume(conf->mddev);
2094 return err;
2095}
2096
2063static int resize_stripes(struct r5conf *conf, int newsize) 2097static int resize_stripes(struct r5conf *conf, int newsize)
2064{ 2098{
2065 /* Make all the stripes able to hold 'newsize' devices. 2099 /* Make all the stripes able to hold 'newsize' devices.
@@ -2088,7 +2122,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2088 struct stripe_head *osh, *nsh; 2122 struct stripe_head *osh, *nsh;
2089 LIST_HEAD(newstripes); 2123 LIST_HEAD(newstripes);
2090 struct disk_info *ndisks; 2124 struct disk_info *ndisks;
2091 unsigned long cpu;
2092 int err; 2125 int err;
2093 struct kmem_cache *sc; 2126 struct kmem_cache *sc;
2094 int i; 2127 int i;
@@ -2109,13 +2142,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2109 return -ENOMEM; 2142 return -ENOMEM;
2110 2143
2111 for (i = conf->max_nr_stripes; i; i--) { 2144 for (i = conf->max_nr_stripes; i; i--) {
2112 nsh = kmem_cache_zalloc(sc, GFP_KERNEL); 2145 nsh = alloc_stripe(sc, GFP_KERNEL);
2113 if (!nsh) 2146 if (!nsh)
2114 break; 2147 break;
2115 2148
2116 nsh->raid_conf = conf; 2149 nsh->raid_conf = conf;
2117 spin_lock_init(&nsh->stripe_lock);
2118
2119 list_add(&nsh->lru, &newstripes); 2150 list_add(&nsh->lru, &newstripes);
2120 } 2151 }
2121 if (i) { 2152 if (i) {
@@ -2142,13 +2173,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2142 lock_device_hash_lock(conf, hash)); 2173 lock_device_hash_lock(conf, hash));
2143 osh = get_free_stripe(conf, hash); 2174 osh = get_free_stripe(conf, hash);
2144 unlock_device_hash_lock(conf, hash); 2175 unlock_device_hash_lock(conf, hash);
2145 atomic_set(&nsh->count, 1); 2176
2146 for(i=0; i<conf->pool_size; i++) { 2177 for(i=0; i<conf->pool_size; i++) {
2147 nsh->dev[i].page = osh->dev[i].page; 2178 nsh->dev[i].page = osh->dev[i].page;
2148 nsh->dev[i].orig_page = osh->dev[i].page; 2179 nsh->dev[i].orig_page = osh->dev[i].page;
2149 } 2180 }
2150 for( ; i<newsize; i++)
2151 nsh->dev[i].page = NULL;
2152 nsh->hash_lock_index = hash; 2181 nsh->hash_lock_index = hash;
2153 kmem_cache_free(conf->slab_cache, osh); 2182 kmem_cache_free(conf->slab_cache, osh);
2154 cnt++; 2183 cnt++;
@@ -2174,25 +2203,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2174 } else 2203 } else
2175 err = -ENOMEM; 2204 err = -ENOMEM;
2176 2205
2177 get_online_cpus();
2178 for_each_present_cpu(cpu) {
2179 struct raid5_percpu *percpu;
2180 struct flex_array *scribble;
2181
2182 percpu = per_cpu_ptr(conf->percpu, cpu);
2183 scribble = scribble_alloc(newsize, conf->chunk_sectors /
2184 STRIPE_SECTORS, GFP_NOIO);
2185
2186 if (scribble) {
2187 flex_array_free(percpu->scribble);
2188 percpu->scribble = scribble;
2189 } else {
2190 err = -ENOMEM;
2191 break;
2192 }
2193 }
2194 put_online_cpus();
2195
2196 /* Step 4, return new stripes to service */ 2206 /* Step 4, return new stripes to service */
2197 while(!list_empty(&newstripes)) { 2207 while(!list_empty(&newstripes)) {
2198 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2208 nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2212,7 +2222,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2212 2222
2213 conf->slab_cache = sc; 2223 conf->slab_cache = sc;
2214 conf->active_name = 1-conf->active_name; 2224 conf->active_name = 1-conf->active_name;
2215 conf->pool_size = newsize; 2225 if (!err)
2226 conf->pool_size = newsize;
2216 return err; 2227 return err;
2217} 2228}
2218 2229
@@ -2434,7 +2445,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
2434 } 2445 }
2435 rdev_dec_pending(rdev, conf->mddev); 2446 rdev_dec_pending(rdev, conf->mddev);
2436 2447
2437 if (sh->batch_head && !uptodate) 2448 if (sh->batch_head && !uptodate && !replacement)
2438 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2449 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
2439 2450
2440 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2451 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
@@ -3278,7 +3289,9 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3278 /* reconstruct-write isn't being forced */ 3289 /* reconstruct-write isn't being forced */
3279 return 0; 3290 return 0;
3280 for (i = 0; i < s->failed; i++) { 3291 for (i = 0; i < s->failed; i++) {
3281 if (!test_bit(R5_UPTODATE, &fdev[i]->flags) && 3292 if (s->failed_num[i] != sh->pd_idx &&
3293 s->failed_num[i] != sh->qd_idx &&
3294 !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3282 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3295 !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3283 return 1; 3296 return 1;
3284 } 3297 }
@@ -3298,6 +3311,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3298 */ 3311 */
3299 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 3312 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
3300 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 3313 BUG_ON(test_bit(R5_Wantread, &dev->flags));
3314 BUG_ON(sh->batch_head);
3301 if ((s->uptodate == disks - 1) && 3315 if ((s->uptodate == disks - 1) &&
3302 (s->failed && (disk_idx == s->failed_num[0] || 3316 (s->failed && (disk_idx == s->failed_num[0] ||
3303 disk_idx == s->failed_num[1]))) { 3317 disk_idx == s->failed_num[1]))) {
@@ -3366,7 +3380,6 @@ static void handle_stripe_fill(struct stripe_head *sh,
3366{ 3380{
3367 int i; 3381 int i;
3368 3382
3369 BUG_ON(sh->batch_head);
3370 /* look for blocks to read/compute, skip this if a compute 3383 /* look for blocks to read/compute, skip this if a compute
3371 * is already in flight, or if the stripe contents are in the 3384 * is already in flight, or if the stripe contents are in the
3372 * midst of changing due to a write 3385 * midst of changing due to a write
@@ -4198,15 +4211,9 @@ static void check_break_stripe_batch_list(struct stripe_head *sh)
4198 return; 4211 return;
4199 4212
4200 head_sh = sh; 4213 head_sh = sh;
4201 do {
4202 sh = list_first_entry(&sh->batch_list,
4203 struct stripe_head, batch_list);
4204 BUG_ON(sh == head_sh);
4205 } while (!test_bit(STRIPE_DEGRADED, &sh->state));
4206 4214
4207 while (sh != head_sh) { 4215 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
4208 next = list_first_entry(&sh->batch_list, 4216
4209 struct stripe_head, batch_list);
4210 list_del_init(&sh->batch_list); 4217 list_del_init(&sh->batch_list);
4211 4218
4212 set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, 4219 set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
@@ -4226,8 +4233,6 @@ static void check_break_stripe_batch_list(struct stripe_head *sh)
4226 4233
4227 set_bit(STRIPE_HANDLE, &sh->state); 4234 set_bit(STRIPE_HANDLE, &sh->state);
4228 release_stripe(sh); 4235 release_stripe(sh);
4229
4230 sh = next;
4231 } 4236 }
4232} 4237}
4233 4238
@@ -6221,8 +6226,11 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
6221 percpu->spare_page = alloc_page(GFP_KERNEL); 6226 percpu->spare_page = alloc_page(GFP_KERNEL);
6222 if (!percpu->scribble) 6227 if (!percpu->scribble)
6223 percpu->scribble = scribble_alloc(max(conf->raid_disks, 6228 percpu->scribble = scribble_alloc(max(conf->raid_disks,
6224 conf->previous_raid_disks), conf->chunk_sectors / 6229 conf->previous_raid_disks),
6225 STRIPE_SECTORS, GFP_KERNEL); 6230 max(conf->chunk_sectors,
6231 conf->prev_chunk_sectors)
6232 / STRIPE_SECTORS,
6233 GFP_KERNEL);
6226 6234
6227 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { 6235 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
6228 free_scratch_buffer(conf, percpu); 6236 free_scratch_buffer(conf, percpu);
@@ -7198,6 +7206,15 @@ static int check_reshape(struct mddev *mddev)
7198 if (!check_stripe_cache(mddev)) 7206 if (!check_stripe_cache(mddev))
7199 return -ENOSPC; 7207 return -ENOSPC;
7200 7208
7209 if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
7210 mddev->delta_disks > 0)
7211 if (resize_chunks(conf,
7212 conf->previous_raid_disks
7213 + max(0, mddev->delta_disks),
7214 max(mddev->new_chunk_sectors,
7215 mddev->chunk_sectors)
7216 ) < 0)
7217 return -ENOMEM;
7201 return resize_stripes(conf, (conf->previous_raid_disks 7218 return resize_stripes(conf, (conf->previous_raid_disks
7202 + mddev->delta_disks)); 7219 + mddev->delta_disks));
7203} 7220}
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 9c64b5d01c6a..110fd70c7326 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -116,8 +116,8 @@ static struct mcam_format_struct {
116 .planar = false, 116 .planar = false,
117 }, 117 },
118 { 118 {
119 .desc = "UYVY 4:2:2", 119 .desc = "YVYU 4:2:2",
120 .pixelformat = V4L2_PIX_FMT_UYVY, 120 .pixelformat = V4L2_PIX_FMT_YVYU,
121 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, 121 .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
122 .bpp = 2, 122 .bpp = 2,
123 .planar = false, 123 .planar = false,
@@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
748 748
749 switch (fmt->pixelformat) { 749 switch (fmt->pixelformat) {
750 case V4L2_PIX_FMT_YUYV: 750 case V4L2_PIX_FMT_YUYV:
751 case V4L2_PIX_FMT_UYVY: 751 case V4L2_PIX_FMT_YVYU:
752 widthy = fmt->width * 2; 752 widthy = fmt->width * 2;
753 widthuv = 0; 753 widthuv = 0;
754 break; 754 break;
@@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
784 case V4L2_PIX_FMT_YUV420: 784 case V4L2_PIX_FMT_YUV420:
785 case V4L2_PIX_FMT_YVU420: 785 case V4L2_PIX_FMT_YVU420:
786 mcam_reg_write_mask(cam, REG_CTRL0, 786 mcam_reg_write_mask(cam, REG_CTRL0,
787 C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK); 787 C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
788 break; 788 break;
789 case V4L2_PIX_FMT_YUYV: 789 case V4L2_PIX_FMT_YUYV:
790 mcam_reg_write_mask(cam, REG_CTRL0, 790 mcam_reg_write_mask(cam, REG_CTRL0,
791 C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK); 791 C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
792 break; 792 break;
793 case V4L2_PIX_FMT_UYVY: 793 case V4L2_PIX_FMT_YVYU:
794 mcam_reg_write_mask(cam, REG_CTRL0, 794 mcam_reg_write_mask(cam, REG_CTRL0,
795 C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK); 795 C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
796 break; 796 break;
797 case V4L2_PIX_FMT_JPEG: 797 case V4L2_PIX_FMT_JPEG:
798 mcam_reg_write_mask(cam, REG_CTRL0, 798 mcam_reg_write_mask(cam, REG_CTRL0,
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index aa0c6eac254a..7ffdf4dbaf8c 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam);
330#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */ 330#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
331#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */ 331#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
332#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */ 332#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
333#define	  C0_YUVE_XYUV	  0x00000000	/* 420: .YUV		*/ 333#define	  C0_YUVE_NOSWAP  0x00000000	/* no byte swapping	*/
334#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */ 334#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */
335#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */ 335#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */
336#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */ 336#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */
337/* Bayer bits 18,19 if needed */ 337/* Bayer bits 18,19 if needed */
338#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */ 338#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
339#define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */ 339#define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 9351f64dee7b..6460f8e1b07f 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -135,6 +135,8 @@
135#define VIN_MAX_WIDTH 2048 135#define VIN_MAX_WIDTH 2048
136#define VIN_MAX_HEIGHT 2048 136#define VIN_MAX_HEIGHT 2048
137 137
138#define TIMEOUT_MS 100
139
138enum chip_id { 140enum chip_id {
139 RCAR_GEN2, 141 RCAR_GEN2,
140 RCAR_H1, 142 RCAR_H1,
@@ -820,7 +822,10 @@ static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv)
820 if (priv->state == STOPPING) { 822 if (priv->state == STOPPING) {
821 priv->request_to_stop = true; 823 priv->request_to_stop = true;
822 spin_unlock_irq(&priv->lock); 824 spin_unlock_irq(&priv->lock);
823 wait_for_completion(&priv->capture_stop); 825 if (!wait_for_completion_timeout(
826 &priv->capture_stop,
827 msecs_to_jiffies(TIMEOUT_MS)))
828 priv->state = STOPPED;
824 spin_lock_irq(&priv->lock); 829 spin_lock_irq(&priv->lock);
825 } 830 }
826 } 831 }
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2c25271f8c41..60f7141a6b02 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
1029 md->reset_done &= ~type; 1029 md->reset_done &= ~type;
1030} 1030}
1031 1031
1032int mmc_access_rpmb(struct mmc_queue *mq)
1033{
1034 struct mmc_blk_data *md = mq->data;
1035 /*
1036	 * If this is an RPMB partition access, return true
1037 */
1038 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
1039 return true;
1040
1041 return false;
1042}
1043
1032static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) 1044static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
1033{ 1045{
1034 struct mmc_blk_data *md = mq->data; 1046 struct mmc_blk_data *md = mq->data;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 236d194c2883..8efa3684aef8 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
38 return BLKPREP_KILL; 38 return BLKPREP_KILL;
39 } 39 }
40 40
41 if (mq && mmc_card_removed(mq->card)) 41 if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
42 return BLKPREP_KILL; 42 return BLKPREP_KILL;
43 43
44 req->cmd_flags |= REQ_DONTPREP; 44 req->cmd_flags |= REQ_DONTPREP;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 5752d50049a3..99e6521e6169 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
73extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); 73extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
74extern void mmc_packed_clean(struct mmc_queue *); 74extern void mmc_packed_clean(struct mmc_queue *);
75 75
76extern int mmc_access_rpmb(struct mmc_queue *);
77
76#endif 78#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c296bc098fe2..92e7671426eb 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2651,6 +2651,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
2651 switch (mode) { 2651 switch (mode) {
2652 case PM_HIBERNATION_PREPARE: 2652 case PM_HIBERNATION_PREPARE:
2653 case PM_SUSPEND_PREPARE: 2653 case PM_SUSPEND_PREPARE:
2654 case PM_RESTORE_PREPARE:
2654 spin_lock_irqsave(&host->lock, flags); 2655 spin_lock_irqsave(&host->lock, flags);
2655 host->rescan_disable = 1; 2656 host->rescan_disable = 1;
2656 spin_unlock_irqrestore(&host->lock, flags); 2657 spin_unlock_irqrestore(&host->lock, flags);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 38b29265cc7c..5f5adafb253a 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -589,9 +589,11 @@ static int dw_mci_idmac_init(struct dw_mci *host)
589 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 589 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
590 590
591 /* Forward link the descriptor list */ 591 /* Forward link the descriptor list */
592 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 592 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
593 p->des3 = cpu_to_le32(host->sg_dma + 593 p->des3 = cpu_to_le32(host->sg_dma +
594 (sizeof(struct idmac_desc) * (i + 1))); 594 (sizeof(struct idmac_desc) * (i + 1)));
595 p->des1 = 0;
596 }
595 597
596 /* Set the last descriptor as the end-of-ring descriptor */ 598 /* Set the last descriptor as the end-of-ring descriptor */
597 p->des3 = cpu_to_le32(host->sg_dma); 599 p->des3 = cpu_to_le32(host->sg_dma);
@@ -1300,7 +1302,8 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
1300 int gpio_cd = mmc_gpio_get_cd(mmc); 1302 int gpio_cd = mmc_gpio_get_cd(mmc);
1301 1303
1302 /* Use platform get_cd function, else try onboard card detect */ 1304 /* Use platform get_cd function, else try onboard card detect */
1303 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) 1305 if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
1306 (mmc->caps & MMC_CAP_NONREMOVABLE))
1304 present = 1; 1307 present = 1;
1305 else if (!IS_ERR_VALUE(gpio_cd)) 1308 else if (!IS_ERR_VALUE(gpio_cd))
1306 present = gpio_cd; 1309 present = gpio_cd;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 2b6ef6bd5d5f..7eff087cf515 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1408,7 +1408,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1408 host = mmc_priv(mmc); 1408 host = mmc_priv(mmc);
1409 host->mmc = mmc; 1409 host->mmc = mmc;
1410 host->addr = reg; 1410 host->addr = reg;
1411 host->timeout = msecs_to_jiffies(1000); 1411 host->timeout = msecs_to_jiffies(10000);
1412 host->ccs_enable = !pd || !pd->ccs_unsupported; 1412 host->ccs_enable = !pd || !pd->ccs_unsupported;
1413 host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; 1413 host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
1414 1414
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 6bddfe062b51..fc55e8e0351d 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -509,10 +509,11 @@ static int xcan_rx(struct net_device *ndev)
509 cf->can_id |= CAN_RTR_FLAG; 509 cf->can_id |= CAN_RTR_FLAG;
510 } 510 }
511 511
512 if (!(id_xcan & XCAN_IDR_SRR_MASK)) { 512 /* DW1/DW2 must always be read to remove message from RXFIFO */
513 data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET); 513 data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
514 data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET); 514 data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
515 515
516 if (!(cf->can_id & CAN_RTR_FLAG)) {
516 /* Change Xilinx CAN data format to socketCAN data format */ 517 /* Change Xilinx CAN data format to socketCAN data format */
517 if (cf->can_dlc > 0) 518 if (cf->can_dlc > 0)
518 *(__be32 *)(cf->data) = cpu_to_be32(data[0]); 519 *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 56ecbe49eb86..7fba330ce702 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1885,6 +1885,9 @@ static void __exit mv88e6xxx_cleanup(void)
1885#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) 1885#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
1886 unregister_switch_driver(&mv88e6171_switch_driver); 1886 unregister_switch_driver(&mv88e6171_switch_driver);
1887#endif 1887#endif
1888#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
1889 unregister_switch_driver(&mv88e6352_switch_driver);
1890#endif
1888#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) 1891#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
1889 unregister_switch_driver(&mv88e6123_61_65_switch_driver); 1892 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
1890#endif 1893#endif
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 089c269637b7..426916036151 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -180,6 +180,7 @@ config SUNLANCE
180config AMD_XGBE 180config AMD_XGBE
181 tristate "AMD 10GbE Ethernet driver" 181 tristate "AMD 10GbE Ethernet driver"
182 depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA 182 depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
183 depends on ARM64 || COMPILE_TEST
183 select PHYLIB 184 select PHYLIB
184 select AMD_XGBE_PHY 185 select AMD_XGBE_PHY
185 select BITREVERSE 186 select BITREVERSE
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index f4054d242f3c..19e38afbc5ee 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,6 +1,7 @@
1config NET_XGENE 1config NET_XGENE
2 tristate "APM X-Gene SoC Ethernet Driver" 2 tristate "APM X-Gene SoC Ethernet Driver"
3 depends on HAS_DMA 3 depends on HAS_DMA
4 depends on ARCH_XGENE || COMPILE_TEST
4 select PHYLIB 5 select PHYLIB
5 help 6 help
6 This is the Ethernet driver for the on-chip ethernet interface on the 7 This is the Ethernet driver for the on-chip ethernet interface on the
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index b10d1744e5ae..2ef202d10948 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4786,6 +4786,11 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4786{ 4786{
4787 struct bnx2x *bp = netdev_priv(dev); 4787 struct bnx2x *bp = netdev_priv(dev);
4788 4788
4789 if (pci_num_vf(bp->pdev)) {
4790 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4791 return -EPERM;
4792 }
4793
4789 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 4794 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4790 BNX2X_ERR("Can't perform change MTU during parity recovery\n"); 4795 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4791 return -EAGAIN; 4796 return -EAGAIN;
@@ -4938,11 +4943,6 @@ int bnx2x_resume(struct pci_dev *pdev)
4938 } 4943 }
4939 bp = netdev_priv(dev); 4944 bp = netdev_priv(dev);
4940 4945
4941 if (pci_num_vf(bp->pdev)) {
4942 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4943 return -EPERM;
4944 }
4945
4946 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 4946 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4947 BNX2X_ERR("Handling parity error recovery. Try again later\n"); 4947 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4948 return -EAGAIN; 4948 return -EAGAIN;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 556dcc162a62..fd52ce95127e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13371,8 +13371,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
13371 /* Management FW 'remembers' living interfaces. Allow it some time 13371 /* Management FW 'remembers' living interfaces. Allow it some time
13372 * to forget previously living interfaces, allowing a proper re-load. 13372 * to forget previously living interfaces, allowing a proper re-load.
13373 */ 13373 */
13374 if (is_kdump_kernel()) 13374 if (is_kdump_kernel()) {
13375 msleep(5000); 13375 ktime_t now = ktime_get_boottime();
13376 ktime_t fw_ready_time = ktime_set(5, 0);
13377
13378 if (ktime_before(now, fw_ready_time))
13379 msleep(ktime_ms_delta(fw_ready_time, now));
13380 }
13376 13381
13377 /* An estimated maximum supported CoS number according to the chip 13382 /* An estimated maximum supported CoS number according to the chip
13378 * version. 13383 * version.
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 97c664611e84..e7c10b0addb5 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -983,7 +983,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
983 struct macb_queue *queue = dev_id; 983 struct macb_queue *queue = dev_id;
984 struct macb *bp = queue->bp; 984 struct macb *bp = queue->bp;
985 struct net_device *dev = bp->dev; 985 struct net_device *dev = bp->dev;
986 u32 status; 986 u32 status, ctrl;
987 987
988 status = queue_readl(queue, ISR); 988 status = queue_readl(queue, ISR);
989 989
@@ -1039,6 +1039,15 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1039 * add that if/when we get our hands on a full-blown MII PHY. 1039 * add that if/when we get our hands on a full-blown MII PHY.
1040 */ 1040 */
1041 1041
1042 if (status & MACB_BIT(RXUBR)) {
1043 ctrl = macb_readl(bp, NCR);
1044 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1045 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1046
1047 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1048 macb_writel(bp, ISR, MACB_BIT(RXUBR));
1049 }
1050
1042 if (status & MACB_BIT(ISR_ROVR)) { 1051 if (status & MACB_BIT(ISR_ROVR)) {
1043 /* We missed at least one packet */ 1052 /* We missed at least one packet */
1044 if (macb_is_gem(bp)) 1053 if (macb_is_gem(bp))
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 5d9ceb17b4cb..0abc942c966e 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -40,6 +40,7 @@
40#include <linux/ptp_classify.h> 40#include <linux/ptp_classify.h>
41#include <linux/mii.h> 41#include <linux/mii.h>
42#include <linux/mdio.h> 42#include <linux/mdio.h>
43#include <linux/pm_qos.h>
43#include "hw.h" 44#include "hw.h"
44 45
45struct e1000_info; 46struct e1000_info;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 1b0661e3573b..c754b2027281 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -610,7 +610,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
610 unsigned int total_bytes = 0, total_packets = 0; 610 unsigned int total_bytes = 0, total_packets = 0;
611 u16 cleaned_count = fm10k_desc_unused(rx_ring); 611 u16 cleaned_count = fm10k_desc_unused(rx_ring);
612 612
613 do { 613 while (likely(total_packets < budget)) {
614 union fm10k_rx_desc *rx_desc; 614 union fm10k_rx_desc *rx_desc;
615 615
616 /* return some buffers to hardware, one at a time is too slow */ 616 /* return some buffers to hardware, one at a time is too slow */
@@ -659,7 +659,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
659 659
660 /* update budget accounting */ 660 /* update budget accounting */
661 total_packets++; 661 total_packets++;
662 } while (likely(total_packets < budget)); 662 }
663 663
664 /* place incomplete frames back on ring for completion */ 664 /* place incomplete frames back on ring for completion */
665 rx_ring->skb = skb; 665 rx_ring->skb = skb;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8a45ed7506c5..f287186192bb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1036,7 +1036,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1036 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 1036 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1037 1037
1038 if (q_vector->rx.ring) 1038 if (q_vector->rx.ring)
1039 adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL; 1039 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1040 1040
1041 netif_napi_del(&q_vector->napi); 1041 netif_napi_del(&q_vector->napi);
1042 1042
@@ -1207,6 +1207,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1207 q_vector = adapter->q_vector[v_idx]; 1207 q_vector = adapter->q_vector[v_idx];
1208 if (!q_vector) 1208 if (!q_vector)
1209 q_vector = kzalloc(size, GFP_KERNEL); 1209 q_vector = kzalloc(size, GFP_KERNEL);
1210 else
1211 memset(q_vector, 0, size);
1210 if (!q_vector) 1212 if (!q_vector)
1211 return -ENOMEM; 1213 return -ENOMEM;
1212 1214
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a16d267fbce4..e71cdde9cb01 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3612,7 +3612,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3612 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 3612 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3613 3613
3614 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 3614 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3615 dev_kfree_skb(skb); 3615 dev_kfree_skb_any(skb);
3616 return NETDEV_TX_OK; 3616 return NETDEV_TX_OK;
3617 } 3617 }
3618 3618
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 54f0e5ab2e55..0a56f010c846 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -139,7 +139,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
139 int i; 139 int i;
140 int offset = next - start; 140 int offset = next - start;
141 141
142 for (i = 0; i <= num; i++) { 142 for (i = 0; i < num; i++) {
143 ret += be64_to_cpu(*curr); 143 ret += be64_to_cpu(*curr);
144 curr += offset; 144 curr += offset;
145 } 145 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index c7f28bf4b8e2..92fce1b98558 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2845,7 +2845,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2845{ 2845{
2846 int err; 2846 int err;
2847 int eqn = vhcr->in_modifier; 2847 int eqn = vhcr->in_modifier;
2848 int res_id = (slave << 8) | eqn; 2848 int res_id = (slave << 10) | eqn;
2849 struct mlx4_eq_context *eqc = inbox->buf; 2849 struct mlx4_eq_context *eqc = inbox->buf;
2850 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; 2850 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2851 int mtt_size = eq_get_mtt_size(eqc); 2851 int mtt_size = eq_get_mtt_size(eqc);
@@ -3051,7 +3051,7 @@ int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3051 struct mlx4_cmd_info *cmd) 3051 struct mlx4_cmd_info *cmd)
3052{ 3052{
3053 int eqn = vhcr->in_modifier; 3053 int eqn = vhcr->in_modifier;
3054 int res_id = eqn | (slave << 8); 3054 int res_id = eqn | (slave << 10);
3055 struct res_eq *eq; 3055 struct res_eq *eq;
3056 int err; 3056 int err;
3057 3057
@@ -3108,7 +3108,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3108 return 0; 3108 return 0;
3109 3109
3110 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); 3110 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3111 res_id = (slave << 8) | event_eq->eqn; 3111 res_id = (slave << 10) | event_eq->eqn;
3112 err = get_res(dev, slave, res_id, RES_EQ, &req); 3112 err = get_res(dev, slave, res_id, RES_EQ, &req);
3113 if (err) 3113 if (err)
3114 goto unlock; 3114 goto unlock;
@@ -3131,7 +3131,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3131 3131
3132 memcpy(mailbox->buf, (u8 *) eqe, 28); 3132 memcpy(mailbox->buf, (u8 *) eqe, 28);
3133 3133
3134 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16); 3134 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3135 3135
3136 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, 3136 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3137 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, 3137 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
@@ -3157,7 +3157,7 @@ int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3157 struct mlx4_cmd_info *cmd) 3157 struct mlx4_cmd_info *cmd)
3158{ 3158{
3159 int eqn = vhcr->in_modifier; 3159 int eqn = vhcr->in_modifier;
3160 int res_id = eqn | (slave << 8); 3160 int res_id = eqn | (slave << 10);
3161 struct res_eq *eq; 3161 struct res_eq *eq;
3162 int err; 3162 int err;
3163 3163
@@ -4714,13 +4714,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4714 break; 4714 break;
4715 4715
4716 case RES_EQ_HW: 4716 case RES_EQ_HW:
4717 err = mlx4_cmd(dev, slave, eqn & 0xff, 4717 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4718 1, MLX4_CMD_HW2SW_EQ, 4718 1, MLX4_CMD_HW2SW_EQ,
4719 MLX4_CMD_TIME_CLASS_A, 4719 MLX4_CMD_TIME_CLASS_A,
4720 MLX4_CMD_NATIVE); 4720 MLX4_CMD_NATIVE);
4721 if (err) 4721 if (err)
4722 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", 4722 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4723 slave, eqn); 4723 slave, eqn & 0x3ff);
4724 atomic_dec(&eq->mtt->ref_count); 4724 atomic_dec(&eq->mtt->ref_count);
4725 state = RES_EQ_RESERVED; 4725 state = RES_EQ_RESERVED;
4726 break; 4726 break;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 8da7c3faf817..7b43a3b4abdc 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1764,7 +1764,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1764 int done = 0; 1764 int done = 0;
1765 struct nx_host_tx_ring *tx_ring = adapter->tx_ring; 1765 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
1766 1766
1767 if (!spin_trylock(&adapter->tx_clean_lock)) 1767 if (!spin_trylock_bh(&adapter->tx_clean_lock))
1768 return 1; 1768 return 1;
1769 1769
1770 sw_consumer = tx_ring->sw_consumer; 1770 sw_consumer = tx_ring->sw_consumer;
@@ -1819,7 +1819,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1819 */ 1819 */
1820 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 1820 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1821 done = (sw_consumer == hw_consumer); 1821 done = (sw_consumer == hw_consumer);
1822 spin_unlock(&adapter->tx_clean_lock); 1822 spin_unlock_bh(&adapter->tx_clean_lock);
1823 1823
1824 return done; 1824 return done;
1825} 1825}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index c6b749880e46..2f87909f5186 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -912,6 +912,8 @@ qca_spi_probe(struct spi_device *spi)
912 qca->spi_dev = spi; 912 qca->spi_dev = spi;
913 qca->legacy_mode = legacy_mode; 913 qca->legacy_mode = legacy_mode;
914 914
915 spi_set_drvdata(spi, qcaspi_devs);
916
915 mac = of_get_mac_address(spi->dev.of_node); 917 mac = of_get_mac_address(spi->dev.of_node);
916 918
917 if (mac) 919 if (mac)
@@ -944,8 +946,6 @@ qca_spi_probe(struct spi_device *spi)
944 return -EFAULT; 946 return -EFAULT;
945 } 947 }
946 948
947 spi_set_drvdata(spi, qcaspi_devs);
948
949 qcaspi_init_device_debugfs(qca); 949 qcaspi_init_device_debugfs(qca);
950 950
951 return 0; 951 return 0;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c70ab40d8698..3df51faf18ae 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6884,7 +6884,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp,
6884 rtl8169_start_xmit(nskb, tp->dev); 6884 rtl8169_start_xmit(nskb, tp->dev);
6885 } while (segs); 6885 } while (segs);
6886 6886
6887 dev_kfree_skb(skb); 6887 dev_consume_skb_any(skb);
6888 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 6888 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
6889 if (skb_checksum_help(skb) < 0) 6889 if (skb_checksum_help(skb) < 0)
6890 goto drop; 6890 goto drop;
@@ -6896,7 +6896,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp,
6896drop: 6896drop:
6897 stats = &tp->dev->stats; 6897 stats = &tp->dev->stats;
6898 stats->tx_dropped++; 6898 stats->tx_dropped++;
6899 dev_kfree_skb(skb); 6899 dev_kfree_skb_any(skb);
6900 } 6900 }
6901} 6901}
6902 6902
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 14b363a25c02..630f0b7800e4 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2238,9 +2238,10 @@ static int smc_drv_probe(struct platform_device *pdev)
2238 const struct of_device_id *match = NULL; 2238 const struct of_device_id *match = NULL;
2239 struct smc_local *lp; 2239 struct smc_local *lp;
2240 struct net_device *ndev; 2240 struct net_device *ndev;
2241 struct resource *res, *ires; 2241 struct resource *res;
2242 unsigned int __iomem *addr; 2242 unsigned int __iomem *addr;
2243 unsigned long irq_flags = SMC_IRQ_FLAGS; 2243 unsigned long irq_flags = SMC_IRQ_FLAGS;
2244 unsigned long irq_resflags;
2244 int ret; 2245 int ret;
2245 2246
2246 ndev = alloc_etherdev(sizeof(struct smc_local)); 2247 ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2332,16 +2333,19 @@ static int smc_drv_probe(struct platform_device *pdev)
2332 goto out_free_netdev; 2333 goto out_free_netdev;
2333 } 2334 }
2334 2335
2335 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2336 ndev->irq = platform_get_irq(pdev, 0);
2336 if (!ires) { 2337 if (ndev->irq <= 0) {
2337 ret = -ENODEV; 2338 ret = -ENODEV;
2338 goto out_release_io; 2339 goto out_release_io;
2339 } 2340 }
2340 2341 /*
2341 ndev->irq = ires->start; 2342 * If this platform does not specify any special irqflags, or if
2342 2343 * the resource supplies a trigger, override the irqflags with
2343 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) 2344 * the trigger flags from the resource.
2344 irq_flags = ires->flags & IRQF_TRIGGER_MASK; 2345 */
2346 irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
2347 if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
2348 irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
2345 2349
2346 ret = smc_request_attrib(pdev, ndev); 2350 ret = smc_request_attrib(pdev, ndev);
2347 if (ret) 2351 if (ret)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 41047c9143d0..959aeeade0c9 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2418,9 +2418,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2418 struct net_device *dev; 2418 struct net_device *dev;
2419 struct smsc911x_data *pdata; 2419 struct smsc911x_data *pdata;
2420 struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); 2420 struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
2421 struct resource *res, *irq_res; 2421 struct resource *res;
2422 unsigned int intcfg = 0; 2422 unsigned int intcfg = 0;
2423 int res_size, irq_flags; 2423 int res_size, irq, irq_flags;
2424 int retval; 2424 int retval;
2425 2425
2426 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2426 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2434,8 +2434,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2434 } 2434 }
2435 res_size = resource_size(res); 2435 res_size = resource_size(res);
2436 2436
2437 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2437 irq = platform_get_irq(pdev, 0);
2438 if (!irq_res) { 2438 if (irq <= 0) {
2439 pr_warn("Could not allocate irq resource\n"); 2439 pr_warn("Could not allocate irq resource\n");
2440 retval = -ENODEV; 2440 retval = -ENODEV;
2441 goto out_0; 2441 goto out_0;
@@ -2455,8 +2455,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
2455 SET_NETDEV_DEV(dev, &pdev->dev); 2455 SET_NETDEV_DEV(dev, &pdev->dev);
2456 2456
2457 pdata = netdev_priv(dev); 2457 pdata = netdev_priv(dev);
2458 dev->irq = irq_res->start; 2458 dev->irq = irq;
2459 irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; 2459 irq_flags = irq_get_trigger_type(irq);
2460 pdata->ioaddr = ioremap_nocache(res->start, res_size); 2460 pdata->ioaddr = ioremap_nocache(res->start, res_size);
2461 2461
2462 pdata->dev = dev; 2462 pdata->dev = dev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 705bbdf93940..68aec5c460db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -23,6 +23,7 @@
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/module.h>
26#include <linux/io.h> 27#include <linux/io.h>
27#include <linux/of.h> 28#include <linux/of.h>
28#include <linux/of_net.h> 29#include <linux/of_net.h>
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index ca640d04fd93..3b99a4df71f8 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -705,8 +705,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
705 705
706 cur_p->app0 |= STS_CTRL_APP0_SOP; 706 cur_p->app0 |= STS_CTRL_APP0_SOP;
707 cur_p->len = skb_headlen(skb); 707 cur_p->len = skb_headlen(skb);
708 cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, 708 cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
709 DMA_TO_DEVICE); 709 skb_headlen(skb), DMA_TO_DEVICE);
710 cur_p->app4 = (unsigned long)skb; 710 cur_p->app4 = (unsigned long)skb;
711 711
712 for (ii = 0; ii < num_frag; ii++) { 712 for (ii = 0; ii < num_frag; ii++) {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1c4f265f4e7c..b0249685139c 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -837,7 +837,6 @@ int netvsc_send(struct hv_device *device,
837 u16 q_idx = packet->q_idx; 837 u16 q_idx = packet->q_idx;
838 u32 pktlen = packet->total_data_buflen, msd_len = 0; 838 u32 pktlen = packet->total_data_buflen, msd_len = 0;
839 unsigned int section_index = NETVSC_INVALID_INDEX; 839 unsigned int section_index = NETVSC_INVALID_INDEX;
840 struct sk_buff *skb = NULL;
841 unsigned long flag; 840 unsigned long flag;
842 struct multi_send_data *msdp; 841 struct multi_send_data *msdp;
843 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; 842 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
@@ -935,12 +934,8 @@ int netvsc_send(struct hv_device *device,
935 if (cur_send) 934 if (cur_send)
936 ret = netvsc_send_pkt(cur_send, net_device); 935 ret = netvsc_send_pkt(cur_send, net_device);
937 936
938 if (ret != 0) { 937 if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
939 if (section_index != NETVSC_INVALID_INDEX) 938 netvsc_free_send_slot(net_device, section_index);
940 netvsc_free_send_slot(net_device, section_index);
941 } else if (skb) {
942 dev_kfree_skb_any(skb);
943 }
944 939
945 return ret; 940 return ret;
946} 941}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 38026650c038..67d00fbc2e0e 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -85,6 +85,7 @@ struct at86rf230_local {
85 struct ieee802154_hw *hw; 85 struct ieee802154_hw *hw;
86 struct at86rf2xx_chip_data *data; 86 struct at86rf2xx_chip_data *data;
87 struct regmap *regmap; 87 struct regmap *regmap;
88 int slp_tr;
88 89
89 struct completion state_complete; 90 struct completion state_complete;
90 struct at86rf230_state_change state; 91 struct at86rf230_state_change state;
@@ -95,163 +96,164 @@ struct at86rf230_local {
95 unsigned long cal_timeout; 96 unsigned long cal_timeout;
96 s8 max_frame_retries; 97 s8 max_frame_retries;
97 bool is_tx; 98 bool is_tx;
99 bool is_tx_from_off;
98 u8 tx_retry; 100 u8 tx_retry;
99 struct sk_buff *tx_skb; 101 struct sk_buff *tx_skb;
100 struct at86rf230_state_change tx; 102 struct at86rf230_state_change tx;
101}; 103};
102 104
103#define RG_TRX_STATUS (0x01) 105#define RG_TRX_STATUS (0x01)
104#define SR_TRX_STATUS 0x01, 0x1f, 0 106#define SR_TRX_STATUS 0x01, 0x1f, 0
105#define SR_RESERVED_01_3 0x01, 0x20, 5 107#define SR_RESERVED_01_3 0x01, 0x20, 5
106#define SR_CCA_STATUS 0x01, 0x40, 6 108#define SR_CCA_STATUS 0x01, 0x40, 6
107#define SR_CCA_DONE 0x01, 0x80, 7 109#define SR_CCA_DONE 0x01, 0x80, 7
108#define RG_TRX_STATE (0x02) 110#define RG_TRX_STATE (0x02)
109#define SR_TRX_CMD 0x02, 0x1f, 0 111#define SR_TRX_CMD 0x02, 0x1f, 0
110#define SR_TRAC_STATUS 0x02, 0xe0, 5 112#define SR_TRAC_STATUS 0x02, 0xe0, 5
111#define RG_TRX_CTRL_0 (0x03) 113#define RG_TRX_CTRL_0 (0x03)
112#define SR_CLKM_CTRL 0x03, 0x07, 0 114#define SR_CLKM_CTRL 0x03, 0x07, 0
113#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 115#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
114#define SR_PAD_IO_CLKM 0x03, 0x30, 4 116#define SR_PAD_IO_CLKM 0x03, 0x30, 4
115#define SR_PAD_IO 0x03, 0xc0, 6 117#define SR_PAD_IO 0x03, 0xc0, 6
116#define RG_TRX_CTRL_1 (0x04) 118#define RG_TRX_CTRL_1 (0x04)
117#define SR_IRQ_POLARITY 0x04, 0x01, 0 119#define SR_IRQ_POLARITY 0x04, 0x01, 0
118#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 120#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
119#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 121#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
120#define SR_RX_BL_CTRL 0x04, 0x10, 4 122#define SR_RX_BL_CTRL 0x04, 0x10, 4
121#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 123#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
122#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 124#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
123#define SR_PA_EXT_EN 0x04, 0x80, 7 125#define SR_PA_EXT_EN 0x04, 0x80, 7
124#define RG_PHY_TX_PWR (0x05) 126#define RG_PHY_TX_PWR (0x05)
125#define SR_TX_PWR 0x05, 0x0f, 0 127#define SR_TX_PWR 0x05, 0x0f, 0
126#define SR_PA_LT 0x05, 0x30, 4 128#define SR_PA_LT 0x05, 0x30, 4
127#define SR_PA_BUF_LT 0x05, 0xc0, 6 129#define SR_PA_BUF_LT 0x05, 0xc0, 6
128#define RG_PHY_RSSI (0x06) 130#define RG_PHY_RSSI (0x06)
129#define SR_RSSI 0x06, 0x1f, 0 131#define SR_RSSI 0x06, 0x1f, 0
130#define SR_RND_VALUE 0x06, 0x60, 5 132#define SR_RND_VALUE 0x06, 0x60, 5
131#define SR_RX_CRC_VALID 0x06, 0x80, 7 133#define SR_RX_CRC_VALID 0x06, 0x80, 7
132#define RG_PHY_ED_LEVEL (0x07) 134#define RG_PHY_ED_LEVEL (0x07)
133#define SR_ED_LEVEL 0x07, 0xff, 0 135#define SR_ED_LEVEL 0x07, 0xff, 0
134#define RG_PHY_CC_CCA (0x08) 136#define RG_PHY_CC_CCA (0x08)
135#define SR_CHANNEL 0x08, 0x1f, 0 137#define SR_CHANNEL 0x08, 0x1f, 0
136#define SR_CCA_MODE 0x08, 0x60, 5 138#define SR_CCA_MODE 0x08, 0x60, 5
137#define SR_CCA_REQUEST 0x08, 0x80, 7 139#define SR_CCA_REQUEST 0x08, 0x80, 7
138#define RG_CCA_THRES (0x09) 140#define RG_CCA_THRES (0x09)
139#define SR_CCA_ED_THRES 0x09, 0x0f, 0 141#define SR_CCA_ED_THRES 0x09, 0x0f, 0
140#define SR_RESERVED_09_1 0x09, 0xf0, 4 142#define SR_RESERVED_09_1 0x09, 0xf0, 4
141#define RG_RX_CTRL (0x0a) 143#define RG_RX_CTRL (0x0a)
142#define SR_PDT_THRES 0x0a, 0x0f, 0 144#define SR_PDT_THRES 0x0a, 0x0f, 0
143#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 145#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
144#define RG_SFD_VALUE (0x0b) 146#define RG_SFD_VALUE (0x0b)
145#define SR_SFD_VALUE 0x0b, 0xff, 0 147#define SR_SFD_VALUE 0x0b, 0xff, 0
146#define RG_TRX_CTRL_2 (0x0c) 148#define RG_TRX_CTRL_2 (0x0c)
147#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 149#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
148#define SR_SUB_MODE 0x0c, 0x04, 2 150#define SR_SUB_MODE 0x0c, 0x04, 2
149#define SR_BPSK_QPSK 0x0c, 0x08, 3 151#define SR_BPSK_QPSK 0x0c, 0x08, 3
150#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 152#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
151#define SR_RESERVED_0c_5 0x0c, 0x60, 5 153#define SR_RESERVED_0c_5 0x0c, 0x60, 5
152#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 154#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
153#define RG_ANT_DIV (0x0d) 155#define RG_ANT_DIV (0x0d)
154#define SR_ANT_CTRL 0x0d, 0x03, 0 156#define SR_ANT_CTRL 0x0d, 0x03, 0
155#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 157#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
156#define SR_ANT_DIV_EN 0x0d, 0x08, 3 158#define SR_ANT_DIV_EN 0x0d, 0x08, 3
157#define SR_RESERVED_0d_2 0x0d, 0x70, 4 159#define SR_RESERVED_0d_2 0x0d, 0x70, 4
158#define SR_ANT_SEL 0x0d, 0x80, 7 160#define SR_ANT_SEL 0x0d, 0x80, 7
159#define RG_IRQ_MASK (0x0e) 161#define RG_IRQ_MASK (0x0e)
160#define SR_IRQ_MASK 0x0e, 0xff, 0 162#define SR_IRQ_MASK 0x0e, 0xff, 0
161#define RG_IRQ_STATUS (0x0f) 163#define RG_IRQ_STATUS (0x0f)
162#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 164#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
163#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 165#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
164#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 166#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
165#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 167#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
166#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 168#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
167#define SR_IRQ_5_AMI 0x0f, 0x20, 5 169#define SR_IRQ_5_AMI 0x0f, 0x20, 5
168#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 170#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
169#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 171#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
170#define RG_VREG_CTRL (0x10) 172#define RG_VREG_CTRL (0x10)
171#define SR_RESERVED_10_6 0x10, 0x03, 0 173#define SR_RESERVED_10_6 0x10, 0x03, 0
172#define SR_DVDD_OK 0x10, 0x04, 2 174#define SR_DVDD_OK 0x10, 0x04, 2
173#define SR_DVREG_EXT 0x10, 0x08, 3 175#define SR_DVREG_EXT 0x10, 0x08, 3
174#define SR_RESERVED_10_3 0x10, 0x30, 4 176#define SR_RESERVED_10_3 0x10, 0x30, 4
175#define SR_AVDD_OK 0x10, 0x40, 6 177#define SR_AVDD_OK 0x10, 0x40, 6
176#define SR_AVREG_EXT 0x10, 0x80, 7 178#define SR_AVREG_EXT 0x10, 0x80, 7
177#define RG_BATMON (0x11) 179#define RG_BATMON (0x11)
178#define SR_BATMON_VTH 0x11, 0x0f, 0 180#define SR_BATMON_VTH 0x11, 0x0f, 0
179#define SR_BATMON_HR 0x11, 0x10, 4 181#define SR_BATMON_HR 0x11, 0x10, 4
180#define SR_BATMON_OK 0x11, 0x20, 5 182#define SR_BATMON_OK 0x11, 0x20, 5
181#define SR_RESERVED_11_1 0x11, 0xc0, 6 183#define SR_RESERVED_11_1 0x11, 0xc0, 6
182#define RG_XOSC_CTRL (0x12) 184#define RG_XOSC_CTRL (0x12)
183#define SR_XTAL_TRIM 0x12, 0x0f, 0 185#define SR_XTAL_TRIM 0x12, 0x0f, 0
184#define SR_XTAL_MODE 0x12, 0xf0, 4 186#define SR_XTAL_MODE 0x12, 0xf0, 4
185#define RG_RX_SYN (0x15) 187#define RG_RX_SYN (0x15)
186#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 188#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
187#define SR_RESERVED_15_2 0x15, 0x70, 4 189#define SR_RESERVED_15_2 0x15, 0x70, 4
188#define SR_RX_PDT_DIS 0x15, 0x80, 7 190#define SR_RX_PDT_DIS 0x15, 0x80, 7
189#define RG_XAH_CTRL_1 (0x17) 191#define RG_XAH_CTRL_1 (0x17)
190#define SR_RESERVED_17_8 0x17, 0x01, 0 192#define SR_RESERVED_17_8 0x17, 0x01, 0
191#define SR_AACK_PROM_MODE 0x17, 0x02, 1 193#define SR_AACK_PROM_MODE 0x17, 0x02, 1
192#define SR_AACK_ACK_TIME 0x17, 0x04, 2 194#define SR_AACK_ACK_TIME 0x17, 0x04, 2
193#define SR_RESERVED_17_5 0x17, 0x08, 3 195#define SR_RESERVED_17_5 0x17, 0x08, 3
194#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 196#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
195#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 197#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
196#define SR_CSMA_LBT_MODE 0x17, 0x40, 6 198#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
197#define SR_RESERVED_17_1 0x17, 0x80, 7 199#define SR_RESERVED_17_1 0x17, 0x80, 7
198#define RG_FTN_CTRL (0x18) 200#define RG_FTN_CTRL (0x18)
199#define SR_RESERVED_18_2 0x18, 0x7f, 0 201#define SR_RESERVED_18_2 0x18, 0x7f, 0
200#define SR_FTN_START 0x18, 0x80, 7 202#define SR_FTN_START 0x18, 0x80, 7
201#define RG_PLL_CF (0x1a) 203#define RG_PLL_CF (0x1a)
202#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 204#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
203#define SR_PLL_CF_START 0x1a, 0x80, 7 205#define SR_PLL_CF_START 0x1a, 0x80, 7
204#define RG_PLL_DCU (0x1b) 206#define RG_PLL_DCU (0x1b)
205#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 207#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
206#define SR_RESERVED_1b_2 0x1b, 0x40, 6 208#define SR_RESERVED_1b_2 0x1b, 0x40, 6
207#define SR_PLL_DCU_START 0x1b, 0x80, 7 209#define SR_PLL_DCU_START 0x1b, 0x80, 7
208#define RG_PART_NUM (0x1c) 210#define RG_PART_NUM (0x1c)
209#define SR_PART_NUM 0x1c, 0xff, 0 211#define SR_PART_NUM 0x1c, 0xff, 0
210#define RG_VERSION_NUM (0x1d) 212#define RG_VERSION_NUM (0x1d)
211#define SR_VERSION_NUM 0x1d, 0xff, 0 213#define SR_VERSION_NUM 0x1d, 0xff, 0
212#define RG_MAN_ID_0 (0x1e) 214#define RG_MAN_ID_0 (0x1e)
213#define SR_MAN_ID_0 0x1e, 0xff, 0 215#define SR_MAN_ID_0 0x1e, 0xff, 0
214#define RG_MAN_ID_1 (0x1f) 216#define RG_MAN_ID_1 (0x1f)
215#define SR_MAN_ID_1 0x1f, 0xff, 0 217#define SR_MAN_ID_1 0x1f, 0xff, 0
216#define RG_SHORT_ADDR_0 (0x20) 218#define RG_SHORT_ADDR_0 (0x20)
217#define SR_SHORT_ADDR_0 0x20, 0xff, 0 219#define SR_SHORT_ADDR_0 0x20, 0xff, 0
218#define RG_SHORT_ADDR_1 (0x21) 220#define RG_SHORT_ADDR_1 (0x21)
219#define SR_SHORT_ADDR_1 0x21, 0xff, 0 221#define SR_SHORT_ADDR_1 0x21, 0xff, 0
220#define RG_PAN_ID_0 (0x22) 222#define RG_PAN_ID_0 (0x22)
221#define SR_PAN_ID_0 0x22, 0xff, 0 223#define SR_PAN_ID_0 0x22, 0xff, 0
222#define RG_PAN_ID_1 (0x23) 224#define RG_PAN_ID_1 (0x23)
223#define SR_PAN_ID_1 0x23, 0xff, 0 225#define SR_PAN_ID_1 0x23, 0xff, 0
224#define RG_IEEE_ADDR_0 (0x24) 226#define RG_IEEE_ADDR_0 (0x24)
225#define SR_IEEE_ADDR_0 0x24, 0xff, 0 227#define SR_IEEE_ADDR_0 0x24, 0xff, 0
226#define RG_IEEE_ADDR_1 (0x25) 228#define RG_IEEE_ADDR_1 (0x25)
227#define SR_IEEE_ADDR_1 0x25, 0xff, 0 229#define SR_IEEE_ADDR_1 0x25, 0xff, 0
228#define RG_IEEE_ADDR_2 (0x26) 230#define RG_IEEE_ADDR_2 (0x26)
229#define SR_IEEE_ADDR_2 0x26, 0xff, 0 231#define SR_IEEE_ADDR_2 0x26, 0xff, 0
230#define RG_IEEE_ADDR_3 (0x27) 232#define RG_IEEE_ADDR_3 (0x27)
231#define SR_IEEE_ADDR_3 0x27, 0xff, 0 233#define SR_IEEE_ADDR_3 0x27, 0xff, 0
232#define RG_IEEE_ADDR_4 (0x28) 234#define RG_IEEE_ADDR_4 (0x28)
233#define SR_IEEE_ADDR_4 0x28, 0xff, 0 235#define SR_IEEE_ADDR_4 0x28, 0xff, 0
234#define RG_IEEE_ADDR_5 (0x29) 236#define RG_IEEE_ADDR_5 (0x29)
235#define SR_IEEE_ADDR_5 0x29, 0xff, 0 237#define SR_IEEE_ADDR_5 0x29, 0xff, 0
236#define RG_IEEE_ADDR_6 (0x2a) 238#define RG_IEEE_ADDR_6 (0x2a)
237#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 239#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
238#define RG_IEEE_ADDR_7 (0x2b) 240#define RG_IEEE_ADDR_7 (0x2b)
239#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 241#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
240#define RG_XAH_CTRL_0 (0x2c) 242#define RG_XAH_CTRL_0 (0x2c)
241#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 243#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
242#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 244#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
243#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 245#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
244#define RG_CSMA_SEED_0 (0x2d) 246#define RG_CSMA_SEED_0 (0x2d)
245#define SR_CSMA_SEED_0 0x2d, 0xff, 0 247#define SR_CSMA_SEED_0 0x2d, 0xff, 0
246#define RG_CSMA_SEED_1 (0x2e) 248#define RG_CSMA_SEED_1 (0x2e)
247#define SR_CSMA_SEED_1 0x2e, 0x07, 0 249#define SR_CSMA_SEED_1 0x2e, 0x07, 0
248#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 250#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
249#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 251#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
250#define SR_AACK_SET_PD 0x2e, 0x20, 5 252#define SR_AACK_SET_PD 0x2e, 0x20, 5
251#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 253#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
252#define RG_CSMA_BE (0x2f) 254#define RG_CSMA_BE (0x2f)
253#define SR_MIN_BE 0x2f, 0x0f, 0 255#define SR_MIN_BE 0x2f, 0x0f, 0
254#define SR_MAX_BE 0x2f, 0xf0, 4 256#define SR_MAX_BE 0x2f, 0xf0, 4
255 257
256#define CMD_REG 0x80 258#define CMD_REG 0x80
257#define CMD_REG_MASK 0x3f 259#define CMD_REG_MASK 0x3f
@@ -292,6 +294,8 @@ struct at86rf230_local {
292#define STATE_BUSY_RX_AACK_NOCLK 0x1E 294#define STATE_BUSY_RX_AACK_NOCLK 0x1E
293#define STATE_TRANSITION_IN_PROGRESS 0x1F 295#define STATE_TRANSITION_IN_PROGRESS 0x1F
294 296
297#define TRX_STATE_MASK (0x1F)
298
295#define AT86RF2XX_NUMREGS 0x3F 299#define AT86RF2XX_NUMREGS 0x3F
296 300
297static void 301static void
@@ -336,6 +340,14 @@ at86rf230_write_subreg(struct at86rf230_local *lp,
336 return regmap_update_bits(lp->regmap, addr, mask, data << shift); 340 return regmap_update_bits(lp->regmap, addr, mask, data << shift);
337} 341}
338 342
343static inline void
344at86rf230_slp_tr_rising_edge(struct at86rf230_local *lp)
345{
346 gpio_set_value(lp->slp_tr, 1);
347 udelay(1);
348 gpio_set_value(lp->slp_tr, 0);
349}
350
339static bool 351static bool
340at86rf230_reg_writeable(struct device *dev, unsigned int reg) 352at86rf230_reg_writeable(struct device *dev, unsigned int reg)
341{ 353{
@@ -509,7 +521,7 @@ at86rf230_async_state_assert(void *context)
509 struct at86rf230_state_change *ctx = context; 521 struct at86rf230_state_change *ctx = context;
510 struct at86rf230_local *lp = ctx->lp; 522 struct at86rf230_local *lp = ctx->lp;
511 const u8 *buf = ctx->buf; 523 const u8 *buf = ctx->buf;
512 const u8 trx_state = buf[1] & 0x1f; 524 const u8 trx_state = buf[1] & TRX_STATE_MASK;
513 525
514 /* Assert state change */ 526 /* Assert state change */
515 if (trx_state != ctx->to_state) { 527 if (trx_state != ctx->to_state) {
@@ -609,11 +621,17 @@ at86rf230_async_state_delay(void *context)
609 switch (ctx->to_state) { 621 switch (ctx->to_state) {
610 case STATE_RX_AACK_ON: 622 case STATE_RX_AACK_ON:
611 tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC); 623 tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
624 /* state change from TRX_OFF to RX_AACK_ON to do a
625 * calibration, we need to reset the timeout for the
626 * next one.
627 */
628 lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
612 goto change; 629 goto change;
630 case STATE_TX_ARET_ON:
613 case STATE_TX_ON: 631 case STATE_TX_ON:
614 tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC); 632 tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
615 /* state change from TRX_OFF to TX_ON to do a 633 /* state change from TRX_OFF to TX_ON or ARET_ON to do
616 * calibration, we need to reset the timeout for the 634 * a calibration, we need to reset the timeout for the
617 * next one. 635 * next one.
618 */ 636 */
619 lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; 637 lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
@@ -667,7 +685,7 @@ at86rf230_async_state_change_start(void *context)
667 struct at86rf230_state_change *ctx = context; 685 struct at86rf230_state_change *ctx = context;
668 struct at86rf230_local *lp = ctx->lp; 686 struct at86rf230_local *lp = ctx->lp;
669 u8 *buf = ctx->buf; 687 u8 *buf = ctx->buf;
670 const u8 trx_state = buf[1] & 0x1f; 688 const u8 trx_state = buf[1] & TRX_STATE_MASK;
671 int rc; 689 int rc;
672 690
673 /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */ 691 /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
@@ -773,16 +791,6 @@ at86rf230_tx_on(void *context)
773} 791}
774 792
775static void 793static void
776at86rf230_tx_trac_error(void *context)
777{
778 struct at86rf230_state_change *ctx = context;
779 struct at86rf230_local *lp = ctx->lp;
780
781 at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
782 at86rf230_tx_on, true);
783}
784
785static void
786at86rf230_tx_trac_check(void *context) 794at86rf230_tx_trac_check(void *context)
787{ 795{
788 struct at86rf230_state_change *ctx = context; 796 struct at86rf230_state_change *ctx = context;
@@ -791,12 +799,12 @@ at86rf230_tx_trac_check(void *context)
791 const u8 trac = (buf[1] & 0xe0) >> 5; 799 const u8 trac = (buf[1] & 0xe0) >> 5;
792 800
793 /* If trac status is different than zero we need to do a state change 801 /* If trac status is different than zero we need to do a state change
794 * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver 802 * to STATE_FORCE_TRX_OFF then STATE_RX_AACK_ON to recover the
795 * state to TX_ON. 803 * transceiver.
796 */ 804 */
797 if (trac) 805 if (trac)
798 at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, 806 at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
799 at86rf230_tx_trac_error, true); 807 at86rf230_tx_on, true);
800 else 808 else
801 at86rf230_tx_on(context); 809 at86rf230_tx_on(context);
802} 810}
@@ -941,13 +949,18 @@ at86rf230_write_frame_complete(void *context)
941 u8 *buf = ctx->buf; 949 u8 *buf = ctx->buf;
942 int rc; 950 int rc;
943 951
944 buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
945 buf[1] = STATE_BUSY_TX;
946 ctx->trx.len = 2; 952 ctx->trx.len = 2;
947 ctx->msg.complete = NULL; 953
948 rc = spi_async(lp->spi, &ctx->msg); 954 if (gpio_is_valid(lp->slp_tr)) {
949 if (rc) 955 at86rf230_slp_tr_rising_edge(lp);
950 at86rf230_async_error(lp, ctx, rc); 956 } else {
957 buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
958 buf[1] = STATE_BUSY_TX;
959 ctx->msg.complete = NULL;
960 rc = spi_async(lp->spi, &ctx->msg);
961 if (rc)
962 at86rf230_async_error(lp, ctx, rc);
963 }
951} 964}
952 965
953static void 966static void
@@ -993,12 +1006,21 @@ at86rf230_xmit_start(void *context)
993 * are in STATE_TX_ON. The path differs here, so we change 1006 * are in STATE_TX_ON. The path differs here, so we change
994 * the complete handler. 1007 * the complete handler.
995 */ 1008 */
996 if (lp->tx_aret) 1009 if (lp->tx_aret) {
997 at86rf230_async_state_change(lp, ctx, STATE_TX_ON, 1010 if (lp->is_tx_from_off) {
998 at86rf230_xmit_tx_on, false); 1011 lp->is_tx_from_off = false;
999 else 1012 at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
1013 at86rf230_xmit_tx_on,
1014 false);
1015 } else {
1016 at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
1017 at86rf230_xmit_tx_on,
1018 false);
1019 }
1020 } else {
1000 at86rf230_async_state_change(lp, ctx, STATE_TX_ON, 1021 at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
1001 at86rf230_write_frame, false); 1022 at86rf230_write_frame, false);
1023 }
1002} 1024}
1003 1025
1004static int 1026static int
@@ -1017,11 +1039,13 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
1017 * to TX_ON, the lp->cal_timeout should be reinit by state_delay 1039 * to TX_ON, the lp->cal_timeout should be reinit by state_delay
1018 * function then to start in the next 5 minutes. 1040 * function then to start in the next 5 minutes.
1019 */ 1041 */
1020 if (time_is_before_jiffies(lp->cal_timeout)) 1042 if (time_is_before_jiffies(lp->cal_timeout)) {
1043 lp->is_tx_from_off = true;
1021 at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, 1044 at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
1022 at86rf230_xmit_start, false); 1045 at86rf230_xmit_start, false);
1023 else 1046 } else {
1024 at86rf230_xmit_start(ctx); 1047 at86rf230_xmit_start(ctx);
1048 }
1025 1049
1026 return 0; 1050 return 0;
1027} 1051}
@@ -1037,9 +1061,6 @@ at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
1037static int 1061static int
1038at86rf230_start(struct ieee802154_hw *hw) 1062at86rf230_start(struct ieee802154_hw *hw)
1039{ 1063{
1040 struct at86rf230_local *lp = hw->priv;
1041
1042 lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
1043 return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON); 1064 return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON);
1044} 1065}
1045 1066
@@ -1673,6 +1694,7 @@ static int at86rf230_probe(struct spi_device *spi)
1673 lp = hw->priv; 1694 lp = hw->priv;
1674 lp->hw = hw; 1695 lp->hw = hw;
1675 lp->spi = spi; 1696 lp->spi = spi;
1697 lp->slp_tr = slp_tr;
1676 hw->parent = &spi->dev; 1698 hw->parent = &spi->dev;
1677 hw->vif_data_size = sizeof(*lp); 1699 hw->vif_data_size = sizeof(*lp);
1678 ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); 1700 ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
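Each SR_* define above is a (register address, bit mask, shift) triple that at86rf230_write_subreg() passes straight to regmap_update_bits(), so a sub-register write only touches the masked field. A minimal sketch of one such write, using the SR_MAX_CSMA_RETRIES triple (0x2c, 0x0e, 1) from the defines above; demo_set_max_csma_retries() is an illustrative name, not a driver function:

#include <linux/regmap.h>

/* Sketch: update only the MAX_CSMA_RETRIES field of XAH_CTRL_0 (reg 0x2c),
 * leaving the other bits of the register untouched.
 */
static int demo_set_max_csma_retries(struct regmap *regmap, unsigned int retries)
{
        /* SR_MAX_CSMA_RETRIES expands to addr 0x2c, mask 0x0e, shift 1 */
        return regmap_update_bits(regmap, 0x2c, 0x0e, retries << 1);
}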
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b227a13f6473..9f59f17dc317 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -599,10 +599,18 @@ static int macvlan_open(struct net_device *dev)
599 goto del_unicast; 599 goto del_unicast;
600 } 600 }
601 601
602 if (dev->flags & IFF_PROMISC) {
603 err = dev_set_promiscuity(lowerdev, 1);
604 if (err < 0)
605 goto clear_multi;
606 }
607
602hash_add: 608hash_add:
603 macvlan_hash_add(vlan); 609 macvlan_hash_add(vlan);
604 return 0; 610 return 0;
605 611
612clear_multi:
613 dev_set_allmulti(lowerdev, -1);
606del_unicast: 614del_unicast:
607 dev_uc_del(lowerdev, dev->dev_addr); 615 dev_uc_del(lowerdev, dev->dev_addr);
608out: 616out:
@@ -638,6 +646,9 @@ static int macvlan_stop(struct net_device *dev)
638 if (dev->flags & IFF_ALLMULTI) 646 if (dev->flags & IFF_ALLMULTI)
639 dev_set_allmulti(lowerdev, -1); 647 dev_set_allmulti(lowerdev, -1);
640 648
649 if (dev->flags & IFF_PROMISC)
650 dev_set_promiscuity(lowerdev, -1);
651
641 dev_uc_del(lowerdev, dev->dev_addr); 652 dev_uc_del(lowerdev, dev->dev_addr);
642 653
643hash_del: 654hash_del:
@@ -696,6 +707,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
696 if (dev->flags & IFF_UP) { 707 if (dev->flags & IFF_UP) {
697 if (change & IFF_ALLMULTI) 708 if (change & IFF_ALLMULTI)
698 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); 709 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
710 if (change & IFF_PROMISC)
711 dev_set_promiscuity(lowerdev,
712 dev->flags & IFF_PROMISC ? 1 : -1);
713
699 } 714 }
700} 715}
701 716
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 8fadaa14b9f0..70641d2c0429 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -27,6 +27,7 @@ config AMD_PHY
27config AMD_XGBE_PHY 27config AMD_XGBE_PHY
28 tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs" 28 tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
29 depends on (OF || ACPI) && HAS_IOMEM 29 depends on (OF || ACPI) && HAS_IOMEM
30 depends on ARM64 || COMPILE_TEST
30 ---help--- 31 ---help---
31 Currently supports the AMD 10GbE PHY 32 Currently supports the AMD 10GbE PHY
32 33
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index c9cb486c753d..53d18150f4e2 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -168,7 +168,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
168 if (!new_bus->irq[i]) 168 if (!new_bus->irq[i])
169 new_bus->irq[i] = PHY_POLL; 169 new_bus->irq[i] = PHY_POLL;
170 170
171 snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); 171 if (bus_id != -1)
172 snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
173 else
174 strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
172 175
173 if (devm_gpio_request(dev, bitbang->mdc, "mdc")) 176 if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
174 goto out_free_bus; 177 goto out_free_bus;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 1190fd8f0088..ebdc357c5131 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -548,7 +548,8 @@ static int kszphy_probe(struct phy_device *phydev)
548 } 548 }
549 549
550 clk = devm_clk_get(&phydev->dev, "rmii-ref"); 550 clk = devm_clk_get(&phydev->dev, "rmii-ref");
551 if (!IS_ERR(clk)) { 551 /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
552 if (!IS_ERR_OR_NULL(clk)) {
552 unsigned long rate = clk_get_rate(clk); 553 unsigned long rate = clk_get_rate(clk);
553 bool rmii_ref_clk_sel_25_mhz; 554 bool rmii_ref_clk_sel_25_mhz;
554 555
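When the clk framework is not built in, devm_clk_get() can come back as NULL rather than an ERR_PTR, and IS_ERR() alone would then treat the missing clock as a valid handle. A short sketch of the defensive lookup, assuming an arbitrary consumer device; get_ref_clk_rate() is an illustrative name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static unsigned long get_ref_clk_rate(struct device *dev)
{
        struct clk *clk = devm_clk_get(dev, "rmii-ref");

        /* NULL (no clk framework) and ERR_PTR (lookup failed) both mean
         * "no usable clock"; only a real handle is queried for its rate.
         */
        if (IS_ERR_OR_NULL(clk))
                return 0;

        return clk_get_rate(clk);
}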
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f86c5ab334aa..3837ae344f63 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -465,6 +465,10 @@ static void pppoe_unbind_sock_work(struct work_struct *work)
465 struct sock *sk = sk_pppox(po); 465 struct sock *sk = sk_pppox(po);
466 466
467 lock_sock(sk); 467 lock_sock(sk);
468 if (po->pppoe_dev) {
469 dev_put(po->pppoe_dev);
470 po->pppoe_dev = NULL;
471 }
468 pppox_unbind_sock(sk); 472 pppox_unbind_sock(sk);
469 release_sock(sk); 473 release_sock(sk);
470 sock_put(sk); 474 sock_put(sk);
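The added block is the standard idempotent release of a cached net_device reference: drop it under the socket lock and clear the pointer so a second pass through the unbind path cannot call dev_put() twice. A minimal sketch with the pppox structures reduced to a bare pointer; release_cached_dev() is an illustrative helper, and the reference is assumed to have been taken when the socket was bound to the device:

#include <linux/netdevice.h>

static void release_cached_dev(struct net_device **devp)
{
        /* caller is assumed to hold whatever lock guards *devp */
        if (*devp) {
                dev_put(*devp);   /* balance the earlier dev_hold() */
                *devp = NULL;     /* make a repeated call a no-op   */
        }
}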
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ac4d03b328b1..aafa1a1898e4 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4116,6 +4116,7 @@ static struct usb_device_id rtl8152_table[] = {
4116 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, 4116 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
4117 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4117 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4118 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, 4118 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
4119 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
4119 {} 4120 {}
4120}; 4121};
4121 4122
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 733f4feb2ef3..3c86b107275a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1285 struct net_device *net) 1285 struct net_device *net)
1286{ 1286{
1287 struct usbnet *dev = netdev_priv(net); 1287 struct usbnet *dev = netdev_priv(net);
1288 int length; 1288 unsigned int length;
1289 struct urb *urb = NULL; 1289 struct urb *urb = NULL;
1290 struct skb_data *entry; 1290 struct skb_data *entry;
1291 struct driver_info *info = dev->driver_info; 1291 struct driver_info *info = dev->driver_info;
@@ -1413,7 +1413,7 @@ not_drop:
1413 } 1413 }
1414 } else 1414 } else
1415 netif_dbg(dev, tx_queued, dev->net, 1415 netif_dbg(dev, tx_queued, dev->net,
1416 "> tx, len %d, type 0x%x\n", length, skb->protocol); 1416 "> tx, len %u, type 0x%x\n", length, skb->protocol);
1417#ifdef CONFIG_PM 1417#ifdef CONFIG_PM
1418deferred: 1418deferred:
1419#endif 1419#endif
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0acd079ba96b..3ad79bb4f2c2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1103,28 +1103,14 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
1103 struct sk_buff *skb; 1103 struct sk_buff *skb;
1104 struct ath_frame_info *fi; 1104 struct ath_frame_info *fi;
1105 struct ieee80211_tx_info *info; 1105 struct ieee80211_tx_info *info;
1106 struct ieee80211_vif *vif;
1107 struct ath_hw *ah = sc->sc_ah; 1106 struct ath_hw *ah = sc->sc_ah;
1108 1107
1109 if (sc->tx99_state || !ah->tpc_enabled) 1108 if (sc->tx99_state || !ah->tpc_enabled)
1110 return MAX_RATE_POWER; 1109 return MAX_RATE_POWER;
1111 1110
1112 skb = bf->bf_mpdu; 1111 skb = bf->bf_mpdu;
1113 info = IEEE80211_SKB_CB(skb);
1114 vif = info->control.vif;
1115
1116 if (!vif) {
1117 max_power = sc->cur_chan->cur_txpower;
1118 goto out;
1119 }
1120
1121 if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) {
1122 max_power = min_t(u8, sc->cur_chan->cur_txpower,
1123 2 * vif->bss_conf.txpower);
1124 goto out;
1125 }
1126
1127 fi = get_frame_info(skb); 1112 fi = get_frame_info(skb);
1113 info = IEEE80211_SKB_CB(skb);
1128 1114
1129 if (!AR_SREV_9300_20_OR_LATER(ah)) { 1115 if (!AR_SREV_9300_20_OR_LATER(ah)) {
1130 int txpower = fi->tx_power; 1116 int txpower = fi->tx_power;
@@ -1161,25 +1147,26 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
1161 txpower -= 2; 1147 txpower -= 2;
1162 1148
1163 txpower = max(txpower, 0); 1149 txpower = max(txpower, 0);
1164 max_power = min_t(u8, ah->tx_power[rateidx], 1150 max_power = min_t(u8, ah->tx_power[rateidx], txpower);
1165 2 * vif->bss_conf.txpower); 1151
1166 max_power = min_t(u8, max_power, txpower); 1152 /* XXX: clamp minimum TX power at 1 for AR9160 since if
1153 * max_power is set to 0, frames are transmitted at max
1154 * TX power
1155 */
1156 if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
1157 max_power = 1;
1167 } else if (!bf->bf_state.bfs_paprd) { 1158 } else if (!bf->bf_state.bfs_paprd) {
1168 if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC)) 1159 if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
1169 max_power = min_t(u8, ah->tx_power_stbc[rateidx], 1160 max_power = min_t(u8, ah->tx_power_stbc[rateidx],
1170 2 * vif->bss_conf.txpower); 1161 fi->tx_power);
1171 else 1162 else
1172 max_power = min_t(u8, ah->tx_power[rateidx], 1163 max_power = min_t(u8, ah->tx_power[rateidx],
1173 2 * vif->bss_conf.txpower); 1164 fi->tx_power);
1174 max_power = min(max_power, fi->tx_power);
1175 } else { 1165 } else {
1176 max_power = ah->paprd_training_power; 1166 max_power = ah->paprd_training_power;
1177 } 1167 }
1178out: 1168
1179 /* XXX: clamp minimum TX power at 1 for AR9160 since if max_power 1169 return max_power;
1180 * is set to 0, frames are transmitted at max TX power
1181 */
1182 return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power;
1183} 1170}
1184 1171
1185static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, 1172static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -2129,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
2129 struct ath_node *an = NULL; 2116 struct ath_node *an = NULL;
2130 enum ath9k_key_type keytype; 2117 enum ath9k_key_type keytype;
2131 bool short_preamble = false; 2118 bool short_preamble = false;
2119 u8 txpower;
2132 2120
2133 /* 2121 /*
2134 * We check if Short Preamble is needed for the CTS rate by 2122 * We check if Short Preamble is needed for the CTS rate by
@@ -2145,6 +2133,16 @@ static void setup_frame_info(struct ieee80211_hw *hw,
2145 if (sta) 2133 if (sta)
2146 an = (struct ath_node *) sta->drv_priv; 2134 an = (struct ath_node *) sta->drv_priv;
2147 2135
2136 if (tx_info->control.vif) {
2137 struct ieee80211_vif *vif = tx_info->control.vif;
2138
2139 txpower = 2 * vif->bss_conf.txpower;
2140 } else {
2141 struct ath_softc *sc = hw->priv;
2142
2143 txpower = sc->cur_chan->cur_txpower;
2144 }
2145
2148 memset(fi, 0, sizeof(*fi)); 2146 memset(fi, 0, sizeof(*fi));
2149 fi->txq = -1; 2147 fi->txq = -1;
2150 if (hw_key) 2148 if (hw_key)
@@ -2155,7 +2153,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
2155 fi->keyix = ATH9K_TXKEYIX_INVALID; 2153 fi->keyix = ATH9K_TXKEYIX_INVALID;
2156 fi->keytype = keytype; 2154 fi->keytype = keytype;
2157 fi->framelen = framelen; 2155 fi->framelen = framelen;
2158 fi->tx_power = MAX_RATE_POWER; 2156 fi->tx_power = txpower;
2159 2157
2160 if (!rate) 2158 if (!rate)
2161 return; 2159 return;
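After this change the per-vif power limit is sampled once per frame in setup_frame_info() (bss_conf.txpower is in dBm, the rate tables count half-dB steps, hence the factor of 2) and the transmit path only consults the cached fi->tx_power. A minimal sketch of the final clamp, assuming both inputs are already in half-dB units; clamp_tx_power() and zero_means_max are illustrative names:

#include <linux/kernel.h>
#include <linux/types.h>

static u8 clamp_tx_power(u8 rate_table_limit, u8 frame_limit, bool zero_means_max)
{
        u8 power = min_t(u8, rate_table_limit, frame_limit);

        /* some chips interpret a 0 setting as "maximum power", so never
         * return 0 for them (mirrors the AR9160 special case above)
         */
        if (!power && zero_means_max)
                power = 1;

        return power;
}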
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index bfdf3faa6c47..62db2e5e45eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -244,6 +244,7 @@ enum iwl_ucode_tlv_flag {
244 * longer than the passive one, which is essential for fragmented scan. 244 * longer than the passive one, which is essential for fragmented scan.
245 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. 245 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
246 * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR 246 * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
247 * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
247 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, 248 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
248 * regardless of the band or the number of the probes. FW will calculate 249 * regardless of the band or the number of the probes. FW will calculate
249 * the actual dwell time. 250 * the actual dwell time.
@@ -260,6 +261,7 @@ enum iwl_ucode_tlv_api {
260 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), 261 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
261 IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9), 262 IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9),
262 IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), 263 IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
264 IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11),
263 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), 265 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
264 IWL_UCODE_TLV_API_SCD_CFG = BIT(15), 266 IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
265 IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), 267 IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 6dfed1259260..56254a837214 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -6,7 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
32 * BSD LICENSE 32 * BSD LICENSE
33 * 33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved. 36 * All rights reserved.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -421,8 +421,9 @@ struct iwl_trans_txq_scd_cfg {
421 * 421 *
422 * All the handlers MUST be implemented 422 * All the handlers MUST be implemented
423 * 423 *
424 * @start_hw: starts the HW- from that point on, the HW can send interrupts 424 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
425 * May sleep 425 * out of a low power state. From that point on, the HW can send
426 * interrupts. May sleep.
426 * @op_mode_leave: Turn off the HW RF kill indication if on 427 * @op_mode_leave: Turn off the HW RF kill indication if on
427 * May sleep 428 * May sleep
428 * @start_fw: allocates and inits all the resources for the transport 429 * @start_fw: allocates and inits all the resources for the transport
@@ -432,10 +433,11 @@ struct iwl_trans_txq_scd_cfg {
432 * the SCD base address in SRAM, then provide it here, or 0 otherwise. 433 * the SCD base address in SRAM, then provide it here, or 0 otherwise.
433 * May sleep 434 * May sleep
434 * @stop_device: stops the whole device (embedded CPU put to reset) and stops 435 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
435 * the HW. From that point on, the HW will be in low power but will still 436 * the HW. If low_power is true, the NIC will be put in low power state.
436 * issue interrupt if the HW RF kill is triggered. This callback must do 437 * From that point on, the HW will be stopped but will still issue an
437 * the right thing and not crash even if start_hw() was called but not 438 * interrupt if the HW RF kill switch is triggered.
438 * start_fw(). May sleep 439 * This callback must do the right thing and not crash even if %start_hw()
440 * was called but not &start_fw(). May sleep.
439 * @d3_suspend: put the device into the correct mode for WoWLAN during 441 * @d3_suspend: put the device into the correct mode for WoWLAN during
440 * suspend. This is optional, if not implemented WoWLAN will not be 442 * suspend. This is optional, if not implemented WoWLAN will not be
441 * supported. This callback may sleep. 443 * supported. This callback may sleep.
@@ -491,14 +493,14 @@ struct iwl_trans_txq_scd_cfg {
491 */ 493 */
492struct iwl_trans_ops { 494struct iwl_trans_ops {
493 495
494 int (*start_hw)(struct iwl_trans *iwl_trans); 496 int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
495 void (*op_mode_leave)(struct iwl_trans *iwl_trans); 497 void (*op_mode_leave)(struct iwl_trans *iwl_trans);
496 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, 498 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
497 bool run_in_rfkill); 499 bool run_in_rfkill);
498 int (*update_sf)(struct iwl_trans *trans, 500 int (*update_sf)(struct iwl_trans *trans,
499 struct iwl_sf_region *st_fwrd_space); 501 struct iwl_sf_region *st_fwrd_space);
500 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); 502 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
501 void (*stop_device)(struct iwl_trans *trans); 503 void (*stop_device)(struct iwl_trans *trans, bool low_power);
502 504
503 void (*d3_suspend)(struct iwl_trans *trans, bool test); 505 void (*d3_suspend)(struct iwl_trans *trans, bool test);
504 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, 506 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
@@ -652,11 +654,16 @@ static inline void iwl_trans_configure(struct iwl_trans *trans,
652 trans->ops->configure(trans, trans_cfg); 654 trans->ops->configure(trans, trans_cfg);
653} 655}
654 656
655static inline int iwl_trans_start_hw(struct iwl_trans *trans) 657static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
656{ 658{
657 might_sleep(); 659 might_sleep();
658 660
659 return trans->ops->start_hw(trans); 661 return trans->ops->start_hw(trans, low_power);
662}
663
664static inline int iwl_trans_start_hw(struct iwl_trans *trans)
665{
666 return trans->ops->start_hw(trans, true);
660} 667}
661 668
662static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) 669static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
@@ -703,15 +710,21 @@ static inline int iwl_trans_update_sf(struct iwl_trans *trans,
703 return 0; 710 return 0;
704} 711}
705 712
706static inline void iwl_trans_stop_device(struct iwl_trans *trans) 713static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
714 bool low_power)
707{ 715{
708 might_sleep(); 716 might_sleep();
709 717
710 trans->ops->stop_device(trans); 718 trans->ops->stop_device(trans, low_power);
711 719
712 trans->state = IWL_TRANS_NO_FW; 720 trans->state = IWL_TRANS_NO_FW;
713} 721}
714 722
723static inline void iwl_trans_stop_device(struct iwl_trans *trans)
724{
725 _iwl_trans_stop_device(trans, true);
726}
727
715static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) 728static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
716{ 729{
717 might_sleep(); 730 might_sleep();
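The shape of this change is a thin compatibility wrapper: the existing single-argument helpers keep their names and forward a default low_power=true, while the underscored variants expose the new flag to the one caller that needs to skip the low-power transition. A generic sketch of the same pattern, with dev_ctx/dev_stop names invented for illustration:

#include <linux/types.h>

struct dev_ctx;

int dev_stop_extended(struct dev_ctx *ctx, bool low_power);    /* new op */

static inline int dev_stop(struct dev_ctx *ctx)
{
        /* legacy behaviour: always allow the low-power transition */
        return dev_stop_extended(ctx, true);
}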
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index a6c48c7b1e16..1b1b2bf26819 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -1726,7 +1726,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
1726 results->matched_profiles = le32_to_cpu(query->matched_profiles); 1726 results->matched_profiles = le32_to_cpu(query->matched_profiles);
1727 memcpy(results->matches, query->matches, sizeof(results->matches)); 1727 memcpy(results->matches, query->matches, sizeof(results->matches));
1728 1728
1729#ifdef CPTCFG_IWLWIFI_DEBUGFS 1729#ifdef CONFIG_IWLWIFI_DEBUGFS
1730 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); 1730 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
1731#endif 1731#endif
1732 1732
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 4fc0938b3fb6..b1baa33cc19b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -298,6 +298,40 @@ struct iwl_uapsd_misbehaving_ap_notif {
298} __packed; 298} __packed;
299 299
300/** 300/**
301 * struct iwl_reduce_tx_power_cmd - TX power reduction command
302 * REDUCE_TX_POWER_CMD = 0x9f
303 * @flags: (reserved for future implementation)
304 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
305 * @pwr_restriction: TX power restriction in dBms.
306 */
307struct iwl_reduce_tx_power_cmd {
308 u8 flags;
309 u8 mac_context_id;
310 __le16 pwr_restriction;
311} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
312
313/**
314 * struct iwl_dev_tx_power_cmd - TX power reduction command
315 * REDUCE_TX_POWER_CMD = 0x9f
316 * @set_mode: 0 - MAC tx power, 1 - device tx power
317 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
318 * @pwr_restriction: TX power restriction in 1/8 dBms.
319 * @dev_24: device TX power restriction in 1/8 dBms
320 * @dev_52_low: device TX power restriction upper band - low
321 * @dev_52_high: device TX power restriction upper band - high
322 */
323struct iwl_dev_tx_power_cmd {
324 __le32 set_mode;
325 __le32 mac_context_id;
326 __le16 pwr_restriction;
327 __le16 dev_24;
328 __le16 dev_52_low;
329 __le16 dev_52_high;
330} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
331
332#define IWL_DEV_MAX_TX_POWER 0x7FFF
333
334/**
301 * struct iwl_beacon_filter_cmd 335 * struct iwl_beacon_filter_cmd
302 * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) 336 * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
303 * @id_and_color: MAC context identifier 337 * @id_and_color: MAC context identifier
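iwl_dev_tx_power_cmd carries the restriction in 1/8 dBm steps, so a limit given in whole dBm has to be multiplied by 8, and "no restriction" is signalled with the IWL_DEV_MAX_TX_POWER sentinel rather than 0. A minimal sketch of filling the command, assuming a negative dBm value stands in for "unlimited" (the driver uses its own IWL_DEFAULT_MAX_TX_POWER marker for that) and redefining the struct locally as *_sketch so the fragment is self-contained:

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_DEV_MAX_TX_POWER 0x7FFF    /* mirrors IWL_DEV_MAX_TX_POWER */

struct iwl_dev_tx_power_cmd_sketch {
        __le32 set_mode;
        __le32 mac_context_id;
        __le16 pwr_restriction;
        __le16 dev_24;
        __le16 dev_52_low;
        __le16 dev_52_high;
};

static void fill_tx_power_cmd(struct iwl_dev_tx_power_cmd_sketch *cmd,
                              u32 mac_id, int tx_power_dbm)
{
        cmd->set_mode = cpu_to_le32(0);          /* 0 = per-MAC restriction */
        cmd->mac_context_id = cpu_to_le32(mac_id);

        if (tx_power_dbm < 0)                    /* treated as unrestricted */
                cmd->pwr_restriction = cpu_to_le16(DEMO_DEV_MAX_TX_POWER);
        else                                     /* dBm -> 1/8 dBm */
                cmd->pwr_restriction = cpu_to_le16(8 * tx_power_dbm);
}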
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 4f81dcf57a73..d6cced47d561 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -122,46 +122,6 @@ enum iwl_scan_complete_status {
122 SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C, 122 SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
123}; 123};
124 124
125/**
126 * struct iwl_scan_results_notif - scan results for one channel
127 * ( SCAN_RESULTS_NOTIFICATION = 0x83 )
128 * @channel: which channel the results are from
129 * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
130 * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
131 * @num_probe_not_sent: # of request that weren't sent due to not enough time
132 * @duration: duration spent in channel, in usecs
133 * @statistics: statistics gathered for this channel
134 */
135struct iwl_scan_results_notif {
136 u8 channel;
137 u8 band;
138 u8 probe_status;
139 u8 num_probe_not_sent;
140 __le32 duration;
141 __le32 statistics[SCAN_RESULTS_STATISTICS];
142} __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */
143
144/**
145 * struct iwl_scan_complete_notif - notifies end of scanning (all channels)
146 * ( SCAN_COMPLETE_NOTIFICATION = 0x84 )
147 * @scanned_channels: number of channels scanned (and number of valid results)
148 * @status: one of SCAN_COMP_STATUS_*
149 * @bt_status: BT on/off status
150 * @last_channel: last channel that was scanned
151 * @tsf_low: TSF timer (lower half) in usecs
152 * @tsf_high: TSF timer (higher half) in usecs
153 * @results: array of scan results, only "scanned_channels" of them are valid
154 */
155struct iwl_scan_complete_notif {
156 u8 scanned_channels;
157 u8 status;
158 u8 bt_status;
159 u8 last_channel;
160 __le32 tsf_low;
161 __le32 tsf_high;
162 struct iwl_scan_results_notif results[];
163} __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */
164
165/* scan offload */ 125/* scan offload */
166#define IWL_SCAN_MAX_BLACKLIST_LEN 64 126#define IWL_SCAN_MAX_BLACKLIST_LEN 64
167#define IWL_SCAN_SHORT_BLACKLIST_LEN 16 127#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
@@ -554,7 +514,7 @@ struct iwl_scan_req_unified_lmac {
554} __packed; 514} __packed;
555 515
556/** 516/**
557 * struct iwl_lmac_scan_results_notif - scan results for one channel - 517 * struct iwl_scan_results_notif - scan results for one channel -
558 * SCAN_RESULT_NTF_API_S_VER_3 518 * SCAN_RESULT_NTF_API_S_VER_3
559 * @channel: which channel the results are from 519 * @channel: which channel the results are from
560 * @band: 0 for 5.2 GHz, 1 for 2.4 GHz 520 * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
@@ -562,7 +522,7 @@ struct iwl_scan_req_unified_lmac {
562 * @num_probe_not_sent: # of request that weren't sent due to not enough time 522 * @num_probe_not_sent: # of request that weren't sent due to not enough time
563 * @duration: duration spent in channel, in usecs 523 * @duration: duration spent in channel, in usecs
564 */ 524 */
565struct iwl_lmac_scan_results_notif { 525struct iwl_scan_results_notif {
566 u8 channel; 526 u8 channel;
567 u8 band; 527 u8 band;
568 u8 probe_status; 528 u8 probe_status;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index aab68cbae754..01b1da6ad359 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -281,19 +281,6 @@ struct iwl_tx_ant_cfg_cmd {
281 __le32 valid; 281 __le32 valid;
282} __packed; 282} __packed;
283 283
284/**
285 * struct iwl_reduce_tx_power_cmd - TX power reduction command
286 * REDUCE_TX_POWER_CMD = 0x9f
287 * @flags: (reserved for future implementation)
288 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
289 * @pwr_restriction: TX power restriction in dBms.
290 */
291struct iwl_reduce_tx_power_cmd {
292 u8 flags;
293 u8 mac_context_id;
294 __le16 pwr_restriction;
295} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
296
297/* 284/*
298 * Calibration control struct. 285 * Calibration control struct.
299 * Sent as part of the phy configuration command. 286 * Sent as part of the phy configuration command.
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index bc5eac4960e1..df869633f4dd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -6,7 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
32 * BSD LICENSE 32 * BSD LICENSE
33 * 33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved. 36 * All rights reserved.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -322,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
322 322
323 lockdep_assert_held(&mvm->mutex); 323 lockdep_assert_held(&mvm->mutex);
324 324
325 if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating)) 325 if (WARN_ON_ONCE(mvm->calibrating))
326 return 0; 326 return 0;
327 327
328 iwl_init_notification_wait(&mvm->notif_wait, 328 iwl_init_notification_wait(&mvm->notif_wait,
@@ -396,8 +396,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
396 */ 396 */
397 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, 397 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
398 MVM_UCODE_CALIB_TIMEOUT); 398 MVM_UCODE_CALIB_TIMEOUT);
399 if (!ret)
400 mvm->init_ucode_complete = true;
401 399
402 if (ret && iwl_mvm_is_radio_killed(mvm)) { 400 if (ret && iwl_mvm_is_radio_killed(mvm)) {
403 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); 401 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
@@ -494,15 +492,6 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
494 492
495 mvm->fw_dump_desc = desc; 493 mvm->fw_dump_desc = desc;
496 494
497 /* stop recording */
498 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
499 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
500 } else {
501 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
502 /* wait before we collect the data till the DBGC stop */
503 udelay(100);
504 }
505
506 queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); 495 queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
507 496
508 return 0; 497 return 0;
@@ -658,25 +647,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
658 * module loading, load init ucode now 647 * module loading, load init ucode now
659 * (for example, if we were in RFKILL) 648 * (for example, if we were in RFKILL)
660 */ 649 */
661 if (!mvm->init_ucode_complete) { 650 ret = iwl_run_init_mvm_ucode(mvm, false);
662 ret = iwl_run_init_mvm_ucode(mvm, false); 651 if (ret && !iwlmvm_mod_params.init_dbg) {
663 if (ret && !iwlmvm_mod_params.init_dbg) { 652 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
664 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); 653 /* this can't happen */
665 /* this can't happen */ 654 if (WARN_ON(ret > 0))
666 if (WARN_ON(ret > 0)) 655 ret = -ERFKILL;
667 ret = -ERFKILL; 656 goto error;
668 goto error; 657 }
669 } 658 if (!iwlmvm_mod_params.init_dbg) {
670 if (!iwlmvm_mod_params.init_dbg) { 659 /*
671 /* 660 * Stop and start the transport without entering low power
672 * should stop and start HW since that INIT 661 * mode. This will save the state of other components on the
673 * image just loaded 662 * device that are triggered by the INIT firwmare (MFUART).
674 */ 663 */
675 iwl_trans_stop_device(mvm->trans); 664 _iwl_trans_stop_device(mvm->trans, false);
676 ret = iwl_trans_start_hw(mvm->trans); 665 _iwl_trans_start_hw(mvm->trans, false);
677 if (ret) 666 if (ret)
678 return ret; 667 return ret;
679 }
680 } 668 }
681 669
682 if (iwlmvm_mod_params.init_dbg) 670 if (iwlmvm_mod_params.init_dbg)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 84555170b6f7..40265b9c66ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1322,7 +1322,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1322 1322
1323 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1323 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1324 iwl_mvm_d0i3_enable_tx(mvm, NULL); 1324 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1325 ret = iwl_mvm_update_quotas(mvm, false, NULL); 1325 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1326 if (ret) 1326 if (ret)
1327 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 1327 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1328 ret); 1328 ret);
@@ -1471,8 +1471,8 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1471 return NULL; 1471 return NULL;
1472} 1472}
1473 1473
1474static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1474static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm,
1475 s8 tx_power) 1475 struct ieee80211_vif *vif, s8 tx_power)
1476{ 1476{
1477 /* FW is in charge of regulatory enforcement */ 1477 /* FW is in charge of regulatory enforcement */
1478 struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = { 1478 struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
@@ -1485,6 +1485,26 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1485 &reduce_txpwr_cmd); 1485 &reduce_txpwr_cmd);
1486} 1486}
1487 1487
1488static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1489 s16 tx_power)
1490{
1491 struct iwl_dev_tx_power_cmd cmd = {
1492 .set_mode = 0,
1493 .mac_context_id =
1494 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1495 .pwr_restriction = cpu_to_le16(8 * tx_power),
1496 };
1497
1498 if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
1499 return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
1500
1501 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1502 cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1503
1504 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
1505 sizeof(cmd), &cmd);
1506}
1507
1488static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, 1508static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1489 struct ieee80211_vif *vif) 1509 struct ieee80211_vif *vif)
1490{ 1510{
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d5522a161242..cf70f681d1ac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -603,7 +603,6 @@ struct iwl_mvm {
603 603
604 enum iwl_ucode_type cur_ucode; 604 enum iwl_ucode_type cur_ucode;
605 bool ucode_loaded; 605 bool ucode_loaded;
606 bool init_ucode_complete;
607 bool calibrating; 606 bool calibrating;
608 u32 error_event_table; 607 u32 error_event_table;
609 u32 log_event_table; 608 u32 log_event_table;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index a08b03d58d4b..1c66297d82c0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -865,6 +865,16 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
865 return; 865 return;
866 866
867 mutex_lock(&mvm->mutex); 867 mutex_lock(&mvm->mutex);
868
869 /* stop recording */
870 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
871 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
872 } else {
873 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
874 /* wait before we collect the data till the DBGC stop */
875 udelay(100);
876 }
877
868 iwl_mvm_fw_error_dump(mvm); 878 iwl_mvm_fw_error_dump(mvm);
869 879
870 /* start recording again if the firmware is not crashed */ 880 /* start recording again if the firmware is not crashed */
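Moving the "stop recording" writes into the dump worker means the debug monitor is quiesced under mvm->mutex immediately before its buffer is read, instead of at the (unlocked) request site. The general shape, as a sketch only, with the prph writes and iwl_mvm_fw_error_dump() stubbed out as function-pointer parameters:

#include <linux/delay.h>
#include <linux/mutex.h>

static void demo_dump_worker(struct mutex *lock,
                             void (*stop_capture)(void),
                             void (*copy_capture_buffer)(void))
{
        mutex_lock(lock);

        stop_capture();
        udelay(100);            /* let the capture engine settle before reading */

        copy_capture_buffer();

        mutex_unlock(lock);
}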
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 78ec7db64ba5..d6314ddf57b5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -478,6 +478,11 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
478 if (vif->type != NL80211_IFTYPE_STATION) 478 if (vif->type != NL80211_IFTYPE_STATION)
479 return; 479 return;
480 480
481 if (sig == 0) {
482 IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
483 return;
484 }
485
481 mvmvif->bf_data.ave_beacon_signal = sig; 486 mvmvif->bf_data.ave_beacon_signal = sig;
482 487
483 /* BT Coex */ 488 /* BT Coex */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 74e1c86289dc..1075a213bd6a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -319,7 +319,7 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
319 struct iwl_device_cmd *cmd) 319 struct iwl_device_cmd *cmd)
320{ 320{
321 struct iwl_rx_packet *pkt = rxb_addr(rxb); 321 struct iwl_rx_packet *pkt = rxb_addr(rxb);
322 struct iwl_scan_complete_notif *notif = (void *)pkt->data; 322 struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
323 323
324 IWL_DEBUG_SCAN(mvm, 324 IWL_DEBUG_SCAN(mvm,
325 "Scan offload iteration complete: status=0x%x scanned channels=%d\n", 325 "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 2de8fbfe4edf..47bbf573fdc8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,8 +5,8 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,8 @@
31 * 31 *
32 * BSD LICENSE 32 * BSD LICENSE
33 * 33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved. 36 * All rights reserved.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
104static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) 104static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
105{ 105{
106 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 106 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
107 struct page *page; 107 struct page *page = NULL;
108 dma_addr_t phys; 108 dma_addr_t phys;
109 u32 size; 109 u32 size;
110 u8 power; 110 u8 power;
@@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
131 DMA_FROM_DEVICE); 131 DMA_FROM_DEVICE);
132 if (dma_mapping_error(trans->dev, phys)) { 132 if (dma_mapping_error(trans->dev, phys)) {
133 __free_pages(page, order); 133 __free_pages(page, order);
134 page = NULL;
134 continue; 135 continue;
135 } 136 }
136 IWL_INFO(trans, 137 IWL_INFO(trans,
@@ -1020,7 +1021,7 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1020 iwl_pcie_tx_start(trans, scd_addr); 1021 iwl_pcie_tx_start(trans, scd_addr);
1021} 1022}
1022 1023
1023static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) 1024static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1024{ 1025{
1025 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1026 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1026 bool hw_rfkill, was_hw_rfkill; 1027 bool hw_rfkill, was_hw_rfkill;
@@ -1115,7 +1116,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1115void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) 1116void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
1116{ 1117{
1117 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) 1118 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
1118 iwl_trans_pcie_stop_device(trans); 1119 iwl_trans_pcie_stop_device(trans, true);
1119} 1120}
1120 1121
1121static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) 1122static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -1200,7 +1201,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1200 return 0; 1201 return 0;
1201} 1202}
1202 1203
1203static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) 1204static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
1204{ 1205{
1205 bool hw_rfkill; 1206 bool hw_rfkill;
1206 int err; 1207 int err;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index f0188c83c79f..2721cf89fb16 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
126 126
127 do { 127 do {
128 status = usb_control_msg(udev, pipe, request, reqtype, value, 128 status = usb_control_msg(udev, pipe, request, reqtype, value,
129 index, pdata, len, 0); /*max. timeout*/ 129 index, pdata, len, 1000);
130 if (status < 0) { 130 if (status < 0) {
131 /* firmware download is checksummed, don't retry */ 131 /* firmware download is checksummed, don't retry */
132 if ((value >= FW_8192C_START_ADDRESS && 132 if ((value >= FW_8192C_START_ADDRESS &&
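A timeout of 0 in usb_control_msg() means "wait forever", which can hang the caller if the device never completes the request; the change bounds each vendor transfer to one second. A minimal sketch of a bounded vendor read; DEMO_VENDOR_REQ and demo_vendor_read() are made up for illustration:

#include <linux/usb.h>

#define DEMO_VENDOR_REQ 0x05    /* illustrative request number only */

static int demo_vendor_read(struct usb_device *udev, u16 value, u16 index,
                            void *buf, u16 len)
{
        /* 1000 ms upper bound instead of an unbounded wait */
        return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                               DEMO_VENDOR_REQ,
                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                               value, index, buf, len, 1000);
}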
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 89dca77ca038..18ee2089df4a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
1110EXPORT_SYMBOL_GPL(devm_pinctrl_put); 1110EXPORT_SYMBOL_GPL(devm_pinctrl_put);
1111 1111
1112int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, 1112int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
1113 bool dup, bool locked) 1113 bool dup)
1114{ 1114{
1115 int i, ret; 1115 int i, ret;
1116 struct pinctrl_maps *maps_node; 1116 struct pinctrl_maps *maps_node;
@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
1178 maps_node->maps = maps; 1178 maps_node->maps = maps;
1179 } 1179 }
1180 1180
1181 if (!locked) 1181 mutex_lock(&pinctrl_maps_mutex);
1182 mutex_lock(&pinctrl_maps_mutex);
1183 list_add_tail(&maps_node->node, &pinctrl_maps); 1182 list_add_tail(&maps_node->node, &pinctrl_maps);
1184 if (!locked) 1183 mutex_unlock(&pinctrl_maps_mutex);
1185 mutex_unlock(&pinctrl_maps_mutex);
1186 1184
1187 return 0; 1185 return 0;
1188} 1186}
@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
1197int pinctrl_register_mappings(struct pinctrl_map const *maps, 1195int pinctrl_register_mappings(struct pinctrl_map const *maps,
1198 unsigned num_maps) 1196 unsigned num_maps)
1199{ 1197{
1200 return pinctrl_register_map(maps, num_maps, true, false); 1198 return pinctrl_register_map(maps, num_maps, true);
1201} 1199}
1202 1200
1203void pinctrl_unregister_map(struct pinctrl_map const *map) 1201void pinctrl_unregister_map(struct pinctrl_map const *map)
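With the locked flag gone, pinctrl_register_map() has a single locking mode: it always takes pinctrl_maps_mutex around the list insertion, presumably because the devicetree path no longer calls it with that mutex already held. A generic sketch of the simplified pattern, with demo_* names standing in for the real maps list:

#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_maps_mutex);
static LIST_HEAD(demo_maps);

struct demo_maps_node {
        struct list_head node;
};

/* the function owns its locking; callers must not hold demo_maps_mutex */
static void demo_register_node(struct demo_maps_node *n)
{
        mutex_lock(&demo_maps_mutex);
        list_add_tail(&n->node, &demo_maps);
        mutex_unlock(&demo_maps_mutex);
}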
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 75476b3d87da..b24ea846c867 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
183} 183}
184 184
185int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, 185int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
186 bool dup, bool locked); 186 bool dup);
187void pinctrl_unregister_map(struct pinctrl_map const *map); 187void pinctrl_unregister_map(struct pinctrl_map const *map);
188 188
189extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev); 189extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index eda13de2e7c0..0bbf7d71b281 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
92 dt_map->num_maps = num_maps; 92 dt_map->num_maps = num_maps;
93 list_add_tail(&dt_map->node, &p->dt_maps); 93 list_add_tail(&dt_map->node, &p->dt_maps);
94 94
95 return pinctrl_register_map(map, num_maps, false, true); 95 return pinctrl_register_map(map, num_maps, false);
96} 96}
97 97
98struct pinctrl_dev *of_pinctrl_get(struct device_node *np) 98struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 493294c0ebe6..474812e2b0cb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -881,6 +881,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
881 if (!mtk_eint_get_mask(pctl, eint_num)) { 881 if (!mtk_eint_get_mask(pctl, eint_num)) {
882 mtk_eint_mask(d); 882 mtk_eint_mask(d);
883 unmask = 1; 883 unmask = 1;
884 } else {
885 unmask = 0;
884 } 886 }
885 887
886 clr_bit = 0xff << eint_offset; 888 clr_bit = 0xff << eint_offset;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index 42f930f70de3..03aa58c4cb85 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -364,7 +364,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
364 MPP_FUNCTION(0x5, "audio", "mclk"), 364 MPP_FUNCTION(0x5, "audio", "mclk"),
365 MPP_FUNCTION(0x6, "uart0", "cts")), 365 MPP_FUNCTION(0x6, "uart0", "cts")),
366 MPP_MODE(63, 366 MPP_MODE(63,
367 MPP_FUNCTION(0x0, "gpo", NULL), 367 MPP_FUNCTION(0x0, "gpio", NULL),
368 MPP_FUNCTION(0x1, "spi0", "sck"), 368 MPP_FUNCTION(0x1, "spi0", "sck"),
369 MPP_FUNCTION(0x2, "tclk", NULL)), 369 MPP_FUNCTION(0x2, "tclk", NULL)),
370 MPP_MODE(64, 370 MPP_MODE(64,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index b2d22218a258..ae4115e4b4ef 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -260,6 +260,7 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
260 val = 1; 260 val = 1;
261 } 261 }
262 262
263 val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
263 val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; 264 val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
264 val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT; 265 val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
265 266
@@ -417,7 +418,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
417 return ret; 418 return ret;
418 419
419 val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT; 420 val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT;
420 val = pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT; 421 val |= pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
421 422
422 ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val); 423 ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val);
423 if (ret < 0) 424 if (ret < 0)
@@ -466,12 +467,13 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
466 seq_puts(s, " ---"); 467 seq_puts(s, " ---");
467 } else { 468 } else {
468 469
469 if (!pad->input_enabled) { 470 if (pad->input_enabled) {
470 ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS); 471 ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS);
471 if (!ret) { 472 if (ret < 0)
472 ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; 473 return;
473 pad->out_value = ret; 474
474 } 475 ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
476 pad->out_value = ret;
475 } 477 }
476 478
477 seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in"); 479 seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
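Both fixes in this file are about building a register value field by field: every field has to be shifted into its position and OR-ed into the accumulator, and only the first write may be a plain assignment, otherwise earlier fields are silently discarded. A tiny sketch of the accumulation pattern, with DEMO_* names made up for illustration:

#include <linux/types.h>

#define DEMO_DIR_SHIFT    4
#define DEMO_FUNC_SHIFT   1
#define DEMO_VALUE_MASK   0x1

static u8 demo_build_mode_reg(u8 dir, u8 func, u8 out_value)
{
        u8 val;

        val  = dir << DEMO_DIR_SHIFT;       /* first field: plain assignment */
        val |= func << DEMO_FUNC_SHIFT;     /* later fields: OR, never '='   */
        val |= out_value & DEMO_VALUE_MASK;

        return val;
}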
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 8f36c5f91949..211b942ad6d5 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -370,6 +370,7 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
370 } 370 }
371 } 371 }
372 372
373 val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
373 val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT; 374 val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
374 val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK; 375 val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
375 376
@@ -576,10 +577,11 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
576 577
577 if (pad->input_enabled) { 578 if (pad->input_enabled) {
578 ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS); 579 ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
579 if (!ret) { 580 if (ret < 0)
580 ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; 581 return;
581 pad->out_value = ret; 582
582 } 583 ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
584 pad->out_value = ret;
583 } 585 }
584 586
585 seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in"); 587 seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index b3d419a84723..b496db87bc05 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -830,6 +830,13 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
830 */ 830 */
831static const struct dmi_system_id no_hw_rfkill_list[] = { 831static const struct dmi_system_id no_hw_rfkill_list[] = {
832 { 832 {
833 .ident = "Lenovo G40-30",
834 .matches = {
835 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
836 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
837 },
838 },
839 {
833 .ident = "Lenovo Yoga 2 11 / 13 / Pro", 840 .ident = "Lenovo Yoga 2 11 / 13 / Pro",
834 .matches = { 841 .matches = {
835 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 842 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7769575345d8..9bb9ad6d4a1b 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2115,7 +2115,7 @@ static int hotkey_mask_get(void)
2115 return 0; 2115 return 0;
2116} 2116}
2117 2117
2118void static hotkey_mask_warn_incomplete_mask(void) 2118static void hotkey_mask_warn_incomplete_mask(void)
2119{ 2119{
2120 /* log only what the user can fix... */ 2120 /* log only what the user can fix... */
2121 const u32 wantedmask = hotkey_driver_mask & 2121 const u32 wantedmask = hotkey_driver_mask &
diff --git a/drivers/power/axp288_fuel_gauge.c b/drivers/power/axp288_fuel_gauge.c
index ca1cc5a47eb1..bd1dbfee2515 100644
--- a/drivers/power/axp288_fuel_gauge.c
+++ b/drivers/power/axp288_fuel_gauge.c
@@ -1149,6 +1149,7 @@ static struct platform_driver axp288_fuel_gauge_driver = {
1149 1149
1150module_platform_driver(axp288_fuel_gauge_driver); 1150module_platform_driver(axp288_fuel_gauge_driver);
1151 1151
1152MODULE_AUTHOR("Ramakrishna Pallala <ramakrishna.pallala@intel.com>");
1152MODULE_AUTHOR("Todd Brandt <todd.e.brandt@linux.intel.com>"); 1153MODULE_AUTHOR("Todd Brandt <todd.e.brandt@linux.intel.com>");
1153MODULE_DESCRIPTION("Xpower AXP288 Fuel Gauge Driver"); 1154MODULE_DESCRIPTION("Xpower AXP288 Fuel Gauge Driver");
1154MODULE_LICENSE("GPL"); 1155MODULE_LICENSE("GPL");
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index a57433de5c24..b6b98378faa3 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -1109,6 +1109,14 @@ static void __exit bq27x00_battery_exit(void)
1109} 1109}
1110module_exit(bq27x00_battery_exit); 1110module_exit(bq27x00_battery_exit);
1111 1111
1112#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
1113MODULE_ALIAS("platform:bq27000-battery");
1114#endif
1115
1116#ifdef CONFIG_BATTERY_BQ27X00_I2C
1117MODULE_ALIAS("i2c:bq27000-battery");
1118#endif
1119
1112MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); 1120MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
1113MODULE_DESCRIPTION("BQ27x00 battery monitor driver"); 1121MODULE_DESCRIPTION("BQ27x00 battery monitor driver");
1114MODULE_LICENSE("GPL"); 1122MODULE_LICENSE("GPL");
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 2da9ed8ccbb5..8a971b3dbe58 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -347,7 +347,7 @@ static int collie_bat_probe(struct ucb1x00_dev *dev)
347 goto err_psy_reg_main; 347 goto err_psy_reg_main;
348 } 348 }
349 349
350 psy_main_cfg.drv_data = &collie_bat_bu; 350 psy_bu_cfg.drv_data = &collie_bat_bu;
351 collie_bat_bu.psy = power_supply_register(&dev->ucb->dev, 351 collie_bat_bu.psy = power_supply_register(&dev->ucb->dev,
352 &collie_bat_bu_desc, 352 &collie_bat_bu_desc,
353 &psy_bu_cfg); 353 &psy_bu_cfg);
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index aad9c3318c02..17d93a73c513 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -41,6 +41,7 @@ config POWER_RESET_AXXIA
41config POWER_RESET_BRCMSTB 41config POWER_RESET_BRCMSTB
42 bool "Broadcom STB reset driver" 42 bool "Broadcom STB reset driver"
43 depends on ARM || MIPS || COMPILE_TEST 43 depends on ARM || MIPS || COMPILE_TEST
44 depends on MFD_SYSCON
44 default ARCH_BRCMSTB 45 default ARCH_BRCMSTB
45 help 46 help
46 This driver provides restart support for Broadcom STB boards. 47 This driver provides restart support for Broadcom STB boards.
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 01c7055c4200..ca461ebc7ae8 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -212,9 +212,9 @@ static int at91_reset_platform_probe(struct platform_device *pdev)
212 res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 ); 212 res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 );
213 at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start, 213 at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
214 resource_size(res)); 214 resource_size(res));
215 if (IS_ERR(at91_ramc_base[idx])) { 215 if (!at91_ramc_base[idx]) {
216 dev_err(&pdev->dev, "Could not map ram controller address\n"); 216 dev_err(&pdev->dev, "Could not map ram controller address\n");
217 return PTR_ERR(at91_ramc_base[idx]); 217 return -ENOMEM;
218 } 218 }
219 } 219 }
220 220
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 7ef193b6f7fe..1e08195551fe 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -120,18 +120,7 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer)
120 120
121static void ltc2952_poweroff_start_wde(struct ltc2952_poweroff *data) 121static void ltc2952_poweroff_start_wde(struct ltc2952_poweroff *data)
122{ 122{
123 if (hrtimer_start(&data->timer_wde, data->wde_interval, 123 hrtimer_start(&data->timer_wde, data->wde_interval, HRTIMER_MODE_REL);
124 HRTIMER_MODE_REL)) {
125 /*
126 * The device will not toggle the watchdog reset,
127 * thus shut down is only safe if the PowerPath controller
128 * has a long enough time-off before triggering a hardware
129 * power-off.
130 *
131 * Only sending a warning as the system will power-off anyway
132 */
133 dev_err(data->dev, "unable to start the timer\n");
134 }
135} 124}
136 125
137static enum hrtimer_restart 126static enum hrtimer_restart
@@ -165,9 +154,8 @@ static irqreturn_t ltc2952_poweroff_handler(int irq, void *dev_id)
165 } 154 }
166 155
167 if (gpiod_get_value(data->gpio_trigger)) { 156 if (gpiod_get_value(data->gpio_trigger)) {
168 if (hrtimer_start(&data->timer_trigger, data->trigger_delay, 157 hrtimer_start(&data->timer_trigger, data->trigger_delay,
169 HRTIMER_MODE_REL)) 158 HRTIMER_MODE_REL);
170 dev_err(data->dev, "unable to start the wait timer\n");
171 } else { 159 } else {
172 hrtimer_cancel(&data->timer_trigger); 160 hrtimer_cancel(&data->timer_trigger);
173 /* omitting return value check, timer should have been valid */ 161 /* omitting return value check, timer should have been valid */
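Editor's note: both ltc2952 hunks drop error handling because hrtimer_start() cannot fail for a properly initialized timer (and returns void on current kernels), so the calls can stand alone. A minimal usage sketch under that assumption; the function names are illustrative:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart example_timeout(struct hrtimer *t)
{
        /* periodic work goes here */
        return HRTIMER_NORESTART;
}

static void example_arm_timer(struct hrtimer *t)
{
        hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        t->function = example_timeout;
        hrtimer_start(t, ms_to_ktime(100), HRTIMER_MODE_REL);
}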
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6149ae01e11f..0fe4ad8826b2 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -164,6 +164,16 @@ config RTC_DRV_ABB5ZES3
164 This driver can also be built as a module. If so, the module 164 This driver can also be built as a module. If so, the module
165 will be called rtc-ab-b5ze-s3. 165 will be called rtc-ab-b5ze-s3.
166 166
167config RTC_DRV_ABX80X
168 tristate "Abracon ABx80x"
169 help
170 If you say yes here you get support for Abracon AB080X and AB180X
171 families of ultra-low-power battery- and capacitor-backed real-time
172 clock chips.
173
174 This driver can also be built as a module. If so, the module
175 will be called rtc-abx80x.
176
167config RTC_DRV_AS3722 177config RTC_DRV_AS3722
168 tristate "ams AS3722 RTC driver" 178 tristate "ams AS3722 RTC driver"
169 depends on MFD_AS3722 179 depends on MFD_AS3722
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index c31731c29762..2b82e2b0311b 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
25obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o 25obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
26obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o 26obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
27obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o 27obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o
28obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o
28obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o 29obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o
29obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o 30obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o
30obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o 31obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
new file mode 100644
index 000000000000..4337c3bc6ace
--- /dev/null
+++ b/drivers/rtc/rtc-abx80x.c
@@ -0,0 +1,307 @@
1/*
2 * A driver for the I2C members of the Abracon AB x8xx RTC family,
3 * and compatible: AB 1805 and AB 0805
4 *
5 * Copyright 2014-2015 Macq S.A.
6 *
7 * Author: Philippe De Muyter <phdm@macqel.be>
8 * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/bcd.h>
17#include <linux/i2c.h>
18#include <linux/module.h>
19#include <linux/rtc.h>
20
21#define ABX8XX_REG_HTH 0x00
22#define ABX8XX_REG_SC 0x01
23#define ABX8XX_REG_MN 0x02
24#define ABX8XX_REG_HR 0x03
25#define ABX8XX_REG_DA 0x04
26#define ABX8XX_REG_MO 0x05
27#define ABX8XX_REG_YR 0x06
28#define ABX8XX_REG_WD 0x07
29
30#define ABX8XX_REG_CTRL1 0x10
31#define ABX8XX_CTRL_WRITE BIT(1)
32#define ABX8XX_CTRL_12_24 BIT(6)
33
34#define ABX8XX_REG_CFG_KEY 0x1f
35#define ABX8XX_CFG_KEY_MISC 0x9d
36
37#define ABX8XX_REG_ID0 0x28
38
39#define ABX8XX_REG_TRICKLE 0x20
40#define ABX8XX_TRICKLE_CHARGE_ENABLE 0xa0
41#define ABX8XX_TRICKLE_STANDARD_DIODE 0x8
42#define ABX8XX_TRICKLE_SCHOTTKY_DIODE 0x4
43
44static u8 trickle_resistors[] = {0, 3, 6, 11};
45
46enum abx80x_chip {AB0801, AB0803, AB0804, AB0805,
47 AB1801, AB1803, AB1804, AB1805, ABX80X};
48
49struct abx80x_cap {
50 u16 pn;
51 bool has_tc;
52};
53
54static struct abx80x_cap abx80x_caps[] = {
55 [AB0801] = {.pn = 0x0801},
56 [AB0803] = {.pn = 0x0803},
57 [AB0804] = {.pn = 0x0804, .has_tc = true},
58 [AB0805] = {.pn = 0x0805, .has_tc = true},
59 [AB1801] = {.pn = 0x1801},
60 [AB1803] = {.pn = 0x1803},
61 [AB1804] = {.pn = 0x1804, .has_tc = true},
62 [AB1805] = {.pn = 0x1805, .has_tc = true},
63 [ABX80X] = {.pn = 0}
64};
65
66static struct i2c_driver abx80x_driver;
67
68static int abx80x_enable_trickle_charger(struct i2c_client *client,
69 u8 trickle_cfg)
70{
71 int err;
72
73 /*
74 * Write the configuration key register to enable access to the Trickle
75 * register
76 */
77 err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
78 ABX8XX_CFG_KEY_MISC);
79 if (err < 0) {
80 dev_err(&client->dev, "Unable to write configuration key\n");
81 return -EIO;
82 }
83
84 err = i2c_smbus_write_byte_data(client, ABX8XX_REG_TRICKLE,
85 ABX8XX_TRICKLE_CHARGE_ENABLE |
86 trickle_cfg);
87 if (err < 0) {
88 dev_err(&client->dev, "Unable to write trickle register\n");
89 return -EIO;
90 }
91
92 return 0;
93}
94
95static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
96{
97 struct i2c_client *client = to_i2c_client(dev);
98 unsigned char buf[8];
99 int err;
100
101 err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH,
102 sizeof(buf), buf);
103 if (err < 0) {
104 dev_err(&client->dev, "Unable to read date\n");
105 return -EIO;
106 }
107
108 tm->tm_sec = bcd2bin(buf[ABX8XX_REG_SC] & 0x7F);
109 tm->tm_min = bcd2bin(buf[ABX8XX_REG_MN] & 0x7F);
110 tm->tm_hour = bcd2bin(buf[ABX8XX_REG_HR] & 0x3F);
111 tm->tm_wday = buf[ABX8XX_REG_WD] & 0x7;
112 tm->tm_mday = bcd2bin(buf[ABX8XX_REG_DA] & 0x3F);
113 tm->tm_mon = bcd2bin(buf[ABX8XX_REG_MO] & 0x1F) - 1;
114 tm->tm_year = bcd2bin(buf[ABX8XX_REG_YR]) + 100;
115
116 err = rtc_valid_tm(tm);
117 if (err < 0)
118 dev_err(&client->dev, "retrieved date/time is not valid.\n");
119
120 return err;
121}
122
123static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
124{
125 struct i2c_client *client = to_i2c_client(dev);
126 unsigned char buf[8];
127 int err;
128
129 if (tm->tm_year < 100)
130 return -EINVAL;
131
132 buf[ABX8XX_REG_HTH] = 0;
133 buf[ABX8XX_REG_SC] = bin2bcd(tm->tm_sec);
134 buf[ABX8XX_REG_MN] = bin2bcd(tm->tm_min);
135 buf[ABX8XX_REG_HR] = bin2bcd(tm->tm_hour);
136 buf[ABX8XX_REG_DA] = bin2bcd(tm->tm_mday);
137 buf[ABX8XX_REG_MO] = bin2bcd(tm->tm_mon + 1);
138 buf[ABX8XX_REG_YR] = bin2bcd(tm->tm_year - 100);
139 buf[ABX8XX_REG_WD] = tm->tm_wday;
140
141 err = i2c_smbus_write_i2c_block_data(client, ABX8XX_REG_HTH,
142 sizeof(buf), buf);
143 if (err < 0) {
144 dev_err(&client->dev, "Unable to write to date registers\n");
145 return -EIO;
146 }
147
148 return 0;
149}
150
151static const struct rtc_class_ops abx80x_rtc_ops = {
152 .read_time = abx80x_rtc_read_time,
153 .set_time = abx80x_rtc_set_time,
154};
155
156static int abx80x_dt_trickle_cfg(struct device_node *np)
157{
158 const char *diode;
159 int trickle_cfg = 0;
160 int i, ret;
161 u32 tmp;
162
163 ret = of_property_read_string(np, "abracon,tc-diode", &diode);
164 if (ret)
165 return ret;
166
167 if (!strcmp(diode, "standard"))
168 trickle_cfg |= ABX8XX_TRICKLE_STANDARD_DIODE;
169 else if (!strcmp(diode, "schottky"))
170 trickle_cfg |= ABX8XX_TRICKLE_SCHOTTKY_DIODE;
171 else
172 return -EINVAL;
173
174 ret = of_property_read_u32(np, "abracon,tc-resistor", &tmp);
175 if (ret)
176 return ret;
177
178 for (i = 0; i < sizeof(trickle_resistors); i++)
179 if (trickle_resistors[i] == tmp)
180 break;
181
182 if (i == sizeof(trickle_resistors))
183 return -EINVAL;
184
185 return (trickle_cfg | i);
186}
187
188static int abx80x_probe(struct i2c_client *client,
189 const struct i2c_device_id *id)
190{
191 struct device_node *np = client->dev.of_node;
192 struct rtc_device *rtc;
193 int i, data, err, trickle_cfg = -EINVAL;
194 char buf[7];
195 unsigned int part = id->driver_data;
196 unsigned int partnumber;
197 unsigned int majrev, minrev;
198 unsigned int lot;
199 unsigned int wafer;
200 unsigned int uid;
201
202 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
203 return -ENODEV;
204
205 err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_ID0,
206 sizeof(buf), buf);
207 if (err < 0) {
208 dev_err(&client->dev, "Unable to read partnumber\n");
209 return -EIO;
210 }
211
212 partnumber = (buf[0] << 8) | buf[1];
213 majrev = buf[2] >> 3;
214 minrev = buf[2] & 0x7;
215 lot = ((buf[4] & 0x80) << 2) | ((buf[6] & 0x80) << 1) | buf[3];
216 uid = ((buf[4] & 0x7f) << 8) | buf[5];
217 wafer = (buf[6] & 0x7c) >> 2;
218 dev_info(&client->dev, "model %04x, revision %u.%u, lot %x, wafer %x, uid %x\n",
219 partnumber, majrev, minrev, lot, wafer, uid);
220
221 data = i2c_smbus_read_byte_data(client, ABX8XX_REG_CTRL1);
222 if (data < 0) {
223 dev_err(&client->dev, "Unable to read control register\n");
224 return -EIO;
225 }
226
227 err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CTRL1,
228 ((data & ~ABX8XX_CTRL_12_24) |
229 ABX8XX_CTRL_WRITE));
230 if (err < 0) {
231 dev_err(&client->dev, "Unable to write control register\n");
232 return -EIO;
233 }
234
235 /* part autodetection */
236 if (part == ABX80X) {
237 for (i = 0; abx80x_caps[i].pn; i++)
238 if (partnumber == abx80x_caps[i].pn)
239 break;
240 if (abx80x_caps[i].pn == 0) {
241 dev_err(&client->dev, "Unknown part: %04x\n",
242 partnumber);
243 return -EINVAL;
244 }
245 part = i;
246 }
247
248 if (partnumber != abx80x_caps[part].pn) {
249 dev_err(&client->dev, "partnumber mismatch %04x != %04x\n",
250 partnumber, abx80x_caps[part].pn);
251 return -EINVAL;
252 }
253
254 if (np && abx80x_caps[part].has_tc)
255 trickle_cfg = abx80x_dt_trickle_cfg(np);
256
257 if (trickle_cfg > 0) {
258 dev_info(&client->dev, "Enabling trickle charger: %02x\n",
259 trickle_cfg);
260 abx80x_enable_trickle_charger(client, trickle_cfg);
261 }
262
263 rtc = devm_rtc_device_register(&client->dev, abx80x_driver.driver.name,
264 &abx80x_rtc_ops, THIS_MODULE);
265
266 if (IS_ERR(rtc))
267 return PTR_ERR(rtc);
268
269 i2c_set_clientdata(client, rtc);
270
271 return 0;
272}
273
274static int abx80x_remove(struct i2c_client *client)
275{
276 return 0;
277}
278
279static const struct i2c_device_id abx80x_id[] = {
280 { "abx80x", ABX80X },
281 { "ab0801", AB0801 },
282 { "ab0803", AB0803 },
283 { "ab0804", AB0804 },
284 { "ab0805", AB0805 },
285 { "ab1801", AB1801 },
286 { "ab1803", AB1803 },
287 { "ab1804", AB1804 },
288 { "ab1805", AB1805 },
289 { }
290};
291MODULE_DEVICE_TABLE(i2c, abx80x_id);
292
293static struct i2c_driver abx80x_driver = {
294 .driver = {
295 .name = "rtc-abx80x",
296 },
297 .probe = abx80x_probe,
298 .remove = abx80x_remove,
299 .id_table = abx80x_id,
300};
301
302module_i2c_driver(abx80x_driver);
303
304MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
305MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
306MODULE_DESCRIPTION("Abracon ABX80X RTC driver");
307MODULE_LICENSE("GPL v2");
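Editor's note: in the new driver above, abx80x_dt_trickle_cfg() packs the diode type and the resistor index into one value, which abx80x_enable_trickle_charger() then ORs with ABX8XX_TRICKLE_CHARGE_ENABLE before writing ABX8XX_REG_TRICKLE. A worked example of the encoding, using the constants from the driver; the concrete diode/resistor choice is only illustrative:

/* Schottky diode + resistor value 3 (index 1 in trickle_resistors[]) */
int trickle_cfg = ABX8XX_TRICKLE_SCHOTTKY_DIODE | 1;   /* 0x4 | 0x1 = 0x05 */
u8 reg_val = ABX8XX_TRICKLE_CHARGE_ENABLE | trickle_cfg; /* 0xa0 | 0x05 = 0xa5 */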
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 43e04af39e09..cb70ced7e0db 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -40,6 +40,13 @@ struct armada38x_rtc {
40 void __iomem *regs; 40 void __iomem *regs;
41 void __iomem *regs_soc; 41 void __iomem *regs_soc;
42 spinlock_t lock; 42 spinlock_t lock;
43 /*
44 * While setting the time, the RTC TIME register should not be
 45 * accessed. Setting the RTC time involves sleeping for
 46 * 100 ms, so a mutex instead of a spinlock is used to protect
47 * it
48 */
49 struct mutex mutex_time;
43 int irq; 50 int irq;
44}; 51};
45 52
@@ -59,8 +66,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
59 struct armada38x_rtc *rtc = dev_get_drvdata(dev); 66 struct armada38x_rtc *rtc = dev_get_drvdata(dev);
60 unsigned long time, time_check, flags; 67 unsigned long time, time_check, flags;
61 68
62 spin_lock_irqsave(&rtc->lock, flags); 69 mutex_lock(&rtc->mutex_time);
63
64 time = readl(rtc->regs + RTC_TIME); 70 time = readl(rtc->regs + RTC_TIME);
65 /* 71 /*
66 * WA for failing time set attempts. As stated in HW ERRATA if 72 * WA for failing time set attempts. As stated in HW ERRATA if
@@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
71 if ((time_check - time) > 1) 77 if ((time_check - time) > 1)
72 time_check = readl(rtc->regs + RTC_TIME); 78 time_check = readl(rtc->regs + RTC_TIME);
73 79
74 spin_unlock_irqrestore(&rtc->lock, flags); 80 mutex_unlock(&rtc->mutex_time);
75 81
76 rtc_time_to_tm(time_check, tm); 82 rtc_time_to_tm(time_check, tm);
77 83
@@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
94 * then wait for 100ms before writing to the time register to be 100 * then wait for 100ms before writing to the time register to be
95 * sure that the data will be taken into account. 101 * sure that the data will be taken into account.
96 */ 102 */
97 spin_lock_irqsave(&rtc->lock, flags); 103 mutex_lock(&rtc->mutex_time);
98
99 rtc_delayed_write(0, rtc, RTC_STATUS); 104 rtc_delayed_write(0, rtc, RTC_STATUS);
100
101 spin_unlock_irqrestore(&rtc->lock, flags);
102
103 msleep(100); 105 msleep(100);
104
105 spin_lock_irqsave(&rtc->lock, flags);
106
107 rtc_delayed_write(time, rtc, RTC_TIME); 106 rtc_delayed_write(time, rtc, RTC_TIME);
107 mutex_unlock(&rtc->mutex_time);
108 108
109 spin_unlock_irqrestore(&rtc->lock, flags);
110out: 109out:
111 return ret; 110 return ret;
112} 111}
@@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
230 return -ENOMEM; 229 return -ENOMEM;
231 230
232 spin_lock_init(&rtc->lock); 231 spin_lock_init(&rtc->lock);
232 mutex_init(&rtc->mutex_time);
233 233
234 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc"); 234 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
235 rtc->regs = devm_ioremap_resource(&pdev->dev, res); 235 rtc->regs = devm_ioremap_resource(&pdev->dev, res);
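Editor's note: the armada38x conversion is needed because msleep() may not be called while a spinlock is held (spinlocked sections must not sleep), whereas sleeping under a mutex is allowed. A minimal sketch of the resulting pattern, with the register helpers as in the driver:

mutex_lock(&rtc->mutex_time);
rtc_delayed_write(0, rtc, RTC_STATUS);
msleep(100);                    /* sleeping is fine under a mutex */
rtc_delayed_write(time, rtc, RTC_TIME);
mutex_unlock(&rtc->mutex_time);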
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 7600639db4c4..add419d6ff34 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
149static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); 149static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
150static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); 150static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
151static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); 151static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
152static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
153 152
154/* Functions */ 153/* Functions */
155 154
@@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1340 } 1339 }
1341 1340
1342 /* Now complete the io */ 1341 /* Now complete the io */
1342 scsi_dma_unmap(cmd);
1343 cmd->scsi_done(cmd);
1343 tw_dev->state[request_id] = TW_S_COMPLETED; 1344 tw_dev->state[request_id] = TW_S_COMPLETED;
1344 twa_free_request_id(tw_dev, request_id); 1345 twa_free_request_id(tw_dev, request_id);
1345 tw_dev->posted_request_count--; 1346 tw_dev->posted_request_count--;
1346 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1347 twa_unmap_scsi_data(tw_dev, request_id);
1348 } 1347 }
1349 1348
1350 /* Check for valid status after each drain */ 1349 /* Check for valid status after each drain */
@@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
1402 } 1401 }
1403} /* End twa_load_sgl() */ 1402} /* End twa_load_sgl() */
1404 1403
1405/* This function will perform a pci-dma mapping for a scatter gather list */
1406static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1407{
1408 int use_sg;
1409 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1410
1411 use_sg = scsi_dma_map(cmd);
1412 if (!use_sg)
1413 return 0;
1414 else if (use_sg < 0) {
1415 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1416 return 0;
1417 }
1418
1419 cmd->SCp.phase = TW_PHASE_SGLIST;
1420 cmd->SCp.have_data_in = use_sg;
1421
1422 return use_sg;
1423} /* End twa_map_scsi_sg_data() */
1424
1425/* This function will poll for a response interrupt of a request */ 1404/* This function will poll for a response interrupt of a request */
1426static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) 1405static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1427{ 1406{
@@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1600 (tw_dev->state[i] != TW_S_INITIAL) && 1579 (tw_dev->state[i] != TW_S_INITIAL) &&
1601 (tw_dev->state[i] != TW_S_COMPLETED)) { 1580 (tw_dev->state[i] != TW_S_COMPLETED)) {
1602 if (tw_dev->srb[i]) { 1581 if (tw_dev->srb[i]) {
1603 tw_dev->srb[i]->result = (DID_RESET << 16); 1582 struct scsi_cmnd *cmd = tw_dev->srb[i];
1604 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1583
1605 twa_unmap_scsi_data(tw_dev, i); 1584 cmd->result = (DID_RESET << 16);
1585 scsi_dma_unmap(cmd);
1586 cmd->scsi_done(cmd);
1606 } 1587 }
1607 } 1588 }
1608 } 1589 }
@@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
1781 /* Save the scsi command for use by the ISR */ 1762 /* Save the scsi command for use by the ISR */
1782 tw_dev->srb[request_id] = SCpnt; 1763 tw_dev->srb[request_id] = SCpnt;
1783 1764
1784 /* Initialize phase to zero */
1785 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1786
1787 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); 1765 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1788 switch (retval) { 1766 switch (retval) {
1789 case SCSI_MLQUEUE_HOST_BUSY: 1767 case SCSI_MLQUEUE_HOST_BUSY:
1768 scsi_dma_unmap(SCpnt);
1790 twa_free_request_id(tw_dev, request_id); 1769 twa_free_request_id(tw_dev, request_id);
1791 twa_unmap_scsi_data(tw_dev, request_id);
1792 break; 1770 break;
1793 case 1: 1771 case 1:
1794 tw_dev->state[request_id] = TW_S_COMPLETED;
1795 twa_free_request_id(tw_dev, request_id);
1796 twa_unmap_scsi_data(tw_dev, request_id);
1797 SCpnt->result = (DID_ERROR << 16); 1772 SCpnt->result = (DID_ERROR << 16);
1773 scsi_dma_unmap(SCpnt);
1798 done(SCpnt); 1774 done(SCpnt);
1775 tw_dev->state[request_id] = TW_S_COMPLETED;
1776 twa_free_request_id(tw_dev, request_id);
1799 retval = 0; 1777 retval = 0;
1800 } 1778 }
1801out: 1779out:
@@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1863 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); 1841 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1864 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); 1842 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1865 } else { 1843 } else {
1866 sg_count = twa_map_scsi_sg_data(tw_dev, request_id); 1844 sg_count = scsi_dma_map(srb);
1867 if (sg_count == 0) 1845 if (sg_count < 0)
1868 goto out; 1846 goto out;
1869 1847
1870 scsi_for_each_sg(srb, sg, sg_count, i) { 1848 scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1979 return(table[index].text); 1957 return(table[index].text);
1980} /* End twa_string_lookup() */ 1958} /* End twa_string_lookup() */
1981 1959
1982/* This function will perform a pci-dma unmap */
1983static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1984{
1985 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1986
1987 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1988 scsi_dma_unmap(cmd);
1989} /* End twa_unmap_scsi_data() */
1990
1991/* This function gets called when a disk is coming on-line */ 1960/* This function gets called when a disk is coming on-line */
1992static int twa_slave_configure(struct scsi_device *sdev) 1961static int twa_slave_configure(struct scsi_device *sdev)
1993{ 1962{
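Editor's note: the 3w-9xxx changes (and the matching 3w-sas/3w-xxxx changes below) drop the driver-private phase tracking and use scsi_dma_map()/scsi_dma_unmap() directly; scsi_dma_map() returns the number of mapped segments, 0 for a command with no data, or a negative value on failure. A hedged sketch of the pattern, with illustrative function names:

#include <scsi/scsi_cmnd.h>

static int example_build_sglist(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        int i, sg_count;

        sg_count = scsi_dma_map(cmd);
        if (sg_count < 0)
                return sg_count;        /* mapping failed */

        scsi_for_each_sg(cmd, sg, sg_count, i) {
                /* program sg_dma_address(sg) / sg_dma_len(sg) into the HW */
        }
        return sg_count;                /* 0 means no data phase */
}

static void example_complete(struct scsi_cmnd *cmd)
{
        scsi_dma_unmap(cmd);            /* balances scsi_dma_map() */
        cmd->scsi_done(cmd);
}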
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 040f7214e5b7..0fdc83cfa0e1 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
324#define TW_CURRENT_DRIVER_BUILD 0 324#define TW_CURRENT_DRIVER_BUILD 0
325#define TW_CURRENT_DRIVER_BRANCH 0 325#define TW_CURRENT_DRIVER_BRANCH 0
326 326
327/* Phase defines */
328#define TW_PHASE_INITIAL 0
329#define TW_PHASE_SINGLE 1
330#define TW_PHASE_SGLIST 2
331
332/* Misc defines */ 327/* Misc defines */
333#define TW_9550SX_DRAIN_COMPLETED 0xFFFF 328#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
334#define TW_SECTOR_SIZE 512 329#define TW_SECTOR_SIZE 512
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 2361772d5909..f8374850f714 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
290 return 0; 290 return 0;
291} /* End twl_post_command_packet() */ 291} /* End twl_post_command_packet() */
292 292
293/* This function will perform a pci-dma mapping for a scatter gather list */
294static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
295{
296 int use_sg;
297 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
298
299 use_sg = scsi_dma_map(cmd);
300 if (!use_sg)
301 return 0;
302 else if (use_sg < 0) {
303 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
304 return 0;
305 }
306
307 cmd->SCp.phase = TW_PHASE_SGLIST;
308 cmd->SCp.have_data_in = use_sg;
309
310 return use_sg;
311} /* End twl_map_scsi_sg_data() */
312
313/* This function hands scsi cdb's to the firmware */ 293/* This function hands scsi cdb's to the firmware */
314static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) 294static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
315{ 295{
@@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
357 if (!sglistarg) { 337 if (!sglistarg) {
358 /* Map sglist from scsi layer to cmd packet */ 338 /* Map sglist from scsi layer to cmd packet */
359 if (scsi_sg_count(srb)) { 339 if (scsi_sg_count(srb)) {
360 sg_count = twl_map_scsi_sg_data(tw_dev, request_id); 340 sg_count = scsi_dma_map(srb);
361 if (sg_count == 0) 341 if (sg_count <= 0)
362 goto out; 342 goto out;
363 343
364 scsi_for_each_sg(srb, sg, sg_count, i) { 344 scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1102,15 +1082,6 @@ out:
1102 return retval; 1082 return retval;
1103} /* End twl_initialize_device_extension() */ 1083} /* End twl_initialize_device_extension() */
1104 1084
1105/* This function will perform a pci-dma unmap */
1106static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1107{
1108 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1109
1110 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1111 scsi_dma_unmap(cmd);
1112} /* End twl_unmap_scsi_data() */
1113
1114/* This function will handle attention interrupts */ 1085/* This function will handle attention interrupts */
1115static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) 1086static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
1116{ 1087{
@@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
1251 } 1222 }
1252 1223
1253 /* Now complete the io */ 1224 /* Now complete the io */
1225 scsi_dma_unmap(cmd);
1226 cmd->scsi_done(cmd);
1254 tw_dev->state[request_id] = TW_S_COMPLETED; 1227 tw_dev->state[request_id] = TW_S_COMPLETED;
1255 twl_free_request_id(tw_dev, request_id); 1228 twl_free_request_id(tw_dev, request_id);
1256 tw_dev->posted_request_count--; 1229 tw_dev->posted_request_count--;
1257 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1258 twl_unmap_scsi_data(tw_dev, request_id);
1259 } 1230 }
1260 1231
1261 /* Check for another response interrupt */ 1232 /* Check for another response interrupt */
@@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
1400 if ((tw_dev->state[i] != TW_S_FINISHED) && 1371 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1401 (tw_dev->state[i] != TW_S_INITIAL) && 1372 (tw_dev->state[i] != TW_S_INITIAL) &&
1402 (tw_dev->state[i] != TW_S_COMPLETED)) { 1373 (tw_dev->state[i] != TW_S_COMPLETED)) {
1403 if (tw_dev->srb[i]) { 1374 struct scsi_cmnd *cmd = tw_dev->srb[i];
1404 tw_dev->srb[i]->result = (DID_RESET << 16); 1375
1405 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1376 if (cmd) {
1406 twl_unmap_scsi_data(tw_dev, i); 1377 cmd->result = (DID_RESET << 16);
1378 scsi_dma_unmap(cmd);
1379 cmd->scsi_done(cmd);
1407 } 1380 }
1408 } 1381 }
1409 } 1382 }
@@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
1507 /* Save the scsi command for use by the ISR */ 1480 /* Save the scsi command for use by the ISR */
1508 tw_dev->srb[request_id] = SCpnt; 1481 tw_dev->srb[request_id] = SCpnt;
1509 1482
1510 /* Initialize phase to zero */
1511 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1512
1513 retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); 1483 retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1514 if (retval) { 1484 if (retval) {
1515 tw_dev->state[request_id] = TW_S_COMPLETED; 1485 tw_dev->state[request_id] = TW_S_COMPLETED;
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index d474892701d4..fec6449c7595 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
103#define TW_CURRENT_DRIVER_BUILD 0 103#define TW_CURRENT_DRIVER_BUILD 0
104#define TW_CURRENT_DRIVER_BRANCH 0 104#define TW_CURRENT_DRIVER_BRANCH 0
105 105
106/* Phase defines */
107#define TW_PHASE_INITIAL 0
108#define TW_PHASE_SGLIST 2
109
110/* Misc defines */ 106/* Misc defines */
111#define TW_SECTOR_SIZE 512 107#define TW_SECTOR_SIZE 512
112#define TW_MAX_UNITS 32 108#define TW_MAX_UNITS 32
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c75f2048319f..2940bd769936 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
1271 return 0; 1271 return 0;
1272} /* End tw_initialize_device_extension() */ 1272} /* End tw_initialize_device_extension() */
1273 1273
1274static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1275{
1276 int use_sg;
1277
1278 dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
1279
1280 use_sg = scsi_dma_map(cmd);
1281 if (use_sg < 0) {
1282 printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
1283 return 0;
1284 }
1285
1286 cmd->SCp.phase = TW_PHASE_SGLIST;
1287 cmd->SCp.have_data_in = use_sg;
1288
1289 return use_sg;
1290} /* End tw_map_scsi_sg_data() */
1291
1292static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1293{
1294 dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
1295
1296 if (cmd->SCp.phase == TW_PHASE_SGLIST)
1297 scsi_dma_unmap(cmd);
1298} /* End tw_unmap_scsi_data() */
1299
1300/* This function will reset a device extension */ 1274/* This function will reset a device extension */
1301static int tw_reset_device_extension(TW_Device_Extension *tw_dev) 1275static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
1302{ 1276{
@@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
1319 srb = tw_dev->srb[i]; 1293 srb = tw_dev->srb[i];
1320 if (srb != NULL) { 1294 if (srb != NULL) {
1321 srb->result = (DID_RESET << 16); 1295 srb->result = (DID_RESET << 16);
1322 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); 1296 scsi_dma_unmap(srb);
1323 tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]); 1297 srb->scsi_done(srb);
1324 } 1298 }
1325 } 1299 }
1326 } 1300 }
@@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
1767 command_packet->byte8.io.lba = lba; 1741 command_packet->byte8.io.lba = lba;
1768 command_packet->byte6.block_count = num_sectors; 1742 command_packet->byte6.block_count = num_sectors;
1769 1743
1770 use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); 1744 use_sg = scsi_dma_map(srb);
1771 if (!use_sg) 1745 if (use_sg <= 0)
1772 return 1; 1746 return 1;
1773 1747
1774 scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { 1748 scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
@@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
1955 /* Save the scsi command for use by the ISR */ 1929 /* Save the scsi command for use by the ISR */
1956 tw_dev->srb[request_id] = SCpnt; 1930 tw_dev->srb[request_id] = SCpnt;
1957 1931
1958 /* Initialize phase to zero */
1959 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1960
1961 switch (*command) { 1932 switch (*command) {
1962 case READ_10: 1933 case READ_10:
1963 case READ_6: 1934 case READ_6:
@@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
2185 2156
2186 /* Now complete the io */ 2157 /* Now complete the io */
2187 if ((error != TW_ISR_DONT_COMPLETE)) { 2158 if ((error != TW_ISR_DONT_COMPLETE)) {
2159 scsi_dma_unmap(tw_dev->srb[request_id]);
2160 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
2188 tw_dev->state[request_id] = TW_S_COMPLETED; 2161 tw_dev->state[request_id] = TW_S_COMPLETED;
2189 tw_state_request_finish(tw_dev, request_id); 2162 tw_state_request_finish(tw_dev, request_id);
2190 tw_dev->posted_request_count--; 2163 tw_dev->posted_request_count--;
2191 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
2192
2193 tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
2194 } 2164 }
2195 } 2165 }
2196 2166
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 29b0b84ed69e..6f65e663d393 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
195#define TW_AEN_SMART_FAIL 0x000F 195#define TW_AEN_SMART_FAIL 0x000F
196#define TW_AEN_SBUF_FAIL 0x0024 196#define TW_AEN_SBUF_FAIL 0x0024
197 197
198/* Phase defines */
199#define TW_PHASE_INITIAL 0
200#define TW_PHASE_SINGLE 1
201#define TW_PHASE_SGLIST 2
202
203/* Misc defines */ 198/* Misc defines */
204#define TW_ALIGNMENT_6000 64 /* 64 bytes */ 199#define TW_ALIGNMENT_6000 64 /* 64 bytes */
205#define TW_ALIGNMENT_7000 4 /* 4 bytes */ 200#define TW_ALIGNMENT_7000 4 /* 4 bytes */
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index ec432763a29a..b95d2779f467 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -375,9 +375,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
375 u8 lun = cmd->device->lun; 375 u8 lun = cmd->device->lun;
376 unsigned long flags; 376 unsigned long flags;
377 int bufflen = scsi_bufflen(cmd); 377 int bufflen = scsi_bufflen(cmd);
378 int mbo; 378 int mbo, sg_count;
379 struct mailbox *mb = aha1542->mb; 379 struct mailbox *mb = aha1542->mb;
380 struct ccb *ccb = aha1542->ccb; 380 struct ccb *ccb = aha1542->ccb;
381 struct chain *cptr;
381 382
382 if (*cmd->cmnd == REQUEST_SENSE) { 383 if (*cmd->cmnd == REQUEST_SENSE) {
383 /* Don't do the command - we have the sense data already */ 384 /* Don't do the command - we have the sense data already */
@@ -397,6 +398,13 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
397 print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); 398 print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
398 } 399 }
399#endif 400#endif
401 if (bufflen) { /* allocate memory before taking host_lock */
402 sg_count = scsi_sg_count(cmd);
403 cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
404 if (!cptr)
405 return SCSI_MLQUEUE_HOST_BUSY;
406 }
407
400 /* Use the outgoing mailboxes in a round-robin fashion, because this 408 /* Use the outgoing mailboxes in a round-robin fashion, because this
401 is how the host adapter will scan for them */ 409 is how the host adapter will scan for them */
402 410
@@ -441,19 +449,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
441 449
442 if (bufflen) { 450 if (bufflen) {
443 struct scatterlist *sg; 451 struct scatterlist *sg;
444 struct chain *cptr; 452 int i;
445 int i, sg_count = scsi_sg_count(cmd);
446 453
447 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ 454 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
448 cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count, 455 cmd->host_scribble = (void *)cptr;
449 GFP_KERNEL | GFP_DMA);
450 cptr = (struct chain *) cmd->host_scribble;
451 if (cptr == NULL) {
452 /* free the claimed mailbox slot */
453 aha1542->int_cmds[mbo] = NULL;
454 spin_unlock_irqrestore(sh->host_lock, flags);
455 return SCSI_MLQUEUE_HOST_BUSY;
456 }
457 scsi_for_each_sg(cmd, sg, sg_count, i) { 456 scsi_for_each_sg(cmd, sg, sg_count, i) {
458 any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) 457 any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
459 + sg->offset); 458 + sg->offset);
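Editor's note: the aha1542 change moves the GFP_KERNEL chain allocation to before the host lock is taken, because GFP_KERNEL allocations may sleep and sleeping is forbidden while holding a spinlock. A minimal sketch of the pattern; struct chain comes from the driver, the rest is illustrative:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int example_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
        struct chain *cptr = NULL;
        unsigned long flags;

        if (scsi_sg_count(cmd)) {
                /* may sleep, so allocate before taking the host lock */
                cptr = kmalloc(sizeof(*cptr) * scsi_sg_count(cmd),
                               GFP_KERNEL | GFP_DMA);
                if (!cptr)
                        return SCSI_MLQUEUE_HOST_BUSY;
        }

        spin_lock_irqsave(sh->host_lock, flags);
        /* claim a mailbox and build the CCB; no sleeping in here */
        spin_unlock_irqrestore(sh->host_lock, flags);
        return 0;
}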
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 262ab837a704..9f77d23239a2 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -226,6 +226,7 @@ static struct {
226 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 226 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
227 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, 227 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
228 {"Promise", "", NULL, BLIST_SPARSELUN}, 228 {"Promise", "", NULL, BLIST_SPARSELUN},
229 {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
229 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, 230 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
230 {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, 231 {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
231 {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, 232 {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 60aae01caa89..6efab1c455e1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
897 */ 897 */
898 if (*bflags & BLIST_MAX_512) 898 if (*bflags & BLIST_MAX_512)
899 blk_queue_max_hw_sectors(sdev->request_queue, 512); 899 blk_queue_max_hw_sectors(sdev->request_queue, 512);
900 /*
901 * Max 1024 sector transfer length for targets that report incorrect
902 * max/optimal lengths and relied on the old block layer safe default
903 */
904 else if (*bflags & BLIST_MAX_1024)
905 blk_queue_max_hw_sectors(sdev->request_queue, 1024);
900 906
901 /* 907 /*
902 * Some devices may not want to have a start command automatically 908 * Some devices may not want to have a start command automatically
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 198f96b7fb45..72b059081559 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -78,6 +78,7 @@ config SPI_ATMEL
78config SPI_BCM2835 78config SPI_BCM2835
79 tristate "BCM2835 SPI controller" 79 tristate "BCM2835 SPI controller"
80 depends on ARCH_BCM2835 || COMPILE_TEST 80 depends on ARCH_BCM2835 || COMPILE_TEST
81 depends on GPIOLIB
81 help 82 help
82 This selects a driver for the Broadcom BCM2835 SPI master. 83 This selects a driver for the Broadcom BCM2835 SPI master.
83 84
@@ -302,7 +303,7 @@ config SPI_FSL_SPI
302config SPI_FSL_DSPI 303config SPI_FSL_DSPI
303 tristate "Freescale DSPI controller" 304 tristate "Freescale DSPI controller"
304 select REGMAP_MMIO 305 select REGMAP_MMIO
305 depends on SOC_VF610 || COMPILE_TEST 306 depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST
306 help 307 help
307 This enables support for the Freescale DSPI controller in master 308 This enables support for the Freescale DSPI controller in master
308 mode. VF610 platform uses the controller. 309 mode. VF610 platform uses the controller.
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index f63864a893c5..37875cf942f7 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -164,13 +164,12 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
164 unsigned long xfer_time_us) 164 unsigned long xfer_time_us)
165{ 165{
166 struct bcm2835_spi *bs = spi_master_get_devdata(master); 166 struct bcm2835_spi *bs = spi_master_get_devdata(master);
167 unsigned long timeout = jiffies + 167 /* set timeout to 1 second of maximum polling */
168 max(4 * xfer_time_us * HZ / 1000000, 2uL); 168 unsigned long timeout = jiffies + HZ;
169 169
170 /* enable HW block without interrupts */ 170 /* enable HW block without interrupts */
171 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); 171 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
172 172
173 /* set timeout to 4x the expected time, or 2 jiffies */
174 /* loop until finished the transfer */ 173 /* loop until finished the transfer */
175 while (bs->rx_len) { 174 while (bs->rx_len) {
176 /* read from fifo as much as possible */ 175 /* read from fifo as much as possible */
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 5ef6638d5e8a..840a4984d365 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -180,7 +180,6 @@ int spi_bitbang_setup(struct spi_device *spi)
180{ 180{
181 struct spi_bitbang_cs *cs = spi->controller_state; 181 struct spi_bitbang_cs *cs = spi->controller_state;
182 struct spi_bitbang *bitbang; 182 struct spi_bitbang *bitbang;
183 int retval;
184 unsigned long flags; 183 unsigned long flags;
185 184
186 bitbang = spi_master_get_devdata(spi->master); 185 bitbang = spi_master_get_devdata(spi->master);
@@ -197,9 +196,11 @@ int spi_bitbang_setup(struct spi_device *spi)
197 if (!cs->txrx_word) 196 if (!cs->txrx_word)
198 return -EINVAL; 197 return -EINVAL;
199 198
200 retval = bitbang->setup_transfer(spi, NULL); 199 if (bitbang->setup_transfer) {
201 if (retval < 0) 200 int retval = bitbang->setup_transfer(spi, NULL);
202 return retval; 201 if (retval < 0)
202 return retval;
203 }
203 204
204 dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); 205 dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
205 206
@@ -295,9 +296,11 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
295 296
296 /* init (-1) or override (1) transfer params */ 297 /* init (-1) or override (1) transfer params */
297 if (do_setup != 0) { 298 if (do_setup != 0) {
298 status = bitbang->setup_transfer(spi, t); 299 if (bitbang->setup_transfer) {
299 if (status < 0) 300 status = bitbang->setup_transfer(spi, t);
300 break; 301 if (status < 0)
302 break;
303 }
301 if (do_setup == -1) 304 if (do_setup == -1)
302 do_setup = 0; 305 do_setup = 0;
303 } 306 }
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 9c46a3058743..896add8cfd3b 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -24,6 +24,7 @@
24#include <linux/of_address.h> 24#include <linux/of_address.h>
25#include <linux/spi/spi.h> 25#include <linux/spi/spi.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/platform_device.h>
27 28
28#include "spi-fsl-cpm.h" 29#include "spi-fsl-cpm.h"
29#include "spi-fsl-lib.h" 30#include "spi-fsl-lib.h"
@@ -269,17 +270,6 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
269 if (mspi->flags & SPI_CPM2) { 270 if (mspi->flags & SPI_CPM2) {
270 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); 271 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
271 out_be16(spi_base, pram_ofs); 272 out_be16(spi_base, pram_ofs);
272 } else {
273 struct spi_pram __iomem *pram = spi_base;
274 u16 rpbase = in_be16(&pram->rpbase);
275
276 /* Microcode relocation patch applied? */
277 if (rpbase) {
278 pram_ofs = rpbase;
279 } else {
280 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
281 out_be16(spi_base, pram_ofs);
282 }
283 } 273 }
284 274
285 iounmap(spi_base); 275 iounmap(spi_base);
@@ -292,7 +282,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
292 struct device_node *np = dev->of_node; 282 struct device_node *np = dev->of_node;
293 const u32 *iprop; 283 const u32 *iprop;
294 int size; 284 int size;
295 unsigned long pram_ofs;
296 unsigned long bds_ofs; 285 unsigned long bds_ofs;
297 286
298 if (!(mspi->flags & SPI_CPM_MODE)) 287 if (!(mspi->flags & SPI_CPM_MODE))
@@ -319,8 +308,26 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
319 } 308 }
320 } 309 }
321 310
322 pram_ofs = fsl_spi_cpm_get_pram(mspi); 311 if (mspi->flags & SPI_CPM1) {
323 if (IS_ERR_VALUE(pram_ofs)) { 312 struct resource *res;
313 void *pram;
314
315 res = platform_get_resource(to_platform_device(dev),
316 IORESOURCE_MEM, 1);
317 pram = devm_ioremap_resource(dev, res);
318 if (IS_ERR(pram))
319 mspi->pram = NULL;
320 else
321 mspi->pram = pram;
322 } else {
323 unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);
324
325 if (IS_ERR_VALUE(pram_ofs))
326 mspi->pram = NULL;
327 else
328 mspi->pram = cpm_muram_addr(pram_ofs);
329 }
330 if (mspi->pram == NULL) {
324 dev_err(dev, "can't allocate spi parameter ram\n"); 331 dev_err(dev, "can't allocate spi parameter ram\n");
325 goto err_pram; 332 goto err_pram;
326 } 333 }
@@ -346,8 +353,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
346 goto err_dummy_rx; 353 goto err_dummy_rx;
347 } 354 }
348 355
349 mspi->pram = cpm_muram_addr(pram_ofs);
350
351 mspi->tx_bd = cpm_muram_addr(bds_ofs); 356 mspi->tx_bd = cpm_muram_addr(bds_ofs);
352 mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); 357 mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
353 358
@@ -375,7 +380,8 @@ err_dummy_rx:
375err_dummy_tx: 380err_dummy_tx:
376 cpm_muram_free(bds_ofs); 381 cpm_muram_free(bds_ofs);
377err_bds: 382err_bds:
378 cpm_muram_free(pram_ofs); 383 if (!(mspi->flags & SPI_CPM1))
384 cpm_muram_free(cpm_muram_offset(mspi->pram));
379err_pram: 385err_pram:
380 fsl_spi_free_dummy_rx(); 386 fsl_spi_free_dummy_rx();
381 return -ENOMEM; 387 return -ENOMEM;
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index d0a73a09a9bd..80d245ac846f 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -359,14 +359,16 @@ static void fsl_espi_rw_trans(struct spi_message *m,
359 struct fsl_espi_transfer *trans, u8 *rx_buff) 359 struct fsl_espi_transfer *trans, u8 *rx_buff)
360{ 360{
361 struct fsl_espi_transfer *espi_trans = trans; 361 struct fsl_espi_transfer *espi_trans = trans;
362 unsigned int n_tx = espi_trans->n_tx; 362 unsigned int total_len = espi_trans->len;
363 unsigned int n_rx = espi_trans->n_rx;
364 struct spi_transfer *t; 363 struct spi_transfer *t;
365 u8 *local_buf; 364 u8 *local_buf;
366 u8 *rx_buf = rx_buff; 365 u8 *rx_buf = rx_buff;
367 unsigned int trans_len; 366 unsigned int trans_len;
368 unsigned int addr; 367 unsigned int addr;
369 int i, pos, loop; 368 unsigned int tx_only;
369 unsigned int rx_pos = 0;
370 unsigned int pos;
371 int i, loop;
370 372
371 local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); 373 local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
372 if (!local_buf) { 374 if (!local_buf) {
@@ -374,36 +376,48 @@ static void fsl_espi_rw_trans(struct spi_message *m,
374 return; 376 return;
375 } 377 }
376 378
377 for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { 379 for (pos = 0, loop = 0; pos < total_len; pos += trans_len, loop++) {
378 trans_len = n_rx - pos; 380 trans_len = total_len - pos;
379 if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
380 trans_len = SPCOM_TRANLEN_MAX - n_tx;
381 381
382 i = 0; 382 i = 0;
383 tx_only = 0;
383 list_for_each_entry(t, &m->transfers, transfer_list) { 384 list_for_each_entry(t, &m->transfers, transfer_list) {
384 if (t->tx_buf) { 385 if (t->tx_buf) {
385 memcpy(local_buf + i, t->tx_buf, t->len); 386 memcpy(local_buf + i, t->tx_buf, t->len);
386 i += t->len; 387 i += t->len;
388 if (!t->rx_buf)
389 tx_only += t->len;
387 } 390 }
388 } 391 }
389 392
 393 /* Add additional TX bytes to compensate for SPCOM_TRANLEN_MAX */
394 if (loop > 0)
395 trans_len += tx_only;
396
397 if (trans_len > SPCOM_TRANLEN_MAX)
398 trans_len = SPCOM_TRANLEN_MAX;
399
400 /* Update device offset */
390 if (pos > 0) { 401 if (pos > 0) {
391 addr = fsl_espi_cmd2addr(local_buf); 402 addr = fsl_espi_cmd2addr(local_buf);
392 addr += pos; 403 addr += rx_pos;
393 fsl_espi_addr2cmd(addr, local_buf); 404 fsl_espi_addr2cmd(addr, local_buf);
394 } 405 }
395 406
396 espi_trans->n_tx = n_tx; 407 espi_trans->len = trans_len;
397 espi_trans->n_rx = trans_len;
398 espi_trans->len = trans_len + n_tx;
399 espi_trans->tx_buf = local_buf; 408 espi_trans->tx_buf = local_buf;
400 espi_trans->rx_buf = local_buf; 409 espi_trans->rx_buf = local_buf;
401 fsl_espi_do_trans(m, espi_trans); 410 fsl_espi_do_trans(m, espi_trans);
402 411
403 memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); 412 /* If there is at least one RX byte then copy it to rx_buf */
413 if (tx_only < SPCOM_TRANLEN_MAX)
414 memcpy(rx_buf + rx_pos, espi_trans->rx_buf + tx_only,
415 trans_len - tx_only);
416
417 rx_pos += trans_len - tx_only;
404 418
405 if (loop > 0) 419 if (loop > 0)
406 espi_trans->actual_length += espi_trans->len - n_tx; 420 espi_trans->actual_length += espi_trans->len - tx_only;
407 else 421 else
408 espi_trans->actual_length += espi_trans->len; 422 espi_trans->actual_length += espi_trans->len;
409 } 423 }
@@ -418,6 +432,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
418 u8 *rx_buf = NULL; 432 u8 *rx_buf = NULL;
419 unsigned int n_tx = 0; 433 unsigned int n_tx = 0;
420 unsigned int n_rx = 0; 434 unsigned int n_rx = 0;
435 unsigned int xfer_len = 0;
421 struct fsl_espi_transfer espi_trans; 436 struct fsl_espi_transfer espi_trans;
422 437
423 list_for_each_entry(t, &m->transfers, transfer_list) { 438 list_for_each_entry(t, &m->transfers, transfer_list) {
@@ -427,11 +442,13 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
427 n_rx += t->len; 442 n_rx += t->len;
428 rx_buf = t->rx_buf; 443 rx_buf = t->rx_buf;
429 } 444 }
445 if ((t->tx_buf) || (t->rx_buf))
446 xfer_len += t->len;
430 } 447 }
431 448
432 espi_trans.n_tx = n_tx; 449 espi_trans.n_tx = n_tx;
433 espi_trans.n_rx = n_rx; 450 espi_trans.n_rx = n_rx;
434 espi_trans.len = n_tx + n_rx; 451 espi_trans.len = xfer_len;
435 espi_trans.actual_length = 0; 452 espi_trans.actual_length = 0;
436 espi_trans.status = 0; 453 espi_trans.status = 0;
437 454
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 4df8942058de..d1a5b9fc3eba 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1210,6 +1210,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1210 struct omap2_mcspi *mcspi; 1210 struct omap2_mcspi *mcspi;
1211 struct omap2_mcspi_dma *mcspi_dma; 1211 struct omap2_mcspi_dma *mcspi_dma;
1212 struct spi_transfer *t; 1212 struct spi_transfer *t;
1213 int status;
1213 1214
1214 spi = m->spi; 1215 spi = m->spi;
1215 mcspi = spi_master_get_devdata(master); 1216 mcspi = spi_master_get_devdata(master);
@@ -1229,7 +1230,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1229 tx_buf ? "tx" : "", 1230 tx_buf ? "tx" : "",
1230 rx_buf ? "rx" : "", 1231 rx_buf ? "rx" : "",
1231 t->bits_per_word); 1232 t->bits_per_word);
1232 return -EINVAL; 1233 status = -EINVAL;
1234 goto out;
1233 } 1235 }
1234 1236
1235 if (m->is_dma_mapped || len < DMA_MIN_BYTES) 1237 if (m->is_dma_mapped || len < DMA_MIN_BYTES)
@@ -1241,7 +1243,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1241 if (dma_mapping_error(mcspi->dev, t->tx_dma)) { 1243 if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1242 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", 1244 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1243 'T', len); 1245 'T', len);
1244 return -EINVAL; 1246 status = -EINVAL;
1247 goto out;
1245 } 1248 }
1246 } 1249 }
1247 if (mcspi_dma->dma_rx && rx_buf != NULL) { 1250 if (mcspi_dma->dma_rx && rx_buf != NULL) {
@@ -1253,14 +1256,19 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1253 if (tx_buf != NULL) 1256 if (tx_buf != NULL)
1254 dma_unmap_single(mcspi->dev, t->tx_dma, 1257 dma_unmap_single(mcspi->dev, t->tx_dma,
1255 len, DMA_TO_DEVICE); 1258 len, DMA_TO_DEVICE);
1256 return -EINVAL; 1259 status = -EINVAL;
1260 goto out;
1257 } 1261 }
1258 } 1262 }
1259 } 1263 }
1260 1264
1261 omap2_mcspi_work(mcspi, m); 1265 omap2_mcspi_work(mcspi, m);
1266 /* spi_finalize_current_message() changes the status inside the
1267 * spi_message, save the status here. */
1268 status = m->status;
1269out:
1262 spi_finalize_current_message(master); 1270 spi_finalize_current_message(master);
1263 return 0; 1271 return status;
1264} 1272}
1265 1273
1266static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) 1274static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d5d7d2235163..50910d85df5a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -583,6 +583,15 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
583 rx_dev = master->dma_rx->device->dev; 583 rx_dev = master->dma_rx->device->dev;
584 584
585 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 585 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
586 /*
587 * Restore the original value of tx_buf or rx_buf if they are
588 * NULL.
589 */
590 if (xfer->tx_buf == master->dummy_tx)
591 xfer->tx_buf = NULL;
592 if (xfer->rx_buf == master->dummy_rx)
593 xfer->rx_buf = NULL;
594
586 if (!master->can_dma(master, msg->spi, xfer)) 595 if (!master->can_dma(master, msg->spi, xfer))
587 continue; 596 continue;
588 597
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index b78643f907e7..072dac04a750 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_OMAP4
2 bool "OMAP 4 Camera support" 2 bool "OMAP 4 Camera support"
3 depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4 3 depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
4 depends on HAS_DMA 4 depends on HAS_DMA
5 select MFD_SYSCON
5 select VIDEOBUF2_DMA_CONTIG 6 select VIDEOBUF2_DMA_CONTIG
6 ---help--- 7 ---help---
7 Driver for an OMAP 4 ISS controller. 8 Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index e0ad5e520e2d..7ced940bd807 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -17,6 +17,7 @@
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/mfd/syscon.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
@@ -1386,6 +1387,16 @@ static int iss_probe(struct platform_device *pdev)
1386 1387
1387 platform_set_drvdata(pdev, iss); 1388 platform_set_drvdata(pdev, iss);
1388 1389
1390 /*
1391 * TODO: When implementing DT support switch to syscon regmap lookup by
1392 * phandle.
1393 */
1394 iss->syscon = syscon_regmap_lookup_by_compatible("syscon");
1395 if (IS_ERR(iss->syscon)) {
1396 ret = PTR_ERR(iss->syscon);
1397 goto error;
1398 }
1399
1389 /* Clocks */ 1400 /* Clocks */
1390 ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP); 1401 ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
1391 if (ret < 0) 1402 if (ret < 0)
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
index 734cfeeb0314..35df8b4709e6 100644
--- a/drivers/staging/media/omap4iss/iss.h
+++ b/drivers/staging/media/omap4iss/iss.h
@@ -29,6 +29,8 @@
29#include "iss_ipipe.h" 29#include "iss_ipipe.h"
30#include "iss_resizer.h" 30#include "iss_resizer.h"
31 31
32struct regmap;
33
32#define to_iss_device(ptr_module) \ 34#define to_iss_device(ptr_module) \
33 container_of(ptr_module, struct iss_device, ptr_module) 35 container_of(ptr_module, struct iss_device, ptr_module)
34#define to_device(ptr_module) \ 36#define to_device(ptr_module) \
@@ -79,6 +81,7 @@ struct iss_reg {
79 81
80/* 82/*
81 * struct iss_device - ISS device structure. 83 * struct iss_device - ISS device structure.
84 * @syscon: Regmap for the syscon register space
82 * @crashed: Bitmask of crashed entities (indexed by entity ID) 85 * @crashed: Bitmask of crashed entities (indexed by entity ID)
83 */ 86 */
84struct iss_device { 87struct iss_device {
@@ -93,6 +96,7 @@ struct iss_device {
93 96
94 struct resource *res[OMAP4_ISS_MEM_LAST]; 97 struct resource *res[OMAP4_ISS_MEM_LAST];
95 void __iomem *regs[OMAP4_ISS_MEM_LAST]; 98 void __iomem *regs[OMAP4_ISS_MEM_LAST];
99 struct regmap *syscon;
96 100
97 u64 raw_dmamask; 101 u64 raw_dmamask;
98 102
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c
index 7c3d55d811ef..748607f8918f 100644
--- a/drivers/staging/media/omap4iss/iss_csiphy.c
+++ b/drivers/staging/media/omap4iss/iss_csiphy.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/regmap.h>
16 17
17#include "../../../../arch/arm/mach-omap2/control.h" 18#include "../../../../arch/arm/mach-omap2/control.h"
18 19
@@ -140,9 +141,11 @@ int omap4iss_csiphy_config(struct iss_device *iss,
140 * - bit [18] : CSIPHY1 CTRLCLK enable 141 * - bit [18] : CSIPHY1 CTRLCLK enable
141 * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2 142 * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
142 */ 143 */
143 cam_rx_ctrl = omap4_ctrl_pad_readl( 144 /*
144 OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX); 145 * TODO: When implementing DT support specify the CONTROL_CAMERA_RX
145 146 * register offset in the syscon property instead of hardcoding it.
147 */
148 regmap_read(iss->syscon, 0x68, &cam_rx_ctrl);
146 149
147 if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) { 150 if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
148 cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK | 151 cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
@@ -166,8 +169,7 @@ int omap4iss_csiphy_config(struct iss_device *iss,
166 cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK; 169 cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
167 } 170 }
168 171
169 omap4_ctrl_pad_writel(cam_rx_ctrl, 172 regmap_write(iss->syscon, 0x68, cam_rx_ctrl);
170 OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
171 173
172 /* Reset used lane count */ 174 /* Reset used lane count */
173 csi2->phy->used_data_lanes = 0; 175 csi2->phy->used_data_lanes = 0;
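
The iss_csiphy.c hunk replaces the mach-omap2 pad accessors with regmap accesses on the syscon regmap obtained in iss_probe(). A minimal sketch of the same read-modify-write against the hardcoded 0x68 CONTROL_CAMERA_RX offset; the helper name is hypothetical, and regmap_update_bits() could express the same update in one call:

#include <linux/regmap.h>

/* Hypothetical helper illustrating the syscon read-modify-write pattern. */
static int camera_rx_rmw(struct regmap *syscon, u32 clear, u32 set)
{
	u32 val;
	int ret;

	ret = regmap_read(syscon, 0x68, &val);
	if (ret)
		return ret;

	val &= ~clear;
	val |= set;

	return regmap_write(syscon, 0x68, val);
}
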
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index f1e57425e39f..5bab1c684bb1 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
299 return 0; 299 return 0;
300} 300}
301 301
302static void xen_console_update_evtchn(struct xencons_info *info)
303{
304 if (xen_hvm_domain()) {
305 uint64_t v;
306 int err;
307
308 err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
309 if (!err && v)
310 info->evtchn = v;
311 } else
312 info->evtchn = xen_start_info->console.domU.evtchn;
313}
314
302void xen_console_resume(void) 315void xen_console_resume(void)
303{ 316{
304 struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); 317 struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
305 if (info != NULL && info->irq) 318 if (info != NULL && info->irq) {
319 if (!xen_initial_domain())
320 xen_console_update_evtchn(info);
306 rebind_evtchn_irq(info->evtchn, info->irq); 321 rebind_evtchn_irq(info->evtchn, info->irq);
322 }
307} 323}
308 324
309static void xencons_disconnect_backend(struct xencons_info *info) 325static void xencons_disconnect_backend(struct xencons_info *info)
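
The hvc_xen.c hunk re-reads HVM_PARAM_CONSOLE_EVTCHN on resume because an HVM guest may come back from save/restore with a different console event channel. A minimal sketch of that refresh using only the hvm_get_parameter() interface shown in the hunk; the helper name is hypothetical:

#include <linux/types.h>
#include <xen/hvm.h>

/* Hypothetical helper: refresh a cached event channel from an HVM parameter,
 * keeping the previous value if the hypercall fails or returns zero. */
static void refresh_evtchn_from_param(int param, unsigned int *evtchn)
{
	uint64_t v;

	if (!hvm_get_parameter(param, &v) && v)
		*evtchn = v;
}
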
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 69fab0fd15ae..e9851add6f4e 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -907,8 +907,14 @@ static void vfio_pci_request(void *device_data, unsigned int count)
907 mutex_lock(&vdev->igate); 907 mutex_lock(&vdev->igate);
908 908
909 if (vdev->req_trigger) { 909 if (vdev->req_trigger) {
910 dev_dbg(&vdev->pdev->dev, "Requesting device from user\n"); 910 if (!(count % 10))
911 dev_notice_ratelimited(&vdev->pdev->dev,
912 "Relaying device request to user (#%u)\n",
913 count);
911 eventfd_signal(vdev->req_trigger, 1); 914 eventfd_signal(vdev->req_trigger, 1);
915 } else if (count == 0) {
916 dev_warn(&vdev->pdev->dev,
917 "No device request channel registered, blocked until released by user\n");
912 } 918 }
913 919
914 mutex_unlock(&vdev->igate); 920 mutex_unlock(&vdev->igate);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 0d336625ac71..e1278fe04b1e 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -710,6 +710,8 @@ void *vfio_del_group_dev(struct device *dev)
710 void *device_data = device->device_data; 710 void *device_data = device->device_data;
711 struct vfio_unbound_dev *unbound; 711 struct vfio_unbound_dev *unbound;
712 unsigned int i = 0; 712 unsigned int i = 0;
713 long ret;
714 bool interrupted = false;
713 715
714 /* 716 /*
715 * The group exists so long as we have a device reference. Get 717 * The group exists so long as we have a device reference. Get
@@ -755,9 +757,22 @@ void *vfio_del_group_dev(struct device *dev)
755 757
756 vfio_device_put(device); 758 vfio_device_put(device);
757 759
758 } while (wait_event_interruptible_timeout(vfio.release_q, 760 if (interrupted) {
759 !vfio_dev_present(group, dev), 761 ret = wait_event_timeout(vfio.release_q,
760 HZ * 10) <= 0); 762 !vfio_dev_present(group, dev), HZ * 10);
763 } else {
764 ret = wait_event_interruptible_timeout(vfio.release_q,
765 !vfio_dev_present(group, dev), HZ * 10);
766 if (ret == -ERESTARTSYS) {
767 interrupted = true;
768 dev_warn(dev,
769 "Device is currently in use, task"
770 " \"%s\" (%d) "
771 "blocked until device is released",
772 current->comm, task_pid_nr(current));
773 }
774 }
775 } while (ret <= 0);
761 776
762 vfio_group_put(group); 777 vfio_group_put(group);
763 778
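
The vfio.c hunk restructures the unbind wait: it stays interruptible until the first signal, warns once naming the blocked task, and from then on uses an uninterruptible timed wait so repeated signals cannot spin the loop. The same pattern in isolation, with a hypothetical wait queue and condition:

/* Hypothetical names (waitq, device_gone()); the structure mirrors the hunk. */
long ret;
bool interrupted = false;

do {
	if (interrupted) {
		ret = wait_event_timeout(waitq, device_gone(), HZ * 10);
	} else {
		ret = wait_event_interruptible_timeout(waitq, device_gone(),
						       HZ * 10);
		if (ret == -ERESTARTSYS) {
			interrupted = true;
			pr_warn("device busy, waiting uninterruptibly\n");
		}
	}
} while (ret <= 0);
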
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 5db43fc100a4..7dd46312c180 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
345 return IRQ_HANDLED; 345 return IRQ_HANDLED;
346} 346}
347 347
348static void evtchn_2l_resume(void)
349{
350 int i;
351
352 for_each_online_cpu(i)
353 memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
354 EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
355}
356
348static const struct evtchn_ops evtchn_ops_2l = { 357static const struct evtchn_ops evtchn_ops_2l = {
349 .max_channels = evtchn_2l_max_channels, 358 .max_channels = evtchn_2l_max_channels,
350 .nr_channels = evtchn_2l_max_channels, 359 .nr_channels = evtchn_2l_max_channels,
@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
356 .mask = evtchn_2l_mask, 365 .mask = evtchn_2l_mask,
357 .unmask = evtchn_2l_unmask, 366 .unmask = evtchn_2l_unmask,
358 .handle_events = evtchn_2l_handle_events, 367 .handle_events = evtchn_2l_handle_events,
368 .resume = evtchn_2l_resume,
359}; 369};
360 370
361void __init xen_evtchn_2l_init(void) 371void __init xen_evtchn_2l_init(void)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 70fba973a107..2b8553bd8715 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
529 if (rc) 529 if (rc)
530 goto err; 530 goto err;
531 531
532 bind_evtchn_to_cpu(evtchn, 0);
533 info->evtchn = evtchn; 532 info->evtchn = evtchn;
533 bind_evtchn_to_cpu(evtchn, 0);
534 534
535 rc = xen_evtchn_port_setup(info); 535 rc = xen_evtchn_port_setup(info);
536 if (rc) 536 if (rc)
@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
1279 1279
1280 mutex_unlock(&irq_mapping_update_lock); 1280 mutex_unlock(&irq_mapping_update_lock);
1281 1281
1282 /* new event channels are always bound to cpu 0 */ 1282 bind_evtchn_to_cpu(evtchn, info->cpu);
1283 irq_set_affinity(irq, cpumask_of(0)); 1283 /* This will be deferred until interrupt is processed */
1284 irq_set_affinity(irq, cpumask_of(info->cpu));
1284 1285
1285 /* Unmask the event channel. */ 1286 /* Unmask the event channel. */
1286 enable_irq(irq); 1287 enable_irq(irq);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index d5bb1a33d0a3..89274850741b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -327,30 +327,10 @@ static int map_grant_pages(struct grant_map *map)
327 return err; 327 return err;
328} 328}
329 329
330struct unmap_grant_pages_callback_data
331{
332 struct completion completion;
333 int result;
334};
335
336static void unmap_grant_callback(int result,
337 struct gntab_unmap_queue_data *data)
338{
339 struct unmap_grant_pages_callback_data* d = data->data;
340
341 d->result = result;
342 complete(&d->completion);
343}
344
345static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) 330static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
346{ 331{
347 int i, err = 0; 332 int i, err = 0;
348 struct gntab_unmap_queue_data unmap_data; 333 struct gntab_unmap_queue_data unmap_data;
349 struct unmap_grant_pages_callback_data data;
350
351 init_completion(&data.completion);
352 unmap_data.data = &data;
353 unmap_data.done= &unmap_grant_callback;
354 334
355 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { 335 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
356 int pgno = (map->notify.addr >> PAGE_SHIFT); 336 int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -367,11 +347,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
367 unmap_data.pages = map->pages + offset; 347 unmap_data.pages = map->pages + offset;
368 unmap_data.count = pages; 348 unmap_data.count = pages;
369 349
370 gnttab_unmap_refs_async(&unmap_data); 350 err = gnttab_unmap_refs_sync(&unmap_data);
371 351 if (err)
372 wait_for_completion(&data.completion); 352 return err;
373 if (data.result)
374 return data.result;
375 353
376 for (i = 0; i < pages; i++) { 354 for (i = 0; i < pages; i++) {
377 if (map->unmap_ops[offset+i].status) 355 if (map->unmap_ops[offset+i].status)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 17972fbacddc..b1c7170e5c9e 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -123,6 +123,11 @@ struct gnttab_ops {
123 int (*query_foreign_access)(grant_ref_t ref); 123 int (*query_foreign_access)(grant_ref_t ref);
124}; 124};
125 125
126struct unmap_refs_callback_data {
127 struct completion completion;
128 int result;
129};
130
126static struct gnttab_ops *gnttab_interface; 131static struct gnttab_ops *gnttab_interface;
127 132
128static int grant_table_version; 133static int grant_table_version;
@@ -863,6 +868,29 @@ void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
863} 868}
864EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async); 869EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
865 870
871static void unmap_refs_callback(int result,
872 struct gntab_unmap_queue_data *data)
873{
874 struct unmap_refs_callback_data *d = data->data;
875
876 d->result = result;
877 complete(&d->completion);
878}
879
880int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
881{
882 struct unmap_refs_callback_data data;
883
884 init_completion(&data.completion);
885 item->data = &data;
886 item->done = &unmap_refs_callback;
887 gnttab_unmap_refs_async(item);
888 wait_for_completion(&data.completion);
889
890 return data.result;
891}
892EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
893
866static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) 894static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
867{ 895{
868 int rc; 896 int rc;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index bf1940706422..9e6a85104a20 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -131,6 +131,8 @@ static void do_suspend(void)
131 goto out_resume; 131 goto out_resume;
132 } 132 }
133 133
134 xen_arch_suspend();
135
134 si.cancelled = 1; 136 si.cancelled = 1;
135 137
136 err = stop_machine(xen_suspend, &si, cpumask_of(0)); 138 err = stop_machine(xen_suspend, &si, cpumask_of(0));
@@ -148,11 +150,12 @@ static void do_suspend(void)
148 si.cancelled = 1; 150 si.cancelled = 1;
149 } 151 }
150 152
153 xen_arch_resume();
154
151out_resume: 155out_resume:
152 if (!si.cancelled) { 156 if (!si.cancelled)
153 xen_arch_resume();
154 xs_resume(); 157 xs_resume();
155 } else 158 else
156 xs_suspend_cancel(); 159 xs_suspend_cancel();
157 160
158 dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 161 dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 810ad419e34c..4c549323c605 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -235,7 +235,7 @@ retry:
235#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) 235#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
236#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) 236#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
237 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 237 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
238 xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order); 238 xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
239 if (xen_io_tlb_start) 239 if (xen_io_tlb_start)
240 break; 240 break;
241 order--; 241 order--;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 75fe3d466515..9c234209d8b5 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,8 +16,8 @@
16#include "conf_space.h" 16#include "conf_space.h"
17#include "conf_space_quirks.h" 17#include "conf_space_quirks.h"
18 18
19bool permissive; 19bool xen_pcibk_permissive;
20module_param(permissive, bool, 0644); 20module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
21 21
22/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, 22/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
23 * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */ 23 * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
262 * This means that some fields may still be read-only because 262 * This means that some fields may still be read-only because
263 * they have entries in the config_field list that intercept 263 * they have entries in the config_field list that intercept
264 * the write and do nothing. */ 264 * the write and do nothing. */
265 if (dev_data->permissive || permissive) { 265 if (dev_data->permissive || xen_pcibk_permissive) {
266 switch (size) { 266 switch (size) {
267 case 1: 267 case 1:
268 err = pci_write_config_byte(dev, offset, 268 err = pci_write_config_byte(dev, offset,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 2e1d73d1d5d0..62461a8ba1d6 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -64,7 +64,7 @@ struct config_field_entry {
64 void *data; 64 void *data;
65}; 65};
66 66
67extern bool permissive; 67extern bool xen_pcibk_permissive;
68 68
69#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) 69#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
70 70
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index c2260a0456c9..ad3d17d29c81 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -118,7 +118,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
118 118
119 cmd->val = value; 119 cmd->val = value;
120 120
121 if (!permissive && (!dev_data || !dev_data->permissive)) 121 if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
122 return 0; 122 return 0;
123 123
124 /* Only allow the guest to control certain bits. */ 124 /* Only allow the guest to control certain bits. */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 564b31584860..5390a674b5e3 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -57,6 +57,7 @@
57#include <xen/xen.h> 57#include <xen/xen.h>
58#include <xen/xenbus.h> 58#include <xen/xenbus.h>
59#include <xen/events.h> 59#include <xen/events.h>
60#include <xen/xen-ops.h>
60#include <xen/page.h> 61#include <xen/page.h>
61 62
62#include <xen/hvm.h> 63#include <xen/hvm.h>
@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
735 return err; 736 return err;
736} 737}
737 738
739static int xenbus_resume_cb(struct notifier_block *nb,
740 unsigned long action, void *data)
741{
742 int err = 0;
743
744 if (xen_hvm_domain()) {
745 uint64_t v;
746
747 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
748 if (!err && v)
749 xen_store_evtchn = v;
750 else
751 pr_warn("Cannot update xenstore event channel: %d\n",
752 err);
753 } else
754 xen_store_evtchn = xen_start_info->store_evtchn;
755
756 return err;
757}
758
759static struct notifier_block xenbus_resume_nb = {
760 .notifier_call = xenbus_resume_cb,
761};
762
738static int __init xenbus_init(void) 763static int __init xenbus_init(void)
739{ 764{
740 int err = 0; 765 int err = 0;
@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
793 goto out_error; 818 goto out_error;
794 } 819 }
795 820
821 if ((xen_store_domain_type != XS_LOCAL) &&
822 (xen_store_domain_type != XS_UNKNOWN))
823 xen_resume_notifier_register(&xenbus_resume_nb);
824
796#ifdef CONFIG_XEN_COMPAT_XENFS 825#ifdef CONFIG_XEN_COMPAT_XENFS
797 /* 826 /*
798 * Create xenfs mountpoint in /proc for compatibility with 827 * Create xenfs mountpoint in /proc for compatibility with
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 41c510b7cc11..5e020d76fd07 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -86,7 +86,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
86 86
87 mapping_set_gfp_mask(inode->i_mapping, 87 mapping_set_gfp_mask(inode->i_mapping,
88 mapping_gfp_mask(inode->i_mapping) & 88 mapping_gfp_mask(inode->i_mapping) &
89 ~(GFP_NOFS & ~__GFP_HIGHMEM)); 89 ~(__GFP_FS | __GFP_HIGHMEM));
90 90
91 return inode; 91 return inode;
92} 92}
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index da94e41bdbf6..537356742091 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -173,5 +173,5 @@ MODULE_LICENSE("GPL");
173MODULE_VERSION("0.0.2"); 173MODULE_VERSION("0.0.2");
174MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration."); 174MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration.");
175 175
176module_init(configfs_init); 176core_initcall(configfs_init);
177module_exit(configfs_exit); 177module_exit(configfs_exit);
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 59fedbcf8798..86a2121828c3 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -121,7 +121,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
121 int len, i; 121 int len, i;
122 int err = -ENOMEM; 122 int err = -ENOMEM;
123 123
124 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 124 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
125 if (!entry) 125 if (!entry)
126 return err; 126 return err;
127 127
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 18228c201f7f..024f2284d3f6 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -64,8 +64,8 @@ config EXT4_FS_SECURITY
64 If you are not using a security module that requires using 64 If you are not using a security module that requires using
65 extended attributes for file security labels, say N. 65 extended attributes for file security labels, say N.
66 66
67config EXT4_FS_ENCRYPTION 67config EXT4_ENCRYPTION
68 bool "Ext4 Encryption" 68 tristate "Ext4 Encryption"
69 depends on EXT4_FS 69 depends on EXT4_FS
70 select CRYPTO_AES 70 select CRYPTO_AES
71 select CRYPTO_CBC 71 select CRYPTO_CBC
@@ -81,6 +81,11 @@ config EXT4_FS_ENCRYPTION
81 efficient since it avoids caching the encrypted and 81 efficient since it avoids caching the encrypted and
82 decrypted pages in the page cache. 82 decrypted pages in the page cache.
83 83
84config EXT4_FS_ENCRYPTION
85 bool
86 default y
87 depends on EXT4_ENCRYPTION
88
84config EXT4_DEBUG 89config EXT4_DEBUG
85 bool "EXT4 debugging support" 90 bool "EXT4 debugging support"
86 depends on EXT4_FS 91 depends on EXT4_FS
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
index ca2f5948c1ac..fded02f72299 100644
--- a/fs/ext4/crypto_fname.c
+++ b/fs/ext4/crypto_fname.c
@@ -66,6 +66,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
66 int res = 0; 66 int res = 0;
67 char iv[EXT4_CRYPTO_BLOCK_SIZE]; 67 char iv[EXT4_CRYPTO_BLOCK_SIZE];
68 struct scatterlist sg[1]; 68 struct scatterlist sg[1];
69 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
69 char *workbuf; 70 char *workbuf;
70 71
71 if (iname->len <= 0 || iname->len > ctx->lim) 72 if (iname->len <= 0 || iname->len > ctx->lim)
@@ -73,6 +74,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
73 74
74 ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ? 75 ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
75 EXT4_CRYPTO_BLOCK_SIZE : iname->len; 76 EXT4_CRYPTO_BLOCK_SIZE : iname->len;
77 ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
76 ciphertext_len = (ciphertext_len > ctx->lim) 78 ciphertext_len = (ciphertext_len > ctx->lim)
77 ? ctx->lim : ciphertext_len; 79 ? ctx->lim : ciphertext_len;
78 80
@@ -101,7 +103,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
101 /* Create encryption request */ 103 /* Create encryption request */
102 sg_init_table(sg, 1); 104 sg_init_table(sg, 1);
103 sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); 105 sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
104 ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv); 106 ablkcipher_request_set_crypt(req, sg, sg, ciphertext_len, iv);
105 res = crypto_ablkcipher_encrypt(req); 107 res = crypto_ablkcipher_encrypt(req);
106 if (res == -EINPROGRESS || res == -EBUSY) { 108 if (res == -EINPROGRESS || res == -EBUSY) {
107 BUG_ON(req->base.data != &ecr); 109 BUG_ON(req->base.data != &ecr);
@@ -198,106 +200,57 @@ static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
198 return oname->len; 200 return oname->len;
199} 201}
200 202
203static const char *lookup_table =
204 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
205
201/** 206/**
202 * ext4_fname_encode_digest() - 207 * ext4_fname_encode_digest() -
203 * 208 *
204 * Encodes the input digest using characters from the set [a-zA-Z0-9_+]. 209 * Encodes the input digest using characters from the set [a-zA-Z0-9_+].
205 * The encoded string is roughly 4/3 times the size of the input string. 210 * The encoded string is roughly 4/3 times the size of the input string.
206 */ 211 */
207int ext4_fname_encode_digest(char *dst, char *src, u32 len) 212static int digest_encode(const char *src, int len, char *dst)
208{ 213{
209 static const char *lookup_table = 214 int i = 0, bits = 0, ac = 0;
210 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+"; 215 char *cp = dst;
211 u32 current_chunk, num_chunks, i; 216
212 char tmp_buf[3]; 217 while (i < len) {
213 u32 c0, c1, c2, c3; 218 ac += (((unsigned char) src[i]) << bits);
214 219 bits += 8;
215 current_chunk = 0; 220 do {
216 num_chunks = len/3; 221 *cp++ = lookup_table[ac & 0x3f];
217 for (i = 0; i < num_chunks; i++) { 222 ac >>= 6;
218 c0 = src[3*i] & 0x3f; 223 bits -= 6;
219 c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f; 224 } while (bits >= 6);
220 c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f;
221 c3 = (src[3*i+2]>>2) & 0x3f;
222 dst[4*i] = lookup_table[c0];
223 dst[4*i+1] = lookup_table[c1];
224 dst[4*i+2] = lookup_table[c2];
225 dst[4*i+3] = lookup_table[c3];
226 }
227 if (i*3 < len) {
228 memset(tmp_buf, 0, 3);
229 memcpy(tmp_buf, &src[3*i], len-3*i);
230 c0 = tmp_buf[0] & 0x3f;
231 c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f;
232 c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f;
233 c3 = (tmp_buf[2]>>2) & 0x3f;
234 dst[4*i] = lookup_table[c0];
235 dst[4*i+1] = lookup_table[c1];
236 dst[4*i+2] = lookup_table[c2];
237 dst[4*i+3] = lookup_table[c3];
238 i++; 225 i++;
239 } 226 }
240 return (i * 4); 227 if (bits)
228 *cp++ = lookup_table[ac & 0x3f];
229 return cp - dst;
241} 230}
242 231
243/** 232static int digest_decode(const char *src, int len, char *dst)
244 * ext4_fname_hash() -
245 *
246 * This function computes the hash of the input filename, and sets the output
247 * buffer to the *encoded* digest. It returns the length of the digest as its
248 * return value. Errors are returned as negative numbers. We trust the caller
249 * to allocate sufficient memory to oname string.
250 */
251static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx,
252 const struct ext4_str *iname,
253 struct ext4_str *oname)
254{ 233{
255 struct scatterlist sg; 234 int i = 0, bits = 0, ac = 0;
256 struct hash_desc desc = { 235 const char *p;
257 .tfm = (struct crypto_hash *)ctx->htfm, 236 char *cp = dst;
258 .flags = CRYPTO_TFM_REQ_MAY_SLEEP 237
259 }; 238 while (i < len) {
260 int res = 0; 239 p = strchr(lookup_table, src[i]);
261 240 if (p == NULL || src[i] == 0)
262 if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) { 241 return -2;
263 res = ext4_fname_encode_digest(oname->name, iname->name, 242 ac += (p - lookup_table) << bits;
264 iname->len); 243 bits += 6;
265 oname->len = res; 244 if (bits >= 8) {
266 return res; 245 *cp++ = ac & 0xff;
267 } 246 ac >>= 8;
268 247 bits -= 8;
269 sg_init_one(&sg, iname->name, iname->len); 248 }
270 res = crypto_hash_init(&desc); 249 i++;
271 if (res) {
272 printk(KERN_ERR
273 "%s: Error initializing crypto hash; res = [%d]\n",
274 __func__, res);
275 goto out;
276 }
277 res = crypto_hash_update(&desc, &sg, iname->len);
278 if (res) {
279 printk(KERN_ERR
280 "%s: Error updating crypto hash; res = [%d]\n",
281 __func__, res);
282 goto out;
283 }
284 res = crypto_hash_final(&desc,
285 &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]);
286 if (res) {
287 printk(KERN_ERR
288 "%s: Error finalizing crypto hash; res = [%d]\n",
289 __func__, res);
290 goto out;
291 } 250 }
292 /* Encode the digest as a printable string--this will increase the 251 if (ac)
293 * size of the digest */ 252 return -1;
294 oname->name[0] = 'I'; 253 return cp - dst;
295 res = ext4_fname_encode_digest(oname->name+1,
296 &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE],
297 EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1;
298 oname->len = res;
299out:
300 return res;
301} 254}
302 255
303/** 256/**
@@ -405,6 +358,7 @@ struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
405 if (IS_ERR(ctx)) 358 if (IS_ERR(ctx))
406 return ctx; 359 return ctx;
407 360
361 ctx->flags = ei->i_crypt_policy_flags;
408 if (ctx->has_valid_key) { 362 if (ctx->has_valid_key) {
409 if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) { 363 if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) {
410 printk_once(KERN_WARNING 364 printk_once(KERN_WARNING
@@ -517,6 +471,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
517 u32 namelen) 471 u32 namelen)
518{ 472{
519 u32 ciphertext_len; 473 u32 ciphertext_len;
474 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
520 475
521 if (ctx == NULL) 476 if (ctx == NULL)
522 return -EIO; 477 return -EIO;
@@ -524,6 +479,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
524 return -EACCES; 479 return -EACCES;
525 ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ? 480 ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ?
526 EXT4_CRYPTO_BLOCK_SIZE : namelen; 481 EXT4_CRYPTO_BLOCK_SIZE : namelen;
482 ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
527 ciphertext_len = (ciphertext_len > ctx->lim) 483 ciphertext_len = (ciphertext_len > ctx->lim)
528 ? ctx->lim : ciphertext_len; 484 ? ctx->lim : ciphertext_len;
529 return (int) ciphertext_len; 485 return (int) ciphertext_len;
@@ -539,10 +495,13 @@ int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
539 u32 ilen, struct ext4_str *crypto_str) 495 u32 ilen, struct ext4_str *crypto_str)
540{ 496{
541 unsigned int olen; 497 unsigned int olen;
498 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
542 499
543 if (!ctx) 500 if (!ctx)
544 return -EIO; 501 return -EIO;
545 olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE); 502 if (padding < EXT4_CRYPTO_BLOCK_SIZE)
503 padding = EXT4_CRYPTO_BLOCK_SIZE;
504 olen = ext4_fname_crypto_round_up(ilen, padding);
546 crypto_str->len = olen; 505 crypto_str->len = olen;
547 if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) 506 if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
548 olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2; 507 olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
@@ -571,9 +530,13 @@ void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
571 * ext4_fname_disk_to_usr() - converts a filename from disk space to user space 530 * ext4_fname_disk_to_usr() - converts a filename from disk space to user space
572 */ 531 */
573int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, 532int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
574 const struct ext4_str *iname, 533 struct dx_hash_info *hinfo,
575 struct ext4_str *oname) 534 const struct ext4_str *iname,
535 struct ext4_str *oname)
576{ 536{
537 char buf[24];
538 int ret;
539
577 if (ctx == NULL) 540 if (ctx == NULL)
578 return -EIO; 541 return -EIO;
579 if (iname->len < 3) { 542 if (iname->len < 3) {
@@ -587,18 +550,33 @@ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
587 } 550 }
588 if (ctx->has_valid_key) 551 if (ctx->has_valid_key)
589 return ext4_fname_decrypt(ctx, iname, oname); 552 return ext4_fname_decrypt(ctx, iname, oname);
590 else 553
591 return ext4_fname_hash(ctx, iname, oname); 554 if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
555 ret = digest_encode(iname->name, iname->len, oname->name);
556 oname->len = ret;
557 return ret;
558 }
559 if (hinfo) {
560 memcpy(buf, &hinfo->hash, 4);
561 memcpy(buf+4, &hinfo->minor_hash, 4);
562 } else
563 memset(buf, 0, 8);
564 memcpy(buf + 8, iname->name + iname->len - 16, 16);
565 oname->name[0] = '_';
566 ret = digest_encode(buf, 24, oname->name+1);
567 oname->len = ret + 1;
568 return ret + 1;
592} 569}
593 570
594int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, 571int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
572 struct dx_hash_info *hinfo,
595 const struct ext4_dir_entry_2 *de, 573 const struct ext4_dir_entry_2 *de,
596 struct ext4_str *oname) 574 struct ext4_str *oname)
597{ 575{
598 struct ext4_str iname = {.name = (unsigned char *) de->name, 576 struct ext4_str iname = {.name = (unsigned char *) de->name,
599 .len = de->name_len }; 577 .len = de->name_len };
600 578
601 return _ext4_fname_disk_to_usr(ctx, &iname, oname); 579 return _ext4_fname_disk_to_usr(ctx, hinfo, &iname, oname);
602} 580}
603 581
604 582
@@ -640,10 +618,11 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
640 const struct qstr *iname, 618 const struct qstr *iname,
641 struct dx_hash_info *hinfo) 619 struct dx_hash_info *hinfo)
642{ 620{
643 struct ext4_str tmp, tmp2; 621 struct ext4_str tmp;
644 int ret = 0; 622 int ret = 0;
623 char buf[EXT4_FNAME_CRYPTO_DIGEST_SIZE+1];
645 624
646 if (!ctx || !ctx->has_valid_key || 625 if (!ctx ||
647 ((iname->name[0] == '.') && 626 ((iname->name[0] == '.') &&
648 ((iname->len == 1) || 627 ((iname->len == 1) ||
649 ((iname->name[1] == '.') && (iname->len == 2))))) { 628 ((iname->name[1] == '.') && (iname->len == 2))))) {
@@ -651,59 +630,90 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
651 return 0; 630 return 0;
652 } 631 }
653 632
633 if (!ctx->has_valid_key && iname->name[0] == '_') {
634 if (iname->len != 33)
635 return -ENOENT;
636 ret = digest_decode(iname->name+1, iname->len, buf);
637 if (ret != 24)
638 return -ENOENT;
639 memcpy(&hinfo->hash, buf, 4);
640 memcpy(&hinfo->minor_hash, buf + 4, 4);
641 return 0;
642 }
643
644 if (!ctx->has_valid_key && iname->name[0] != '_') {
645 if (iname->len > 43)
646 return -ENOENT;
647 ret = digest_decode(iname->name, iname->len, buf);
648 ext4fs_dirhash(buf, ret, hinfo);
649 return 0;
650 }
651
654 /* First encrypt the plaintext name */ 652 /* First encrypt the plaintext name */
655 ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp); 653 ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp);
656 if (ret < 0) 654 if (ret < 0)
657 return ret; 655 return ret;
658 656
659 ret = ext4_fname_encrypt(ctx, iname, &tmp); 657 ret = ext4_fname_encrypt(ctx, iname, &tmp);
660 if (ret < 0) 658 if (ret >= 0) {
661 goto out; 659 ext4fs_dirhash(tmp.name, tmp.len, hinfo);
662 660 ret = 0;
663 tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
664 tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL);
665 if (tmp2.name == NULL) {
666 ret = -ENOMEM;
667 goto out;
668 } 661 }
669 662
670 ret = ext4_fname_hash(ctx, &tmp, &tmp2);
671 if (ret > 0)
672 ext4fs_dirhash(tmp2.name, tmp2.len, hinfo);
673 ext4_fname_crypto_free_buffer(&tmp2);
674out:
675 ext4_fname_crypto_free_buffer(&tmp); 663 ext4_fname_crypto_free_buffer(&tmp);
676 return ret; 664 return ret;
677} 665}
678 666
679/** 667int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr,
680 * ext4_fname_disk_to_htree() - converts a filename from disk space to htree-access string 668 int len, const char * const name,
681 */ 669 struct ext4_dir_entry_2 *de)
682int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
683 const struct ext4_dir_entry_2 *de,
684 struct dx_hash_info *hinfo)
685{ 670{
686 struct ext4_str iname = {.name = (unsigned char *) de->name, 671 int ret = -ENOENT;
687 .len = de->name_len}; 672 int bigname = (*name == '_');
688 struct ext4_str tmp;
689 int ret;
690 673
691 if (!ctx || 674 if (ctx->has_valid_key) {
692 ((iname.name[0] == '.') && 675 if (cstr->name == NULL) {
693 ((iname.len == 1) || 676 struct qstr istr;
694 ((iname.name[1] == '.') && (iname.len == 2))))) { 677
695 ext4fs_dirhash(iname.name, iname.len, hinfo); 678 ret = ext4_fname_crypto_alloc_buffer(ctx, len, cstr);
696 return 0; 679 if (ret < 0)
680 goto errout;
681 istr.name = name;
682 istr.len = len;
683 ret = ext4_fname_encrypt(ctx, &istr, cstr);
684 if (ret < 0)
685 goto errout;
686 }
687 } else {
688 if (cstr->name == NULL) {
689 cstr->name = kmalloc(32, GFP_KERNEL);
690 if (cstr->name == NULL)
691 return -ENOMEM;
692 if ((bigname && (len != 33)) ||
693 (!bigname && (len > 43)))
694 goto errout;
695 ret = digest_decode(name+bigname, len-bigname,
696 cstr->name);
697 if (ret < 0) {
698 ret = -ENOENT;
699 goto errout;
700 }
701 cstr->len = ret;
702 }
703 if (bigname) {
704 if (de->name_len < 16)
705 return 0;
706 ret = memcmp(de->name + de->name_len - 16,
707 cstr->name + 8, 16);
708 return (ret == 0) ? 1 : 0;
709 }
697 } 710 }
698 711 if (de->name_len != cstr->len)
699 tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1; 712 return 0;
700 tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL); 713 ret = memcmp(de->name, cstr->name, cstr->len);
701 if (tmp.name == NULL) 714 return (ret == 0) ? 1 : 0;
702 return -ENOMEM; 715errout:
703 716 kfree(cstr->name);
704 ret = ext4_fname_hash(ctx, &iname, &tmp); 717 cstr->name = NULL;
705 if (ret > 0)
706 ext4fs_dirhash(tmp.name, tmp.len, hinfo);
707 ext4_fname_crypto_free_buffer(&tmp);
708 return ret; 718 return ret;
709} 719}
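
The crypto_fname.c hunk replaces the old 3-byte-to-4-character encoder and the crypto_hash based ext4_fname_hash() with digest_encode()/digest_decode(), a base64-style mapping over the 64-character table "A-Za-z0-9+," that emits one character per 6 bits. A standalone user-space rendering of the encoder, runnable with any C compiler, to show the packing; the main() driver is illustrative only:

#include <stdio.h>

static const char lookup_table[] =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

/* Mirrors the patch's digest_encode(): accumulate input bytes and emit one
 * table character for every 6 bits, plus one trailing character for any
 * leftover bits. */
static int digest_encode(const unsigned char *src, int len, char *dst)
{
	int i = 0, bits = 0, ac = 0;
	char *cp = dst;

	while (i < len) {
		ac += src[i] << bits;
		bits += 8;
		do {
			*cp++ = lookup_table[ac & 0x3f];
			ac >>= 6;
			bits -= 6;
		} while (bits >= 6);
		i++;
	}
	if (bits)
		*cp++ = lookup_table[ac & 0x3f];
	return cp - dst;
}

int main(void)
{
	char out[64];
	int n = digest_encode((const unsigned char *)"hash", 4, out);

	printf("%.*s\n", n, out);	/* 4 input bytes -> 6 output chars */
	return 0;
}
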
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index c8392af8abbb..52170d0b7c40 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -110,6 +110,7 @@ int ext4_generate_encryption_key(struct inode *inode)
110 } 110 }
111 res = 0; 111 res = 0;
112 112
113 ei->i_crypt_policy_flags = ctx.flags;
113 if (S_ISREG(inode->i_mode)) 114 if (S_ISREG(inode->i_mode))
114 crypt_key->mode = ctx.contents_encryption_mode; 115 crypt_key->mode = ctx.contents_encryption_mode;
115 else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 116 else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
index 30eaf9e9864a..a6d6291aea16 100644
--- a/fs/ext4/crypto_policy.c
+++ b/fs/ext4/crypto_policy.c
@@ -37,6 +37,8 @@ static int ext4_is_encryption_context_consistent_with_policy(
37 return 0; 37 return 0;
38 return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, 38 return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
39 EXT4_KEY_DESCRIPTOR_SIZE) == 0 && 39 EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
40 (ctx.flags ==
41 policy->flags) &&
40 (ctx.contents_encryption_mode == 42 (ctx.contents_encryption_mode ==
41 policy->contents_encryption_mode) && 43 policy->contents_encryption_mode) &&
42 (ctx.filenames_encryption_mode == 44 (ctx.filenames_encryption_mode ==
@@ -56,25 +58,25 @@ static int ext4_create_encryption_context_from_policy(
56 printk(KERN_WARNING 58 printk(KERN_WARNING
57 "%s: Invalid contents encryption mode %d\n", __func__, 59 "%s: Invalid contents encryption mode %d\n", __func__,
58 policy->contents_encryption_mode); 60 policy->contents_encryption_mode);
59 res = -EINVAL; 61 return -EINVAL;
60 goto out;
61 } 62 }
62 if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { 63 if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
63 printk(KERN_WARNING 64 printk(KERN_WARNING
64 "%s: Invalid filenames encryption mode %d\n", __func__, 65 "%s: Invalid filenames encryption mode %d\n", __func__,
65 policy->filenames_encryption_mode); 66 policy->filenames_encryption_mode);
66 res = -EINVAL; 67 return -EINVAL;
67 goto out;
68 } 68 }
69 if (policy->flags & ~EXT4_POLICY_FLAGS_VALID)
70 return -EINVAL;
69 ctx.contents_encryption_mode = policy->contents_encryption_mode; 71 ctx.contents_encryption_mode = policy->contents_encryption_mode;
70 ctx.filenames_encryption_mode = policy->filenames_encryption_mode; 72 ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
73 ctx.flags = policy->flags;
71 BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); 74 BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
72 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); 75 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
73 76
74 res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, 77 res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
75 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, 78 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
76 sizeof(ctx), 0); 79 sizeof(ctx), 0);
77out:
78 if (!res) 80 if (!res)
79 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); 81 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
80 return res; 82 return res;
@@ -115,6 +117,7 @@ int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
115 policy->version = 0; 117 policy->version = 0;
116 policy->contents_encryption_mode = ctx.contents_encryption_mode; 118 policy->contents_encryption_mode = ctx.contents_encryption_mode;
117 policy->filenames_encryption_mode = ctx.filenames_encryption_mode; 119 policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
120 policy->flags = ctx.flags;
118 memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, 121 memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
119 EXT4_KEY_DESCRIPTOR_SIZE); 122 EXT4_KEY_DESCRIPTOR_SIZE);
120 return 0; 123 return 0;
@@ -176,6 +179,7 @@ int ext4_inherit_context(struct inode *parent, struct inode *child)
176 EXT4_ENCRYPTION_MODE_AES_256_XTS; 179 EXT4_ENCRYPTION_MODE_AES_256_XTS;
177 ctx.filenames_encryption_mode = 180 ctx.filenames_encryption_mode =
178 EXT4_ENCRYPTION_MODE_AES_256_CTS; 181 EXT4_ENCRYPTION_MODE_AES_256_CTS;
182 ctx.flags = 0;
179 memset(ctx.master_key_descriptor, 0x42, 183 memset(ctx.master_key_descriptor, 0x42,
180 EXT4_KEY_DESCRIPTOR_SIZE); 184 EXT4_KEY_DESCRIPTOR_SIZE);
181 res = 0; 185 res = 0;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 61db51a5ce4c..5665d82d2332 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -249,7 +249,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
249 } else { 249 } else {
250 /* Directory is encrypted */ 250 /* Directory is encrypted */
251 err = ext4_fname_disk_to_usr(enc_ctx, 251 err = ext4_fname_disk_to_usr(enc_ctx,
252 de, &fname_crypto_str); 252 NULL, de, &fname_crypto_str);
253 if (err < 0) 253 if (err < 0)
254 goto errout; 254 goto errout;
255 if (!dir_emit(ctx, 255 if (!dir_emit(ctx,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ef267adce19a..009a0590b20f 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -911,6 +911,7 @@ struct ext4_inode_info {
911 911
912 /* on-disk additional length */ 912 /* on-disk additional length */
913 __u16 i_extra_isize; 913 __u16 i_extra_isize;
914 char i_crypt_policy_flags;
914 915
915 /* Indicate the inline data space. */ 916 /* Indicate the inline data space. */
916 u16 i_inline_off; 917 u16 i_inline_off;
@@ -1066,12 +1067,6 @@ extern void ext4_set_bits(void *bm, int cur, int len);
1066/* Metadata checksum algorithm codes */ 1067/* Metadata checksum algorithm codes */
1067#define EXT4_CRC32C_CHKSUM 1 1068#define EXT4_CRC32C_CHKSUM 1
1068 1069
1069/* Encryption algorithms */
1070#define EXT4_ENCRYPTION_MODE_INVALID 0
1071#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
1072#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
1073#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
1074
1075/* 1070/*
1076 * Structure of the super block 1071 * Structure of the super block
1077 */ 1072 */
@@ -2093,9 +2088,11 @@ u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
2093int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, 2088int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
2094 u32 ilen, struct ext4_str *crypto_str); 2089 u32 ilen, struct ext4_str *crypto_str);
2095int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, 2090int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
2091 struct dx_hash_info *hinfo,
2096 const struct ext4_str *iname, 2092 const struct ext4_str *iname,
2097 struct ext4_str *oname); 2093 struct ext4_str *oname);
2098int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, 2094int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
2095 struct dx_hash_info *hinfo,
2099 const struct ext4_dir_entry_2 *de, 2096 const struct ext4_dir_entry_2 *de,
2100 struct ext4_str *oname); 2097 struct ext4_str *oname);
2101int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, 2098int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
@@ -2104,11 +2101,12 @@ int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
2104int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, 2101int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
2105 const struct qstr *iname, 2102 const struct qstr *iname,
2106 struct dx_hash_info *hinfo); 2103 struct dx_hash_info *hinfo);
2107int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
2108 const struct ext4_dir_entry_2 *de,
2109 struct dx_hash_info *hinfo);
2110int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, 2104int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
2111 u32 namelen); 2105 u32 namelen);
2106int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr,
2107 int len, const char * const name,
2108 struct ext4_dir_entry_2 *de);
2109
2112 2110
2113#ifdef CONFIG_EXT4_FS_ENCRYPTION 2111#ifdef CONFIG_EXT4_FS_ENCRYPTION
2114void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx); 2112void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx);
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
index c2ba35a914b6..d75159c101ce 100644
--- a/fs/ext4/ext4_crypto.h
+++ b/fs/ext4/ext4_crypto.h
@@ -20,12 +20,20 @@ struct ext4_encryption_policy {
20 char version; 20 char version;
21 char contents_encryption_mode; 21 char contents_encryption_mode;
22 char filenames_encryption_mode; 22 char filenames_encryption_mode;
23 char flags;
23 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; 24 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
24} __attribute__((__packed__)); 25} __attribute__((__packed__));
25 26
26#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1 27#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
27#define EXT4_KEY_DERIVATION_NONCE_SIZE 16 28#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
28 29
30#define EXT4_POLICY_FLAGS_PAD_4 0x00
31#define EXT4_POLICY_FLAGS_PAD_8 0x01
32#define EXT4_POLICY_FLAGS_PAD_16 0x02
33#define EXT4_POLICY_FLAGS_PAD_32 0x03
34#define EXT4_POLICY_FLAGS_PAD_MASK 0x03
35#define EXT4_POLICY_FLAGS_VALID 0x03
36
29/** 37/**
30 * Encryption context for inode 38 * Encryption context for inode
31 * 39 *
@@ -41,7 +49,7 @@ struct ext4_encryption_context {
41 char format; 49 char format;
42 char contents_encryption_mode; 50 char contents_encryption_mode;
43 char filenames_encryption_mode; 51 char filenames_encryption_mode;
44 char reserved; 52 char flags;
45 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; 53 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
46 char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; 54 char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
47} __attribute__((__packed__)); 55} __attribute__((__packed__));
@@ -120,6 +128,7 @@ struct ext4_fname_crypto_ctx {
120 struct crypto_hash *htfm; 128 struct crypto_hash *htfm;
121 struct page *workpage; 129 struct page *workpage;
122 struct ext4_encryption_key key; 130 struct ext4_encryption_key key;
131 unsigned flags : 8;
123 unsigned has_valid_key : 1; 132 unsigned has_valid_key : 1;
124 unsigned ctfm_key_is_ready : 1; 133 unsigned ctfm_key_is_ready : 1;
125}; 134};
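
The new policy flags reserve the low two bits for filename padding, and every user in the patch derives the byte count as 4 << (flags & EXT4_POLICY_FLAGS_PAD_MASK). A one-line worked example:

#define EXT4_POLICY_FLAGS_PAD_MASK	0x03

/* 4 << 0 = 4, 4 << 1 = 8, 4 << 2 = 16, 4 << 3 = 32 bytes of padding. */
static inline unsigned int fname_padding_bytes(unsigned int flags)
{
	return 4U << (flags & EXT4_POLICY_FLAGS_PAD_MASK);
}
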
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 973816bfe4a9..d74e08029643 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4927,13 +4927,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4927 if (ret) 4927 if (ret)
4928 return ret; 4928 return ret;
4929 4929
4930 /*
4931 * currently supporting (pre)allocate mode for extent-based
4932 * files _only_
4933 */
4934 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4935 return -EOPNOTSUPP;
4936
4937 if (mode & FALLOC_FL_COLLAPSE_RANGE) 4930 if (mode & FALLOC_FL_COLLAPSE_RANGE)
4938 return ext4_collapse_range(inode, offset, len); 4931 return ext4_collapse_range(inode, offset, len);
4939 4932
@@ -4955,6 +4948,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4955 4948
4956 mutex_lock(&inode->i_mutex); 4949 mutex_lock(&inode->i_mutex);
4957 4950
4951 /*
4952 * We only support preallocation for extent-based files only
4953 */
4954 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4955 ret = -EOPNOTSUPP;
4956 goto out;
4957 }
4958
4958 if (!(mode & FALLOC_FL_KEEP_SIZE) && 4959 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4959 offset + len > i_size_read(inode)) { 4960 offset + len > i_size_read(inode)) {
4960 new_size = offset + len; 4961 new_size = offset + len;
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index d33d5a6852b9..26724aeece73 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -703,6 +703,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
703 703
704 BUG_ON(end < lblk); 704 BUG_ON(end < lblk);
705 705
706 if ((status & EXTENT_STATUS_DELAYED) &&
707 (status & EXTENT_STATUS_WRITTEN)) {
708 ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
709 " delayed and written which can potentially "
710 " cause data loss.\n", lblk, len);
711 WARN_ON(1);
712 }
713
706 newes.es_lblk = lblk; 714 newes.es_lblk = lblk;
707 newes.es_len = len; 715 newes.es_len = len;
708 ext4_es_store_pblock_status(&newes, pblk, status); 716 ext4_es_store_pblock_status(&newes, pblk, status);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cbd0654a2675..55b187c3bac1 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -531,6 +531,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
531 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 531 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
532 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 532 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
533 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 533 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
534 !(status & EXTENT_STATUS_WRITTEN) &&
534 ext4_find_delalloc_range(inode, map->m_lblk, 535 ext4_find_delalloc_range(inode, map->m_lblk,
535 map->m_lblk + map->m_len - 1)) 536 map->m_lblk + map->m_len - 1))
536 status |= EXTENT_STATUS_DELAYED; 537 status |= EXTENT_STATUS_DELAYED;
@@ -635,6 +636,7 @@ found:
635 status = map->m_flags & EXT4_MAP_UNWRITTEN ? 636 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
636 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 637 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
637 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 638 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
639 !(status & EXTENT_STATUS_WRITTEN) &&
638 ext4_find_delalloc_range(inode, map->m_lblk, 640 ext4_find_delalloc_range(inode, map->m_lblk,
639 map->m_lblk + map->m_len - 1)) 641 map->m_lblk + map->m_len - 1))
640 status |= EXTENT_STATUS_DELAYED; 642 status |= EXTENT_STATUS_DELAYED;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 7223b0b4bc38..814f3beb4369 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -640,7 +640,7 @@ static struct stats dx_show_leaf(struct inode *dir,
640 ext4_put_fname_crypto_ctx(&ctx); 640 ext4_put_fname_crypto_ctx(&ctx);
641 ctx = NULL; 641 ctx = NULL;
642 } 642 }
643 res = ext4_fname_disk_to_usr(ctx, de, 643 res = ext4_fname_disk_to_usr(ctx, NULL, de,
644 &fname_crypto_str); 644 &fname_crypto_str);
645 if (res < 0) { 645 if (res < 0) {
646 printk(KERN_WARNING "Error " 646 printk(KERN_WARNING "Error "
@@ -653,15 +653,8 @@ static struct stats dx_show_leaf(struct inode *dir,
653 name = fname_crypto_str.name; 653 name = fname_crypto_str.name;
654 len = fname_crypto_str.len; 654 len = fname_crypto_str.len;
655 } 655 }
656 res = ext4_fname_disk_to_hash(ctx, de, 656 ext4fs_dirhash(de->name, de->name_len,
657 &h); 657 &h);
658 if (res < 0) {
659 printk(KERN_WARNING "Error "
660 "converting filename "
661 "from disk to htree"
662 "\n");
663 h.hash = 0xDEADBEEF;
664 }
665 printk("%*.s:(E)%x.%u ", len, name, 658 printk("%*.s:(E)%x.%u ", len, name,
666 h.hash, (unsigned) ((char *) de 659 h.hash, (unsigned) ((char *) de
667 - base)); 660 - base));
@@ -1008,15 +1001,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
1008 /* silently ignore the rest of the block */ 1001 /* silently ignore the rest of the block */
1009 break; 1002 break;
1010 } 1003 }
1011#ifdef CONFIG_EXT4_FS_ENCRYPTION
1012 err = ext4_fname_disk_to_hash(ctx, de, hinfo);
1013 if (err < 0) {
1014 count = err;
1015 goto errout;
1016 }
1017#else
1018 ext4fs_dirhash(de->name, de->name_len, hinfo); 1004 ext4fs_dirhash(de->name, de->name_len, hinfo);
1019#endif
1020 if ((hinfo->hash < start_hash) || 1005 if ((hinfo->hash < start_hash) ||
1021 ((hinfo->hash == start_hash) && 1006 ((hinfo->hash == start_hash) &&
1022 (hinfo->minor_hash < start_minor_hash))) 1007 (hinfo->minor_hash < start_minor_hash)))
@@ -1032,7 +1017,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
1032 &tmp_str); 1017 &tmp_str);
1033 } else { 1018 } else {
1034 /* Directory is encrypted */ 1019 /* Directory is encrypted */
1035 err = ext4_fname_disk_to_usr(ctx, de, 1020 err = ext4_fname_disk_to_usr(ctx, hinfo, de,
1036 &fname_crypto_str); 1021 &fname_crypto_str);
1037 if (err < 0) { 1022 if (err < 0) {
1038 count = err; 1023 count = err;
@@ -1193,26 +1178,10 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1193 int count = 0; 1178 int count = 0;
1194 char *base = (char *) de; 1179 char *base = (char *) de;
1195 struct dx_hash_info h = *hinfo; 1180 struct dx_hash_info h = *hinfo;
1196#ifdef CONFIG_EXT4_FS_ENCRYPTION
1197 struct ext4_fname_crypto_ctx *ctx = NULL;
1198 int err;
1199
1200 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
1201 if (IS_ERR(ctx))
1202 return PTR_ERR(ctx);
1203#endif
1204 1181
1205 while ((char *) de < base + blocksize) { 1182 while ((char *) de < base + blocksize) {
1206 if (de->name_len && de->inode) { 1183 if (de->name_len && de->inode) {
1207#ifdef CONFIG_EXT4_FS_ENCRYPTION
1208 err = ext4_fname_disk_to_hash(ctx, de, &h);
1209 if (err < 0) {
1210 ext4_put_fname_crypto_ctx(&ctx);
1211 return err;
1212 }
1213#else
1214 ext4fs_dirhash(de->name, de->name_len, &h); 1184 ext4fs_dirhash(de->name, de->name_len, &h);
1215#endif
1216 map_tail--; 1185 map_tail--;
1217 map_tail->hash = h.hash; 1186 map_tail->hash = h.hash;
1218 map_tail->offs = ((char *) de - base)>>2; 1187 map_tail->offs = ((char *) de - base)>>2;
@@ -1223,9 +1192,6 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1223 /* XXX: do we need to check rec_len == 0 case? -Chris */ 1192 /* XXX: do we need to check rec_len == 0 case? -Chris */
1224 de = ext4_next_entry(de, blocksize); 1193 de = ext4_next_entry(de, blocksize);
1225 } 1194 }
1226#ifdef CONFIG_EXT4_FS_ENCRYPTION
1227 ext4_put_fname_crypto_ctx(&ctx);
1228#endif
1229 return count; 1195 return count;
1230} 1196}
1231 1197
@@ -1287,16 +1253,8 @@ static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx,
1287 return 0; 1253 return 0;
1288 1254
1289#ifdef CONFIG_EXT4_FS_ENCRYPTION 1255#ifdef CONFIG_EXT4_FS_ENCRYPTION
1290 if (ctx) { 1256 if (ctx)
1291 /* Directory is encrypted */ 1257 return ext4_fname_match(ctx, fname_crypto_str, len, name, de);
1292 res = ext4_fname_disk_to_usr(ctx, de, fname_crypto_str);
1293 if (res < 0)
1294 return res;
1295 if (len != res)
1296 return 0;
1297 res = memcmp(name, fname_crypto_str->name, len);
1298 return (res == 0) ? 1 : 0;
1299 }
1300#endif 1258#endif
1301 if (len != de->name_len) 1259 if (len != de->name_len)
1302 return 0; 1260 return 0;
@@ -1324,16 +1282,6 @@ int search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
1324 if (IS_ERR(ctx)) 1282 if (IS_ERR(ctx))
1325 return -1; 1283 return -1;
1326 1284
1327 if (ctx != NULL) {
1328 /* Allocate buffer to hold maximum name length */
1329 res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
1330 &fname_crypto_str);
1331 if (res < 0) {
1332 ext4_put_fname_crypto_ctx(&ctx);
1333 return -1;
1334 }
1335 }
1336
1337 de = (struct ext4_dir_entry_2 *)search_buf; 1285 de = (struct ext4_dir_entry_2 *)search_buf;
1338 dlimit = search_buf + buf_size; 1286 dlimit = search_buf + buf_size;
1339 while ((char *) de < dlimit) { 1287 while ((char *) de < dlimit) {
@@ -1872,14 +1820,6 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
1872 return res; 1820 return res;
1873 } 1821 }
1874 reclen = EXT4_DIR_REC_LEN(res); 1822 reclen = EXT4_DIR_REC_LEN(res);
1875
1876 /* Allocate buffer to hold maximum name length */
1877 res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
1878 &fname_crypto_str);
1879 if (res < 0) {
1880 ext4_put_fname_crypto_ctx(&ctx);
1881 return -1;
1882 }
1883 } 1823 }
1884 1824
1885 de = (struct ext4_dir_entry_2 *)buf; 1825 de = (struct ext4_dir_entry_2 *)buf;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 8a8ec6293b19..cf0c472047e3 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1432,12 +1432,15 @@ static int ext4_flex_group_add(struct super_block *sb,
1432 goto exit; 1432 goto exit;
1433 /* 1433 /*
1434 * We will always be modifying at least the superblock and GDT 1434 * We will always be modifying at least the superblock and GDT
1435 * block. If we are adding a group past the last current GDT block, 1435 * blocks. If we are adding a group past the last current GDT block,
1436 * we will also modify the inode and the dindirect block. If we 1436 * we will also modify the inode and the dindirect block. If we
1437 * are adding a group with superblock/GDT backups we will also 1437 * are adding a group with superblock/GDT backups we will also
1438 * modify each of the reserved GDT dindirect blocks. 1438 * modify each of the reserved GDT dindirect blocks.
1439 */ 1439 */
1440 credit = flex_gd->count * 4 + reserved_gdb; 1440 credit = 3; /* sb, resize inode, resize inode dindirect */
1441 /* GDT blocks */
1442 credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
1443 credit += reserved_gdb; /* Reserved GDT dindirect blocks */
1441 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); 1444 handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
1442 if (IS_ERR(handle)) { 1445 if (IS_ERR(handle)) {
1443 err = PTR_ERR(handle); 1446 err = PTR_ERR(handle);
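
The new credit formula above replaces the old per-group estimate (flex_gd->count * 4 + reserved_gdb) with one tied to the blocks actually dirtied. A stand-alone sketch of the arithmetic with illustrative numbers — the names mirror the hunk, but this is not ext4 code:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int flex_gd_credits(unsigned int count,
                                    unsigned int desc_per_block,
                                    unsigned int reserved_gdb)
{
        unsigned int credit = 3;        /* sb, resize inode, resize inode dindirect */

        credit += 1 + DIV_ROUND_UP(count, desc_per_block);      /* GDT blocks */
        credit += reserved_gdb;         /* reserved GDT dindirect blocks */
        return credit;
}

int main(void)
{
        /* Adding 7 groups with 128 descriptors per block and 32 reserved GDT
         * blocks now needs 3 + 1 + 1 + 32 = 37 credits, where the old
         * formula asked for 7 * 4 + 32 = 60. */
        printf("%u\n", flex_gd_credits(7, 128, 32));
        return 0;
}
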
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 19f78f20975e..187b78920314 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -74,7 +74,7 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
74 goto errout; 74 goto errout;
75 } 75 }
76 pstr.name = paddr; 76 pstr.name = paddr;
77 res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr); 77 res = _ext4_fname_disk_to_usr(ctx, NULL, &cstr, &pstr);
78 if (res < 0) 78 if (res < 0)
79 goto errout; 79 goto errout;
80 /* Null-terminate the name */ 80 /* Null-terminate the name */
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index b91b0e10678e..1e1aae669fa8 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1513,6 +1513,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
1513{ 1513{
1514 struct inode *inode = mapping->host; 1514 struct inode *inode = mapping->host;
1515 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1515 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1516 bool locked = false;
1516 int ret; 1517 int ret;
1517 long diff; 1518 long diff;
1518 1519
@@ -1533,7 +1534,13 @@ static int f2fs_write_data_pages(struct address_space *mapping,
1533 1534
1534 diff = nr_pages_to_write(sbi, DATA, wbc); 1535 diff = nr_pages_to_write(sbi, DATA, wbc);
1535 1536
1537 if (!S_ISDIR(inode->i_mode)) {
1538 mutex_lock(&sbi->writepages);
1539 locked = true;
1540 }
1536 ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); 1541 ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
1542 if (locked)
1543 mutex_unlock(&sbi->writepages);
1537 1544
1538 f2fs_submit_merged_bio(sbi, DATA, WRITE); 1545 f2fs_submit_merged_bio(sbi, DATA, WRITE);
1539 1546
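
The hunk above serializes data writeback for regular files through the new sbi->writepages mutex while leaving directory writeback untouched. A minimal user-space sketch of the same locking shape, with a pthread mutex standing in for the kernel mutex (all names here are illustrative, not f2fs symbols):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t writepages_lock = PTHREAD_MUTEX_INITIALIZER;

static int write_data_pages(bool is_dir)
{
        bool locked = false;
        int ret;

        if (!is_dir) {
                pthread_mutex_lock(&writepages_lock);   /* one writer at a time */
                locked = true;
        }
        ret = 0;        /* write_cache_pages() would run here */
        if (locked)
                pthread_mutex_unlock(&writepages_lock);
        return ret;
}
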
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index d8921cf2ba9a..8de34ab6d5b1 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -625,6 +625,7 @@ struct f2fs_sb_info {
625 struct mutex cp_mutex; /* checkpoint procedure lock */ 625 struct mutex cp_mutex; /* checkpoint procedure lock */
626 struct rw_semaphore cp_rwsem; /* blocking FS operations */ 626 struct rw_semaphore cp_rwsem; /* blocking FS operations */
627 struct rw_semaphore node_write; /* locking node writes */ 627 struct rw_semaphore node_write; /* locking node writes */
628 struct mutex writepages; /* mutex for writepages() */
628 wait_queue_head_t cp_wait; 629 wait_queue_head_t cp_wait;
629 630
630 struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ 631 struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 7e3794edae42..658e8079aaf9 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -298,16 +298,14 @@ fail:
298 298
299static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd) 299static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
300{ 300{
301 struct page *page; 301 struct page *page = page_follow_link_light(dentry, nd);
302 302
303 page = page_follow_link_light(dentry, nd); 303 if (IS_ERR_OR_NULL(page))
304 if (IS_ERR(page))
305 return page; 304 return page;
306 305
307 /* this is broken symlink case */ 306 /* this is broken symlink case */
308 if (*nd_get_link(nd) == 0) { 307 if (*nd_get_link(nd) == 0) {
309 kunmap(page); 308 page_put_link(dentry, nd, page);
310 page_cache_release(page);
311 return ERR_PTR(-ENOENT); 309 return ERR_PTR(-ENOENT);
312 } 310 }
313 return page; 311 return page;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 160b88346b24..b2dd1b01f076 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1035,6 +1035,7 @@ try_onemore:
1035 sbi->raw_super = raw_super; 1035 sbi->raw_super = raw_super;
1036 sbi->raw_super_buf = raw_super_buf; 1036 sbi->raw_super_buf = raw_super_buf;
1037 mutex_init(&sbi->gc_mutex); 1037 mutex_init(&sbi->gc_mutex);
1038 mutex_init(&sbi->writepages);
1038 mutex_init(&sbi->cp_mutex); 1039 mutex_init(&sbi->cp_mutex);
1039 init_rwsem(&sbi->node_write); 1040 init_rwsem(&sbi->node_write);
1040 clear_sbi_flag(sbi, SBI_POR_DOING); 1041 clear_sbi_flag(sbi, SBI_POR_DOING);
diff --git a/fs/namei.c b/fs/namei.c
index 4a8d998b7274..fe30d3be43a8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1415,6 +1415,7 @@ static int lookup_fast(struct nameidata *nd,
1415 */ 1415 */
1416 if (nd->flags & LOOKUP_RCU) { 1416 if (nd->flags & LOOKUP_RCU) {
1417 unsigned seq; 1417 unsigned seq;
1418 bool negative;
1418 dentry = __d_lookup_rcu(parent, &nd->last, &seq); 1419 dentry = __d_lookup_rcu(parent, &nd->last, &seq);
1419 if (!dentry) 1420 if (!dentry)
1420 goto unlazy; 1421 goto unlazy;
@@ -1424,8 +1425,11 @@ static int lookup_fast(struct nameidata *nd,
1424 * the dentry name information from lookup. 1425 * the dentry name information from lookup.
1425 */ 1426 */
1426 *inode = dentry->d_inode; 1427 *inode = dentry->d_inode;
1428 negative = d_is_negative(dentry);
1427 if (read_seqcount_retry(&dentry->d_seq, seq)) 1429 if (read_seqcount_retry(&dentry->d_seq, seq))
1428 return -ECHILD; 1430 return -ECHILD;
1431 if (negative)
1432 return -ENOENT;
1429 1433
1430 /* 1434 /*
1431 * This sequence count validates that the parent had no 1435 * This sequence count validates that the parent had no
@@ -1472,6 +1476,10 @@ unlazy:
1472 goto need_lookup; 1476 goto need_lookup;
1473 } 1477 }
1474 1478
1479 if (unlikely(d_is_negative(dentry))) {
1480 dput(dentry);
1481 return -ENOENT;
1482 }
1475 path->mnt = mnt; 1483 path->mnt = mnt;
1476 path->dentry = dentry; 1484 path->dentry = dentry;
1477 err = follow_managed(path, nd->flags); 1485 err = follow_managed(path, nd->flags);
@@ -1583,10 +1591,10 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
1583 goto out_err; 1591 goto out_err;
1584 1592
1585 inode = path->dentry->d_inode; 1593 inode = path->dentry->d_inode;
1594 err = -ENOENT;
1595 if (d_is_negative(path->dentry))
1596 goto out_path_put;
1586 } 1597 }
1587 err = -ENOENT;
1588 if (d_is_negative(path->dentry))
1589 goto out_path_put;
1590 1598
1591 if (should_follow_link(path->dentry, follow)) { 1599 if (should_follow_link(path->dentry, follow)) {
1592 if (nd->flags & LOOKUP_RCU) { 1600 if (nd->flags & LOOKUP_RCU) {
@@ -3036,14 +3044,13 @@ retry_lookup:
3036 3044
3037 BUG_ON(nd->flags & LOOKUP_RCU); 3045 BUG_ON(nd->flags & LOOKUP_RCU);
3038 inode = path->dentry->d_inode; 3046 inode = path->dentry->d_inode;
3039finish_lookup:
3040 /* we _can_ be in RCU mode here */
3041 error = -ENOENT; 3047 error = -ENOENT;
3042 if (d_is_negative(path->dentry)) { 3048 if (d_is_negative(path->dentry)) {
3043 path_to_nameidata(path, nd); 3049 path_to_nameidata(path, nd);
3044 goto out; 3050 goto out;
3045 } 3051 }
3046 3052finish_lookup:
3053 /* we _can_ be in RCU mode here */
3047 if (should_follow_link(path->dentry, !symlink_ok)) { 3054 if (should_follow_link(path->dentry, !symlink_ok)) {
3048 if (nd->flags & LOOKUP_RCU) { 3055 if (nd->flags & LOOKUP_RCU) {
3049 if (unlikely(nd->path.mnt != path->mnt || 3056 if (unlikely(nd->path.mnt != path->mnt ||
@@ -3226,7 +3233,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
3226 3233
3227 if (unlikely(file->f_flags & __O_TMPFILE)) { 3234 if (unlikely(file->f_flags & __O_TMPFILE)) {
3228 error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened); 3235 error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
3229 goto out; 3236 goto out2;
3230 } 3237 }
3231 3238
3232 error = path_init(dfd, pathname, flags, nd); 3239 error = path_init(dfd, pathname, flags, nd);
@@ -3256,6 +3263,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
3256 } 3263 }
3257out: 3264out:
3258 path_cleanup(nd); 3265 path_cleanup(nd);
3266out2:
3259 if (!(opened & FILE_OPENED)) { 3267 if (!(opened & FILE_OPENED)) {
3260 BUG_ON(!error); 3268 BUG_ON(!error);
3261 put_filp(file); 3269 put_filp(file);
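
In the RCU-walk hunk for lookup_fast() above, the ordering is the point: d_is_negative() is sampled before read_seqcount_retry(), so the -ENOENT decision and the inode pointer come from one consistent snapshot of the dentry. A kernel-style sketch of that pattern with demo types — this is not the real namei code:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/seqlock.h>

struct demo_dentry {
        seqcount_t      d_seq;
        struct inode    *d_inode;
        bool            d_negative;
};

static int demo_lookup_rcu(struct demo_dentry *d, struct inode **inode)
{
        unsigned seq;
        bool negative;

        do {
                seq = read_seqcount_begin(&d->d_seq);
                *inode = d->d_inode;            /* read both fields ... */
                negative = d->d_negative;
        } while (read_seqcount_retry(&d->d_seq, seq));  /* ... under one seq */

        return negative ? -ENOENT : 0;
}
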
diff --git a/fs/namespace.c b/fs/namespace.c
index 1f4f9dac6e5a..1b9e11167bae 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3179,6 +3179,12 @@ bool fs_fully_visible(struct file_system_type *type)
3179 if (mnt->mnt.mnt_sb->s_type != type) 3179 if (mnt->mnt.mnt_sb->s_type != type)
3180 continue; 3180 continue;
3181 3181
3182 /* This mount is not fully visible if it's root directory
3183 * is not the root directory of the filesystem.
3184 */
3185 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
3186 continue;
3187
3182 /* This mount is not fully visible if there are any child mounts 3188 /* This mount is not fully visible if there are any child mounts
3183 * that cover anything except for empty directories. 3189 * that cover anything except for empty directories.
3184 */ 3190 */
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 03d647bf195d..cdefaa331a07 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
181} 181}
182 182
183const struct nfsd4_layout_ops bl_layout_ops = { 183const struct nfsd4_layout_ops bl_layout_ops = {
184 /*
185 * Pretend that we send notification to the client. This is a blatant
186 * lie to force recent Linux clients to cache our device IDs.
187 * We rarely ever change the device ID, so the harm of leaking deviceids
188 * for a while isn't too bad. Unfortunately RFC5661 is a complete mess
189 * in this regard, but I filed errata 4119 for this a while ago, and
190 * hopefully the Linux client will eventually start caching deviceids
191 * without this again.
192 */
193 .notify_types =
194 NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
184 .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo, 195 .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo,
185 .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo, 196 .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo,
186 .proc_layoutget = nfsd4_block_proc_layoutget, 197 .proc_layoutget = nfsd4_block_proc_layoutget,
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 58277859a467..5694cfb7a47b 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -224,7 +224,7 @@ static int nfs_cb_stat_to_errno(int status)
224} 224}
225 225
226static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected, 226static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
227 enum nfsstat4 *status) 227 int *status)
228{ 228{
229 __be32 *p; 229 __be32 *p;
230 u32 op; 230 u32 op;
@@ -235,7 +235,7 @@ static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
235 op = be32_to_cpup(p++); 235 op = be32_to_cpup(p++);
236 if (unlikely(op != expected)) 236 if (unlikely(op != expected))
237 goto out_unexpected; 237 goto out_unexpected;
238 *status = be32_to_cpup(p); 238 *status = nfs_cb_stat_to_errno(be32_to_cpup(p));
239 return 0; 239 return 0;
240out_overflow: 240out_overflow:
241 print_overflow_msg(__func__, xdr); 241 print_overflow_msg(__func__, xdr);
@@ -446,22 +446,16 @@ out_overflow:
446static int decode_cb_sequence4res(struct xdr_stream *xdr, 446static int decode_cb_sequence4res(struct xdr_stream *xdr,
447 struct nfsd4_callback *cb) 447 struct nfsd4_callback *cb)
448{ 448{
449 enum nfsstat4 nfserr;
450 int status; 449 int status;
451 450
452 if (cb->cb_minorversion == 0) 451 if (cb->cb_minorversion == 0)
453 return 0; 452 return 0;
454 453
455 status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr); 454 status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_status);
456 if (unlikely(status)) 455 if (unlikely(status || cb->cb_status))
457 goto out; 456 return status;
458 if (unlikely(nfserr != NFS4_OK)) 457
459 goto out_default; 458 return decode_cb_sequence4resok(xdr, cb);
460 status = decode_cb_sequence4resok(xdr, cb);
461out:
462 return status;
463out_default:
464 return nfs_cb_stat_to_errno(nfserr);
465} 459}
466 460
467/* 461/*
@@ -524,26 +518,19 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
524 struct nfsd4_callback *cb) 518 struct nfsd4_callback *cb)
525{ 519{
526 struct nfs4_cb_compound_hdr hdr; 520 struct nfs4_cb_compound_hdr hdr;
527 enum nfsstat4 nfserr;
528 int status; 521 int status;
529 522
530 status = decode_cb_compound4res(xdr, &hdr); 523 status = decode_cb_compound4res(xdr, &hdr);
531 if (unlikely(status)) 524 if (unlikely(status))
532 goto out; 525 return status;
533 526
534 if (cb != NULL) { 527 if (cb != NULL) {
535 status = decode_cb_sequence4res(xdr, cb); 528 status = decode_cb_sequence4res(xdr, cb);
536 if (unlikely(status)) 529 if (unlikely(status || cb->cb_status))
537 goto out; 530 return status;
538 } 531 }
539 532
540 status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr); 533 return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
541 if (unlikely(status))
542 goto out;
543 if (unlikely(nfserr != NFS4_OK))
544 status = nfs_cb_stat_to_errno(nfserr);
545out:
546 return status;
547} 534}
548 535
549#ifdef CONFIG_NFSD_PNFS 536#ifdef CONFIG_NFSD_PNFS
@@ -621,24 +608,18 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
621 struct nfsd4_callback *cb) 608 struct nfsd4_callback *cb)
622{ 609{
623 struct nfs4_cb_compound_hdr hdr; 610 struct nfs4_cb_compound_hdr hdr;
624 enum nfsstat4 nfserr;
625 int status; 611 int status;
626 612
627 status = decode_cb_compound4res(xdr, &hdr); 613 status = decode_cb_compound4res(xdr, &hdr);
628 if (unlikely(status)) 614 if (unlikely(status))
629 goto out; 615 return status;
616
630 if (cb) { 617 if (cb) {
631 status = decode_cb_sequence4res(xdr, cb); 618 status = decode_cb_sequence4res(xdr, cb);
632 if (unlikely(status)) 619 if (unlikely(status || cb->cb_status))
633 goto out; 620 return status;
634 } 621 }
635 status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr); 622 return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
636 if (unlikely(status))
637 goto out;
638 if (unlikely(nfserr != NFS4_OK))
639 status = nfs_cb_stat_to_errno(nfserr);
640out:
641 return status;
642} 623}
643#endif /* CONFIG_NFSD_PNFS */ 624#endif /* CONFIG_NFSD_PNFS */
644 625
@@ -898,13 +879,6 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
898 if (!nfsd41_cb_get_slot(clp, task)) 879 if (!nfsd41_cb_get_slot(clp, task))
899 return; 880 return;
900 } 881 }
901 spin_lock(&clp->cl_lock);
902 if (list_empty(&cb->cb_per_client)) {
903 /* This is the first call, not a restart */
904 cb->cb_done = false;
905 list_add(&cb->cb_per_client, &clp->cl_callbacks);
906 }
907 spin_unlock(&clp->cl_lock);
908 rpc_call_start(task); 882 rpc_call_start(task);
909} 883}
910 884
@@ -918,22 +892,33 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
918 892
919 if (clp->cl_minorversion) { 893 if (clp->cl_minorversion) {
920 /* No need for lock, access serialized in nfsd4_cb_prepare */ 894 /* No need for lock, access serialized in nfsd4_cb_prepare */
921 ++clp->cl_cb_session->se_cb_seq_nr; 895 if (!task->tk_status)
896 ++clp->cl_cb_session->se_cb_seq_nr;
922 clear_bit(0, &clp->cl_cb_slot_busy); 897 clear_bit(0, &clp->cl_cb_slot_busy);
923 rpc_wake_up_next(&clp->cl_cb_waitq); 898 rpc_wake_up_next(&clp->cl_cb_waitq);
924 dprintk("%s: freed slot, new seqid=%d\n", __func__, 899 dprintk("%s: freed slot, new seqid=%d\n", __func__,
925 clp->cl_cb_session->se_cb_seq_nr); 900 clp->cl_cb_session->se_cb_seq_nr);
926 } 901 }
927 902
928 if (clp->cl_cb_client != task->tk_client) { 903 /*
929 /* We're shutting down or changing cl_cb_client; leave 904 * If the backchannel connection was shut down while this
930 * it to nfsd4_process_cb_update to restart the call if 905 * task was queued, we need to resubmit it after setting up
931 * necessary. */ 906 * a new backchannel connection.
907 *
908 * Note that if we lost our callback connection permanently
909 * the submission code will error out, so we don't need to
910 * handle that case here.
911 */
912 if (task->tk_flags & RPC_TASK_KILLED) {
913 task->tk_status = 0;
914 cb->cb_need_restart = true;
932 return; 915 return;
933 } 916 }
934 917
935 if (cb->cb_done) 918 if (cb->cb_status) {
936 return; 919 WARN_ON_ONCE(task->tk_status);
920 task->tk_status = cb->cb_status;
921 }
937 922
938 switch (cb->cb_ops->done(cb, task)) { 923 switch (cb->cb_ops->done(cb, task)) {
939 case 0: 924 case 0:
@@ -949,21 +934,17 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
949 default: 934 default:
950 BUG(); 935 BUG();
951 } 936 }
952 cb->cb_done = true;
953} 937}
954 938
955static void nfsd4_cb_release(void *calldata) 939static void nfsd4_cb_release(void *calldata)
956{ 940{
957 struct nfsd4_callback *cb = calldata; 941 struct nfsd4_callback *cb = calldata;
958 struct nfs4_client *clp = cb->cb_clp;
959
960 if (cb->cb_done) {
961 spin_lock(&clp->cl_lock);
962 list_del(&cb->cb_per_client);
963 spin_unlock(&clp->cl_lock);
964 942
943 if (cb->cb_need_restart)
944 nfsd4_run_cb(cb);
945 else
965 cb->cb_ops->release(cb); 946 cb->cb_ops->release(cb);
966 } 947
967} 948}
968 949
969static const struct rpc_call_ops nfsd4_cb_ops = { 950static const struct rpc_call_ops nfsd4_cb_ops = {
@@ -1058,9 +1039,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
1058 nfsd4_mark_cb_down(clp, err); 1039 nfsd4_mark_cb_down(clp, err);
1059 return; 1040 return;
1060 } 1041 }
1061 /* Yay, the callback channel's back! Restart any callbacks: */
1062 list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
1063 queue_work(callback_wq, &cb->cb_work);
1064} 1042}
1065 1043
1066static void 1044static void
@@ -1071,8 +1049,12 @@ nfsd4_run_cb_work(struct work_struct *work)
1071 struct nfs4_client *clp = cb->cb_clp; 1049 struct nfs4_client *clp = cb->cb_clp;
1072 struct rpc_clnt *clnt; 1050 struct rpc_clnt *clnt;
1073 1051
1074 if (cb->cb_ops && cb->cb_ops->prepare) 1052 if (cb->cb_need_restart) {
1075 cb->cb_ops->prepare(cb); 1053 cb->cb_need_restart = false;
1054 } else {
1055 if (cb->cb_ops && cb->cb_ops->prepare)
1056 cb->cb_ops->prepare(cb);
1057 }
1076 1058
1077 if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK) 1059 if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
1078 nfsd4_process_cb_update(cb); 1060 nfsd4_process_cb_update(cb);
@@ -1084,6 +1066,15 @@ nfsd4_run_cb_work(struct work_struct *work)
1084 cb->cb_ops->release(cb); 1066 cb->cb_ops->release(cb);
1085 return; 1067 return;
1086 } 1068 }
1069
1070 /*
1071 * Don't send probe messages for 4.1 or later.
1072 */
1073 if (!cb->cb_ops && clp->cl_minorversion) {
1074 clp->cl_cb_state = NFSD4_CB_UP;
1075 return;
1076 }
1077
1087 cb->cb_msg.rpc_cred = clp->cl_cb_cred; 1078 cb->cb_msg.rpc_cred = clp->cl_cb_cred;
1088 rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN, 1079 rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
1089 cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb); 1080 cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
@@ -1098,8 +1089,8 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
1098 cb->cb_msg.rpc_resp = cb; 1089 cb->cb_msg.rpc_resp = cb;
1099 cb->cb_ops = ops; 1090 cb->cb_ops = ops;
1100 INIT_WORK(&cb->cb_work, nfsd4_run_cb_work); 1091 INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
1101 INIT_LIST_HEAD(&cb->cb_per_client); 1092 cb->cb_status = 0;
1102 cb->cb_done = true; 1093 cb->cb_need_restart = false;
1103} 1094}
1104 1095
1105void nfsd4_run_cb(struct nfsd4_callback *cb) 1096void nfsd4_run_cb(struct nfsd4_callback *cb)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 38f2d7abe3a7..039f9c8a95e8 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -94,6 +94,7 @@ static struct kmem_cache *lockowner_slab;
94static struct kmem_cache *file_slab; 94static struct kmem_cache *file_slab;
95static struct kmem_cache *stateid_slab; 95static struct kmem_cache *stateid_slab;
96static struct kmem_cache *deleg_slab; 96static struct kmem_cache *deleg_slab;
97static struct kmem_cache *odstate_slab;
97 98
98static void free_session(struct nfsd4_session *); 99static void free_session(struct nfsd4_session *);
99 100
@@ -281,6 +282,7 @@ put_nfs4_file(struct nfs4_file *fi)
281 if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { 282 if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
282 hlist_del_rcu(&fi->fi_hash); 283 hlist_del_rcu(&fi->fi_hash);
283 spin_unlock(&state_lock); 284 spin_unlock(&state_lock);
285 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
284 WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); 286 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
285 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); 287 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
286 } 288 }
@@ -471,6 +473,86 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
471 __nfs4_file_put_access(fp, O_RDONLY); 473 __nfs4_file_put_access(fp, O_RDONLY);
472} 474}
473 475
476/*
477 * Allocate a new open/delegation state counter. This is needed for
478 * pNFS for proper return on close semantics.
479 *
480 * Note that we only allocate it for pNFS-enabled exports, otherwise
481 * all pointers to struct nfs4_clnt_odstate are always NULL.
482 */
483static struct nfs4_clnt_odstate *
484alloc_clnt_odstate(struct nfs4_client *clp)
485{
486 struct nfs4_clnt_odstate *co;
487
488 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
489 if (co) {
490 co->co_client = clp;
491 atomic_set(&co->co_odcount, 1);
492 }
493 return co;
494}
495
496static void
497hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
498{
499 struct nfs4_file *fp = co->co_file;
500
501 lockdep_assert_held(&fp->fi_lock);
502 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
503}
504
505static inline void
506get_clnt_odstate(struct nfs4_clnt_odstate *co)
507{
508 if (co)
509 atomic_inc(&co->co_odcount);
510}
511
512static void
513put_clnt_odstate(struct nfs4_clnt_odstate *co)
514{
515 struct nfs4_file *fp;
516
517 if (!co)
518 return;
519
520 fp = co->co_file;
521 if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
522 list_del(&co->co_perfile);
523 spin_unlock(&fp->fi_lock);
524
525 nfsd4_return_all_file_layouts(co->co_client, fp);
526 kmem_cache_free(odstate_slab, co);
527 }
528}
529
530static struct nfs4_clnt_odstate *
531find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
532{
533 struct nfs4_clnt_odstate *co;
534 struct nfs4_client *cl;
535
536 if (!new)
537 return NULL;
538
539 cl = new->co_client;
540
541 spin_lock(&fp->fi_lock);
542 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
543 if (co->co_client == cl) {
544 get_clnt_odstate(co);
545 goto out;
546 }
547 }
548 co = new;
549 co->co_file = fp;
550 hash_clnt_odstate_locked(new);
551out:
552 spin_unlock(&fp->fi_lock);
553 return co;
554}
555
474struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, 556struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
475 struct kmem_cache *slab) 557 struct kmem_cache *slab)
476{ 558{
@@ -606,7 +688,8 @@ static void block_delegations(struct knfsd_fh *fh)
606} 688}
607 689
608static struct nfs4_delegation * 690static struct nfs4_delegation *
609alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh) 691alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
692 struct nfs4_clnt_odstate *odstate)
610{ 693{
611 struct nfs4_delegation *dp; 694 struct nfs4_delegation *dp;
612 long n; 695 long n;
@@ -631,6 +714,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
631 INIT_LIST_HEAD(&dp->dl_perfile); 714 INIT_LIST_HEAD(&dp->dl_perfile);
632 INIT_LIST_HEAD(&dp->dl_perclnt); 715 INIT_LIST_HEAD(&dp->dl_perclnt);
633 INIT_LIST_HEAD(&dp->dl_recall_lru); 716 INIT_LIST_HEAD(&dp->dl_recall_lru);
717 dp->dl_clnt_odstate = odstate;
718 get_clnt_odstate(odstate);
634 dp->dl_type = NFS4_OPEN_DELEGATE_READ; 719 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
635 dp->dl_retries = 1; 720 dp->dl_retries = 1;
636 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, 721 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
@@ -714,6 +799,7 @@ static void destroy_delegation(struct nfs4_delegation *dp)
714 spin_lock(&state_lock); 799 spin_lock(&state_lock);
715 unhash_delegation_locked(dp); 800 unhash_delegation_locked(dp);
716 spin_unlock(&state_lock); 801 spin_unlock(&state_lock);
802 put_clnt_odstate(dp->dl_clnt_odstate);
717 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 803 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
718 nfs4_put_stid(&dp->dl_stid); 804 nfs4_put_stid(&dp->dl_stid);
719} 805}
@@ -724,6 +810,7 @@ static void revoke_delegation(struct nfs4_delegation *dp)
724 810
725 WARN_ON(!list_empty(&dp->dl_recall_lru)); 811 WARN_ON(!list_empty(&dp->dl_recall_lru));
726 812
813 put_clnt_odstate(dp->dl_clnt_odstate);
727 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 814 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
728 815
729 if (clp->cl_minorversion == 0) 816 if (clp->cl_minorversion == 0)
@@ -933,6 +1020,7 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
933{ 1020{
934 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1021 struct nfs4_ol_stateid *stp = openlockstateid(stid);
935 1022
1023 put_clnt_odstate(stp->st_clnt_odstate);
936 release_all_access(stp); 1024 release_all_access(stp);
937 if (stp->st_stateowner) 1025 if (stp->st_stateowner)
938 nfs4_put_stateowner(stp->st_stateowner); 1026 nfs4_put_stateowner(stp->st_stateowner);
@@ -1538,7 +1626,6 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
1538 INIT_LIST_HEAD(&clp->cl_openowners); 1626 INIT_LIST_HEAD(&clp->cl_openowners);
1539 INIT_LIST_HEAD(&clp->cl_delegations); 1627 INIT_LIST_HEAD(&clp->cl_delegations);
1540 INIT_LIST_HEAD(&clp->cl_lru); 1628 INIT_LIST_HEAD(&clp->cl_lru);
1541 INIT_LIST_HEAD(&clp->cl_callbacks);
1542 INIT_LIST_HEAD(&clp->cl_revoked); 1629 INIT_LIST_HEAD(&clp->cl_revoked);
1543#ifdef CONFIG_NFSD_PNFS 1630#ifdef CONFIG_NFSD_PNFS
1544 INIT_LIST_HEAD(&clp->cl_lo_states); 1631 INIT_LIST_HEAD(&clp->cl_lo_states);
@@ -1634,6 +1721,7 @@ __destroy_client(struct nfs4_client *clp)
1634 while (!list_empty(&reaplist)) { 1721 while (!list_empty(&reaplist)) {
1635 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1722 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1636 list_del_init(&dp->dl_recall_lru); 1723 list_del_init(&dp->dl_recall_lru);
1724 put_clnt_odstate(dp->dl_clnt_odstate);
1637 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 1725 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
1638 nfs4_put_stid(&dp->dl_stid); 1726 nfs4_put_stid(&dp->dl_stid);
1639 } 1727 }
@@ -3057,6 +3145,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3057 spin_lock_init(&fp->fi_lock); 3145 spin_lock_init(&fp->fi_lock);
3058 INIT_LIST_HEAD(&fp->fi_stateids); 3146 INIT_LIST_HEAD(&fp->fi_stateids);
3059 INIT_LIST_HEAD(&fp->fi_delegations); 3147 INIT_LIST_HEAD(&fp->fi_delegations);
3148 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3060 fh_copy_shallow(&fp->fi_fhandle, fh); 3149 fh_copy_shallow(&fp->fi_fhandle, fh);
3061 fp->fi_deleg_file = NULL; 3150 fp->fi_deleg_file = NULL;
3062 fp->fi_had_conflict = false; 3151 fp->fi_had_conflict = false;
@@ -3073,6 +3162,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3073void 3162void
3074nfsd4_free_slabs(void) 3163nfsd4_free_slabs(void)
3075{ 3164{
3165 kmem_cache_destroy(odstate_slab);
3076 kmem_cache_destroy(openowner_slab); 3166 kmem_cache_destroy(openowner_slab);
3077 kmem_cache_destroy(lockowner_slab); 3167 kmem_cache_destroy(lockowner_slab);
3078 kmem_cache_destroy(file_slab); 3168 kmem_cache_destroy(file_slab);
@@ -3103,8 +3193,14 @@ nfsd4_init_slabs(void)
3103 sizeof(struct nfs4_delegation), 0, 0, NULL); 3193 sizeof(struct nfs4_delegation), 0, 0, NULL);
3104 if (deleg_slab == NULL) 3194 if (deleg_slab == NULL)
3105 goto out_free_stateid_slab; 3195 goto out_free_stateid_slab;
3196 odstate_slab = kmem_cache_create("nfsd4_odstate",
3197 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3198 if (odstate_slab == NULL)
3199 goto out_free_deleg_slab;
3106 return 0; 3200 return 0;
3107 3201
3202out_free_deleg_slab:
3203 kmem_cache_destroy(deleg_slab);
3108out_free_stateid_slab: 3204out_free_stateid_slab:
3109 kmem_cache_destroy(stateid_slab); 3205 kmem_cache_destroy(stateid_slab);
3110out_free_file_slab: 3206out_free_file_slab:
@@ -3581,6 +3677,14 @@ alloc_stateid:
3581 open->op_stp = nfs4_alloc_open_stateid(clp); 3677 open->op_stp = nfs4_alloc_open_stateid(clp);
3582 if (!open->op_stp) 3678 if (!open->op_stp)
3583 return nfserr_jukebox; 3679 return nfserr_jukebox;
3680
3681 if (nfsd4_has_session(cstate) &&
3682 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
3683 open->op_odstate = alloc_clnt_odstate(clp);
3684 if (!open->op_odstate)
3685 return nfserr_jukebox;
3686 }
3687
3584 return nfs_ok; 3688 return nfs_ok;
3585} 3689}
3586 3690
@@ -3869,7 +3973,7 @@ out_fput:
3869 3973
3870static struct nfs4_delegation * 3974static struct nfs4_delegation *
3871nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, 3975nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
3872 struct nfs4_file *fp) 3976 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
3873{ 3977{
3874 int status; 3978 int status;
3875 struct nfs4_delegation *dp; 3979 struct nfs4_delegation *dp;
@@ -3877,7 +3981,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
3877 if (fp->fi_had_conflict) 3981 if (fp->fi_had_conflict)
3878 return ERR_PTR(-EAGAIN); 3982 return ERR_PTR(-EAGAIN);
3879 3983
3880 dp = alloc_init_deleg(clp, fh); 3984 dp = alloc_init_deleg(clp, fh, odstate);
3881 if (!dp) 3985 if (!dp)
3882 return ERR_PTR(-ENOMEM); 3986 return ERR_PTR(-ENOMEM);
3883 3987
@@ -3903,6 +4007,7 @@ out_unlock:
3903 spin_unlock(&state_lock); 4007 spin_unlock(&state_lock);
3904out: 4008out:
3905 if (status) { 4009 if (status) {
4010 put_clnt_odstate(dp->dl_clnt_odstate);
3906 nfs4_put_stid(&dp->dl_stid); 4011 nfs4_put_stid(&dp->dl_stid);
3907 return ERR_PTR(status); 4012 return ERR_PTR(status);
3908 } 4013 }
@@ -3980,7 +4085,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
3980 default: 4085 default:
3981 goto out_no_deleg; 4086 goto out_no_deleg;
3982 } 4087 }
3983 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file); 4088 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
3984 if (IS_ERR(dp)) 4089 if (IS_ERR(dp))
3985 goto out_no_deleg; 4090 goto out_no_deleg;
3986 4091
@@ -4069,6 +4174,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
4069 release_open_stateid(stp); 4174 release_open_stateid(stp);
4070 goto out; 4175 goto out;
4071 } 4176 }
4177
4178 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4179 open->op_odstate);
4180 if (stp->st_clnt_odstate == open->op_odstate)
4181 open->op_odstate = NULL;
4072 } 4182 }
4073 update_stateid(&stp->st_stid.sc_stateid); 4183 update_stateid(&stp->st_stid.sc_stateid);
4074 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4184 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4129,6 +4239,8 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4129 kmem_cache_free(file_slab, open->op_file); 4239 kmem_cache_free(file_slab, open->op_file);
4130 if (open->op_stp) 4240 if (open->op_stp)
4131 nfs4_put_stid(&open->op_stp->st_stid); 4241 nfs4_put_stid(&open->op_stp->st_stid);
4242 if (open->op_odstate)
4243 kmem_cache_free(odstate_slab, open->op_odstate);
4132} 4244}
4133 4245
4134__be32 4246__be32
@@ -4385,10 +4497,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
4385 return nfserr_old_stateid; 4497 return nfserr_old_stateid;
4386} 4498}
4387 4499
4500static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4501{
4502 if (ols->st_stateowner->so_is_open_owner &&
4503 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4504 return nfserr_bad_stateid;
4505 return nfs_ok;
4506}
4507
4388static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 4508static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4389{ 4509{
4390 struct nfs4_stid *s; 4510 struct nfs4_stid *s;
4391 struct nfs4_ol_stateid *ols;
4392 __be32 status = nfserr_bad_stateid; 4511 __be32 status = nfserr_bad_stateid;
4393 4512
4394 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4513 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
@@ -4418,13 +4537,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4418 break; 4537 break;
4419 case NFS4_OPEN_STID: 4538 case NFS4_OPEN_STID:
4420 case NFS4_LOCK_STID: 4539 case NFS4_LOCK_STID:
4421 ols = openlockstateid(s); 4540 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
4422 if (ols->st_stateowner->so_is_open_owner
4423 && !(openowner(ols->st_stateowner)->oo_flags
4424 & NFS4_OO_CONFIRMED))
4425 status = nfserr_bad_stateid;
4426 else
4427 status = nfs_ok;
4428 break; 4541 break;
4429 default: 4542 default:
4430 printk("unknown stateid type %x\n", s->sc_type); 4543 printk("unknown stateid type %x\n", s->sc_type);
@@ -4516,8 +4629,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
4516 status = nfs4_check_fh(current_fh, stp); 4629 status = nfs4_check_fh(current_fh, stp);
4517 if (status) 4630 if (status)
4518 goto out; 4631 goto out;
4519 if (stp->st_stateowner->so_is_open_owner 4632 status = nfsd4_check_openowner_confirmed(stp);
4520 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 4633 if (status)
4521 goto out; 4634 goto out;
4522 status = nfs4_check_openmode(stp, flags); 4635 status = nfs4_check_openmode(stp, flags);
4523 if (status) 4636 if (status)
@@ -4852,9 +4965,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4852 update_stateid(&stp->st_stid.sc_stateid); 4965 update_stateid(&stp->st_stid.sc_stateid);
4853 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4966 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4854 4967
4855 nfsd4_return_all_file_layouts(stp->st_stateowner->so_client,
4856 stp->st_stid.sc_file);
4857
4858 nfsd4_close_open_stateid(stp); 4968 nfsd4_close_open_stateid(stp);
4859 4969
4860 /* put reference from nfs4_preprocess_seqid_op */ 4970 /* put reference from nfs4_preprocess_seqid_op */
@@ -6488,6 +6598,7 @@ nfs4_state_shutdown_net(struct net *net)
6488 list_for_each_safe(pos, next, &reaplist) { 6598 list_for_each_safe(pos, next, &reaplist) {
6489 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 6599 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
6490 list_del_init(&dp->dl_recall_lru); 6600 list_del_init(&dp->dl_recall_lru);
6601 put_clnt_odstate(dp->dl_clnt_odstate);
6491 nfs4_put_deleg_lease(dp->dl_stid.sc_file); 6602 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
6492 nfs4_put_stid(&dp->dl_stid); 6603 nfs4_put_stid(&dp->dl_stid);
6493 } 6604 }
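
The nfs4_clnt_odstate plumbing added above follows a preallocate-then-find-or-insert shape: the open path allocates a candidate (op_odstate) outside any lock, nfsd4_process_open2() either hashes it or takes a reference on an existing per-client entry (op_odstate is only NULLed when the candidate was consumed), and put_clnt_odstate() drops the reference with atomic_dec_and_lock(). A stand-alone sketch of that shape, with illustrative types and a pthread mutex in place of fi_lock:

#include <pthread.h>

struct odstate {
        const void      *client;        /* lookup key */
        int              refcount;      /* protected by the list lock here */
        struct odstate  *next;
};

struct file_state {
        pthread_mutex_t  lock;
        struct odstate  *head;
};

static struct odstate *find_or_hash(struct file_state *f, struct odstate *new)
{
        struct odstate *co;

        pthread_mutex_lock(&f->lock);
        for (co = f->head; co; co = co->next) {
                if (co->client == new->client) {
                        co->refcount++;                 /* reuse existing entry */
                        pthread_mutex_unlock(&f->lock);
                        return co;                      /* caller still owns 'new' */
                }
        }
        new->next = f->head;                            /* insert the candidate */
        f->head = new;
        pthread_mutex_unlock(&f->lock);
        return new;
}
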
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 4f3bfeb11766..dbc4f85a5008 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -63,12 +63,12 @@ typedef struct {
63 63
64struct nfsd4_callback { 64struct nfsd4_callback {
65 struct nfs4_client *cb_clp; 65 struct nfs4_client *cb_clp;
66 struct list_head cb_per_client;
67 u32 cb_minorversion; 66 u32 cb_minorversion;
68 struct rpc_message cb_msg; 67 struct rpc_message cb_msg;
69 struct nfsd4_callback_ops *cb_ops; 68 struct nfsd4_callback_ops *cb_ops;
70 struct work_struct cb_work; 69 struct work_struct cb_work;
71 bool cb_done; 70 int cb_status;
71 bool cb_need_restart;
72}; 72};
73 73
74struct nfsd4_callback_ops { 74struct nfsd4_callback_ops {
@@ -126,6 +126,7 @@ struct nfs4_delegation {
126 struct list_head dl_perfile; 126 struct list_head dl_perfile;
127 struct list_head dl_perclnt; 127 struct list_head dl_perclnt;
128 struct list_head dl_recall_lru; /* delegation recalled */ 128 struct list_head dl_recall_lru; /* delegation recalled */
129 struct nfs4_clnt_odstate *dl_clnt_odstate;
129 u32 dl_type; 130 u32 dl_type;
130 time_t dl_time; 131 time_t dl_time;
131/* For recall: */ 132/* For recall: */
@@ -332,7 +333,6 @@ struct nfs4_client {
332 int cl_cb_state; 333 int cl_cb_state;
333 struct nfsd4_callback cl_cb_null; 334 struct nfsd4_callback cl_cb_null;
334 struct nfsd4_session *cl_cb_session; 335 struct nfsd4_session *cl_cb_session;
335 struct list_head cl_callbacks; /* list of in-progress callbacks */
336 336
337 /* for all client information that callback code might need: */ 337 /* for all client information that callback code might need: */
338 spinlock_t cl_lock; 338 spinlock_t cl_lock;
@@ -465,6 +465,17 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
465} 465}
466 466
467/* 467/*
468 * Per-client state indicating no. of opens and outstanding delegations
469 * on a file from a particular client.'od' stands for 'open & delegation'
470 */
471struct nfs4_clnt_odstate {
472 struct nfs4_client *co_client;
473 struct nfs4_file *co_file;
474 struct list_head co_perfile;
475 atomic_t co_odcount;
476};
477
478/*
468 * nfs4_file: a file opened by some number of (open) nfs4_stateowners. 479 * nfs4_file: a file opened by some number of (open) nfs4_stateowners.
469 * 480 *
470 * These objects are global. nfsd keeps one instance of a nfs4_file per 481 * These objects are global. nfsd keeps one instance of a nfs4_file per
@@ -485,6 +496,7 @@ struct nfs4_file {
485 struct list_head fi_delegations; 496 struct list_head fi_delegations;
486 struct rcu_head fi_rcu; 497 struct rcu_head fi_rcu;
487 }; 498 };
499 struct list_head fi_clnt_odstate;
488 /* One each for O_RDONLY, O_WRONLY, O_RDWR: */ 500 /* One each for O_RDONLY, O_WRONLY, O_RDWR: */
489 struct file * fi_fds[3]; 501 struct file * fi_fds[3];
490 /* 502 /*
@@ -526,6 +538,7 @@ struct nfs4_ol_stateid {
526 struct list_head st_perstateowner; 538 struct list_head st_perstateowner;
527 struct list_head st_locks; 539 struct list_head st_locks;
528 struct nfs4_stateowner * st_stateowner; 540 struct nfs4_stateowner * st_stateowner;
541 struct nfs4_clnt_odstate * st_clnt_odstate;
529 unsigned char st_access_bmap; 542 unsigned char st_access_bmap;
530 unsigned char st_deny_bmap; 543 unsigned char st_deny_bmap;
531 struct nfs4_ol_stateid * st_openstp; 544 struct nfs4_ol_stateid * st_openstp;
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index f982ae84f0cd..2f8c092be2b3 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -247,6 +247,7 @@ struct nfsd4_open {
247 struct nfs4_openowner *op_openowner; /* used during processing */ 247 struct nfs4_openowner *op_openowner; /* used during processing */
248 struct nfs4_file *op_file; /* used during processing */ 248 struct nfs4_file *op_file; /* used during processing */
249 struct nfs4_ol_stateid *op_stp; /* used during processing */ 249 struct nfs4_ol_stateid *op_stp; /* used during processing */
250 struct nfs4_clnt_odstate *op_odstate; /* used during processing */
250 struct nfs4_acl *op_acl; 251 struct nfs4_acl *op_acl;
251 struct xdr_netobj op_label; 252 struct xdr_netobj op_label;
252}; 253};
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 059f37137f9a..919fd5bb14a8 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
388 nchildren = nilfs_btree_node_get_nchildren(node); 388 nchildren = nilfs_btree_node_get_nchildren(node);
389 389
390 if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || 390 if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
391 level > NILFS_BTREE_LEVEL_MAX || 391 level >= NILFS_BTREE_LEVEL_MAX ||
392 nchildren < 0 || 392 nchildren < 0 ||
393 nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { 393 nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
394 pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", 394 pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
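
The one-character fix above matters because NILFS_BTREE_LEVEL_MAX is an exclusive bound, as the nilfs2_fs.h hunk later in this diff spells out: valid non-data levels run from NILFS_BTREE_LEVEL_NODE_MIN (1) up to 13, so a root node claiming level 14 must be rejected. In sketch form, assuming those two defines from include/linux/nilfs2_fs.h:

static int btree_root_level_ok(int level)
{
        return level >= NILFS_BTREE_LEVEL_NODE_MIN &&
               level <  NILFS_BTREE_LEVEL_MAX;  /* 14 itself is out of range */
}
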
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a6944b25fd5b..fdf4b41d0609 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -757,6 +757,19 @@ lookup:
757 if (tmpres) { 757 if (tmpres) {
758 spin_unlock(&dlm->spinlock); 758 spin_unlock(&dlm->spinlock);
759 spin_lock(&tmpres->spinlock); 759 spin_lock(&tmpres->spinlock);
760
761 /*
762 * Right after dlm spinlock was released, dlm_thread could have
763 * purged the lockres. Check if lockres got unhashed. If so
764 * start over.
765 */
766 if (hlist_unhashed(&tmpres->hash_node)) {
767 spin_unlock(&tmpres->spinlock);
768 dlm_lockres_put(tmpres);
769 tmpres = NULL;
770 goto lookup;
771 }
772
760 /* Wait on the thread that is mastering the resource */ 773 /* Wait on the thread that is mastering the resource */
761 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 774 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
762 __dlm_wait_on_lockres(tmpres); 775 __dlm_wait_on_lockres(tmpres);
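
The recheck added above handles the classic drop-lock/relock window: the lockres was found under dlm->spinlock, but by the time tmpres->spinlock is taken, dlm_thread may already have purged and unhashed it, so the only safe response is to drop the reference and redo the lookup. A generic stand-alone sketch of that pattern — the lookup and put helpers here are hypothetical:

#include <pthread.h>
#include <stdbool.h>

struct res {
        pthread_mutex_t lock;
        bool            unhashed;       /* torn down by another thread */
        int             refcount;
};

extern pthread_mutex_t table_lock;
extern struct res *table_lookup_locked(const char *name);   /* takes a reference */
extern void res_put(struct res *r);                          /* drops it */

static struct res *lookup_stable(const char *name)
{
        struct res *r;

again:
        pthread_mutex_lock(&table_lock);
        r = table_lookup_locked(name);
        pthread_mutex_unlock(&table_lock);
        if (!r)
                return NULL;

        pthread_mutex_lock(&r->lock);
        if (r->unhashed) {                      /* purged while we were unlocked */
                pthread_mutex_unlock(&r->lock);
                res_put(r);
                goto again;                     /* start over, as in the hunk */
        }
        return r;                               /* returned with r->lock held */
}
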
diff --git a/fs/splice.c b/fs/splice.c
index 476024bb6546..bfe62ae40f40 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1161,7 +1161,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1161 long ret, bytes; 1161 long ret, bytes;
1162 umode_t i_mode; 1162 umode_t i_mode;
1163 size_t len; 1163 size_t len;
1164 int i, flags; 1164 int i, flags, more;
1165 1165
1166 /* 1166 /*
1167 * We require the input being a regular file, as we don't want to 1167 * We require the input being a regular file, as we don't want to
@@ -1204,6 +1204,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1204 * Don't block on output, we have to drain the direct pipe. 1204 * Don't block on output, we have to drain the direct pipe.
1205 */ 1205 */
1206 sd->flags &= ~SPLICE_F_NONBLOCK; 1206 sd->flags &= ~SPLICE_F_NONBLOCK;
1207 more = sd->flags & SPLICE_F_MORE;
1207 1208
1208 while (len) { 1209 while (len) {
1209 size_t read_len; 1210 size_t read_len;
@@ -1217,6 +1218,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1217 sd->total_len = read_len; 1218 sd->total_len = read_len;
1218 1219
1219 /* 1220 /*
1221 * If more data is pending, set SPLICE_F_MORE
1222 * If this is the last data and SPLICE_F_MORE was not set
1223 * initially, clears it.
1224 */
1225 if (read_len < len)
1226 sd->flags |= SPLICE_F_MORE;
1227 else if (!more)
1228 sd->flags &= ~SPLICE_F_MORE;
1229 /*
1220 * NOTE: nonblocking mode only applies to the input. We 1230 * NOTE: nonblocking mode only applies to the input. We
1221 * must not do the output in nonblocking mode as then we 1231 * must not do the output in nonblocking mode as then we
1222 * could get stuck data in the internal pipe: 1232 * could get stuck data in the internal pipe:
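
The SPLICE_F_MORE handling above boils down to a small rule: while splice_direct_to_actor() still has chunks to feed, keep the "more data follows" hint set so the output side can avoid pushing partial frames; on the final chunk, keep it only if the caller asked for it in the first place. A tiny sketch of that rule with an illustrative helper, not the splice code itself:

#include <stdbool.h>
#include <stddef.h>

static bool chunk_wants_more(size_t read_len, size_t remaining, bool caller_more)
{
        if (read_len < remaining)       /* more chunks will follow this one */
                return true;
        return caller_more;             /* last chunk: respect the caller's flag */
}
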
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a1b25e35ea5f..b7299febc4b4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -220,7 +220,7 @@ enum rq_flag_bits {
220 220
221/* This mask is used for both bio and request merge checking */ 221/* This mask is used for both bio and request merge checking */
222#define REQ_NOMERGE_FLAGS \ 222#define REQ_NOMERGE_FLAGS \
223 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) 223 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
224 224
225#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 225#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
226#define REQ_THROTTLED (1ULL << __REQ_THROTTLED) 226#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cdf13ca7cac3..371e560d13cf 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -9,10 +9,24 @@
9 + __GNUC_MINOR__ * 100 \ 9 + __GNUC_MINOR__ * 100 \
10 + __GNUC_PATCHLEVEL__) 10 + __GNUC_PATCHLEVEL__)
11 11
12
13/* Optimization barrier */ 12/* Optimization barrier */
13
14/* The "volatile" is due to gcc bugs */ 14/* The "volatile" is due to gcc bugs */
15#define barrier() __asm__ __volatile__("": : :"memory") 15#define barrier() __asm__ __volatile__("": : :"memory")
16/*
17 * This version is i.e. to prevent dead stores elimination on @ptr
18 * where gcc and llvm may behave differently when otherwise using
19 * normal barrier(): while gcc behavior gets along with a normal
20 * barrier(), llvm needs an explicit input variable to be assumed
21 * clobbered. The issue is as follows: while the inline asm might
22 * access any memory it wants, the compiler could have fit all of
23 * @ptr into memory registers instead, and since @ptr never escaped
24 * from that, it proofed that the inline asm wasn't touching any of
25 * it. This version works well with both compilers, i.e. we're telling
26 * the compiler that the inline asm absolutely may see the contents
27 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
28 */
29#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
16 30
17/* 31/*
18 * This macro obfuscates arithmetic on a variable address so that gcc 32 * This macro obfuscates arithmetic on a variable address so that gcc
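
barrier_data() exists for the case the comment above describes: a plain barrier() is not always enough to keep LLVM from discarding stores to a buffer that is provably never read again, typically when scrubbing secrets. A kernel-style sketch of the intended use — memzero_explicit() is the kind of consumer this is aimed at; the helper below is illustrative:

#include <linux/compiler.h>
#include <linux/string.h>

static void wipe(void *buf, size_t len)
{
        memset(buf, 0, len);
        barrier_data(buf);      /* the asm "may read" buf, so the stores must stay */
}
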
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index ba147a1727e6..0c9a2f2c2802 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -13,9 +13,12 @@
13/* Intel ECC compiler doesn't support gcc specific asm stmts. 13/* Intel ECC compiler doesn't support gcc specific asm stmts.
14 * It uses intrinsics to do the equivalent things. 14 * It uses intrinsics to do the equivalent things.
15 */ 15 */
16#undef barrier_data
16#undef RELOC_HIDE 17#undef RELOC_HIDE
17#undef OPTIMIZER_HIDE_VAR 18#undef OPTIMIZER_HIDE_VAR
18 19
20#define barrier_data(ptr) barrier()
21
19#define RELOC_HIDE(ptr, off) \ 22#define RELOC_HIDE(ptr, off) \
20 ({ unsigned long __ptr; \ 23 ({ unsigned long __ptr; \
21 __ptr = (unsigned long) (ptr); \ 24 __ptr = (unsigned long) (ptr); \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 0e41ca0e5927..867722591be2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
169# define barrier() __memory_barrier() 169# define barrier() __memory_barrier()
170#endif 170#endif
171 171
172#ifndef barrier_data
173# define barrier_data(ptr) barrier()
174#endif
175
172/* Unreachable code */ 176/* Unreachable code */
173#ifndef unreachable 177#ifndef unreachable
174# define unreachable() do { } while (1) 178# define unreachable() do { } while (1)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 46e83c2156c6..f9ecf63d47f1 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -46,7 +46,7 @@ const char *ftrace_print_hex_seq(struct trace_seq *p,
46 const unsigned char *buf, int len); 46 const unsigned char *buf, int len);
47 47
48const char *ftrace_print_array_seq(struct trace_seq *p, 48const char *ftrace_print_array_seq(struct trace_seq *p,
49 const void *buf, int buf_len, 49 const void *buf, int count,
50 size_t el_size); 50 size_t el_size);
51 51
52struct trace_iterator; 52struct trace_iterator;
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 36ec4ae74634..9de976b4f9a7 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -95,8 +95,6 @@
95 95
96struct device_node; 96struct device_node;
97 97
98extern struct irq_chip gic_arch_extn;
99
100void gic_set_irqchip_flags(unsigned long flags); 98void gic_set_irqchip_flags(unsigned long flags);
101void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, 99void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
102 u32 offset, struct device_node *); 100 u32 offset, struct device_node *);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 8dad4a307bb8..28aeae46f355 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -205,6 +205,7 @@ enum {
205 ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ 205 ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
206 ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ 206 ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
207 ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ 207 ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
208 ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
208 209
209 /* struct ata_port flags */ 210 /* struct ata_port flags */
210 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ 211 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -309,6 +310,12 @@ enum {
309 */ 310 */
310 ATA_TMOUT_PMP_SRST_WAIT = 5000, 311 ATA_TMOUT_PMP_SRST_WAIT = 5000,
311 312
313 /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
314 * be a spurious PHY event, so ignore the first PHY event that
315 * occurs within 10s after the policy change.
316 */
317 ATA_TMOUT_SPURIOUS_PHY = 10000,
318
312 /* ATA bus states */ 319 /* ATA bus states */
313 BUS_UNKNOWN = 0, 320 BUS_UNKNOWN = 0,
314 BUS_DMA = 1, 321 BUS_DMA = 1,
@@ -788,6 +795,8 @@ struct ata_link {
788 struct ata_eh_context eh_context; 795 struct ata_eh_context eh_context;
789 796
790 struct ata_device device[ATA_MAX_DEVICES]; 797 struct ata_device device[ATA_MAX_DEVICES];
798
799 unsigned long last_lpm_change; /* when last LPM change happened */
791}; 800};
792#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) 801#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
793#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) 802#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
@@ -1201,6 +1210,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
1201extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); 1210extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
1202extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); 1211extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
1203extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); 1212extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
1213extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
1204 1214
1205extern int ata_cable_40wire(struct ata_port *ap); 1215extern int ata_cable_40wire(struct ata_port *ap);
1206extern int ata_cable_80wire(struct ata_port *ap); 1216extern int ata_cable_80wire(struct ata_port *ap);
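
The three libata additions above fit together: ATA_LFLAG_CHANGED and last_lpm_change record that the link's LPM policy was just switched, and sata_lpm_ignore_phy_events() (implemented in libata-core.c, not shown in this hunk) is expected to use them to swallow the spurious PHY event that can follow a switch to ATA_LPM_MAX_POWER within ATA_TMOUT_SPURIOUS_PHY. Roughly, the check amounts to the following sketch, which is not the actual implementation:

#include <linux/jiffies.h>
#include <linux/libata.h>

static bool lpm_phy_event_is_spurious(struct ata_link *link)
{
        return (link->flags & ATA_LFLAG_CHANGED) &&
               time_before(jiffies, link->last_lpm_change +
                                    msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY));
}
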
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fa57915f440c..cd0951c1893d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -25,7 +25,6 @@
25#ifndef _LINUX_NETDEVICE_H 25#ifndef _LINUX_NETDEVICE_H
26#define _LINUX_NETDEVICE_H 26#define _LINUX_NETDEVICE_H
27 27
28#include <linux/pm_qos.h>
29#include <linux/timer.h> 28#include <linux/timer.h>
30#include <linux/bug.h> 29#include <linux/bug.h>
31#include <linux/delay.h> 30#include <linux/delay.h>
@@ -1499,8 +1498,6 @@ enum netdev_priv_flags {
1499 * 1498 *
1500 * @qdisc_tx_busylock: XXX: need comments on this one 1499 * @qdisc_tx_busylock: XXX: need comments on this one
1501 * 1500 *
1502 * @pm_qos_req: Power Management QoS object
1503 *
1504 * FIXME: cleanup struct net_device such that network protocol info 1501 * FIXME: cleanup struct net_device such that network protocol info
1505 * moves out. 1502 * moves out.
1506 */ 1503 */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index ff3fea3194c6..9abb763e4b86 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -460,7 +460,7 @@ struct nilfs_btree_node {
460/* level */ 460/* level */
461#define NILFS_BTREE_LEVEL_DATA 0 461#define NILFS_BTREE_LEVEL_DATA 0
462#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) 462#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
463#define NILFS_BTREE_LEVEL_MAX 14 463#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
464 464
465/** 465/**
466 * struct nilfs_palloc_group_desc - block group descriptor 466 * struct nilfs_palloc_group_desc - block group descriptor
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 38cff8f6716d..2f7b9a40f627 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2541,10 +2541,6 @@
2541 2541
2542#define PCI_VENDOR_ID_INTEL 0x8086 2542#define PCI_VENDOR_ID_INTEL 0x8086
2543#define PCI_DEVICE_ID_INTEL_EESSC 0x0008 2543#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
2544#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
2545#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
2546#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
2547#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
2548#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 2544#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
2549#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 2545#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
2550#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 2546#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index d5f4fb69dba3..f9b2ce58039b 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -5,7 +5,7 @@
5({ \ 5({ \
6 typeof(as) __fc_i, __fc_as = (as) - 1; \ 6 typeof(as) __fc_i, __fc_as = (as) - 1; \
7 typeof(x) __fc_x = (x); \ 7 typeof(x) __fc_x = (x); \
8 typeof(*a) *__fc_a = (a); \ 8 typeof(*a) const *__fc_a = (a); \
9 for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ 9 for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \
10 if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ 10 if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \
11 __fc_a[__fc_i + 1], 2)) \ 11 __fc_a[__fc_i + 1], 2)) \
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index eeda67652766..6ea16c84293b 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -30,11 +30,13 @@ struct wpan_phy_cca;
30struct cfg802154_ops { 30struct cfg802154_ops {
31 struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, 31 struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
32 const char *name, 32 const char *name,
33 unsigned char name_assign_type,
33 int type); 34 int type);
34 void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, 35 void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
35 struct net_device *dev); 36 struct net_device *dev);
36 int (*add_virtual_intf)(struct wpan_phy *wpan_phy, 37 int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
37 const char *name, 38 const char *name,
39 unsigned char name_assign_type,
38 enum nl802154_iftype type, 40 enum nl802154_iftype type,
39 __le64 extended_addr); 41 __le64 extended_addr);
40 int (*del_virtual_intf)(struct wpan_phy *wpan_phy, 42 int (*del_virtual_intf)(struct wpan_phy *wpan_phy,
diff --git a/include/net/codel.h b/include/net/codel.h
index 8c0f78f209e8..267e70210061 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -121,12 +121,14 @@ static inline u32 codel_time_to_us(codel_time_t val)
121 * @target: target queue size (in time units) 121 * @target: target queue size (in time units)
122 * @ce_threshold: threshold for marking packets with ECN CE 122 * @ce_threshold: threshold for marking packets with ECN CE
123 * @interval: width of moving time window 123 * @interval: width of moving time window
124 * @mtu: device mtu, or minimal queue backlog in bytes.
124 * @ecn: is Explicit Congestion Notification enabled 125 * @ecn: is Explicit Congestion Notification enabled
125 */ 126 */
126struct codel_params { 127struct codel_params {
127 codel_time_t target; 128 codel_time_t target;
128 codel_time_t ce_threshold; 129 codel_time_t ce_threshold;
129 codel_time_t interval; 130 codel_time_t interval;
131 u32 mtu;
130 bool ecn; 132 bool ecn;
131}; 133};
132 134
@@ -172,10 +174,12 @@ struct codel_stats {
172 174
173#define CODEL_DISABLED_THRESHOLD INT_MAX 175#define CODEL_DISABLED_THRESHOLD INT_MAX
174 176
175static void codel_params_init(struct codel_params *params) 177static void codel_params_init(struct codel_params *params,
178 const struct Qdisc *sch)
176{ 179{
177 params->interval = MS2TIME(100); 180 params->interval = MS2TIME(100);
178 params->target = MS2TIME(5); 181 params->target = MS2TIME(5);
182 params->mtu = psched_mtu(qdisc_dev(sch));
179 params->ce_threshold = CODEL_DISABLED_THRESHOLD; 183 params->ce_threshold = CODEL_DISABLED_THRESHOLD;
180 params->ecn = false; 184 params->ecn = false;
181} 185}
@@ -187,7 +191,7 @@ static void codel_vars_init(struct codel_vars *vars)
187 191
188static void codel_stats_init(struct codel_stats *stats) 192static void codel_stats_init(struct codel_stats *stats)
189{ 193{
190 stats->maxpacket = 256; 194 stats->maxpacket = 0;
191} 195}
192 196
193/* 197/*
@@ -241,7 +245,7 @@ static bool codel_should_drop(const struct sk_buff *skb,
241 stats->maxpacket = qdisc_pkt_len(skb); 245 stats->maxpacket = qdisc_pkt_len(skb);
242 246
243 if (codel_time_before(vars->ldelay, params->target) || 247 if (codel_time_before(vars->ldelay, params->target) ||
244 sch->qstats.backlog <= stats->maxpacket) { 248 sch->qstats.backlog <= params->mtu) {
245 /* went below - stay below for at least interval */ 249 /* went below - stay below for at least interval */
246 vars->first_above_time = 0; 250 vars->first_above_time = 0;
247 return false; 251 return false;
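With codel_params_init() now taking the qdisc, the helper can seed params->mtu from the device, and codel_should_drop() compares the backlog against that MTU instead of the largest packet seen so far. A minimal sketch of how a CoDel-based qdisc would call the updated helpers (field names such as q->cparams are assumptions, not part of the patch):

	/* in the qdisc's ->init() path */
	codel_params_init(&q->cparams, sch);	/* also sets cparams.mtu = psched_mtu(qdisc_dev(sch)) */
	codel_vars_init(&q->cvars);
	codel_stats_init(&q->cstats);		/* maxpacket now starts at 0 and grows as packets arrive */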
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 67e0df14ba0f..2f8b7decace0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1728,6 +1728,8 @@ struct ieee80211_tx_control {
1728 * @sta: station table entry, %NULL for per-vif queue 1728 * @sta: station table entry, %NULL for per-vif queue
1729 * @tid: the TID for this queue (unused for per-vif queue) 1729 * @tid: the TID for this queue (unused for per-vif queue)
1730 * @ac: the AC for this queue 1730 * @ac: the AC for this queue
1731 * @drv_priv: data area for driver use, will always be aligned to
1732 * sizeof(void *).
1731 * 1733 *
1732 * The driver can obtain packets from this queue by calling 1734 * The driver can obtain packets from this queue by calling
1733 * ieee80211_tx_dequeue(). 1735 * ieee80211_tx_dequeue().
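The new @drv_priv member gives drivers pointer-aligned scratch space in each TX queue. A hedged sketch of how a driver might use it (the foo_* names are illustrative and assume the driver reserved a matching per-queue size at registration time):

	struct foo_txq {
		unsigned long flags;		/* per-queue driver state */
	};

	static void foo_service_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
	{
		struct foo_txq *ftxq = (void *)txq->drv_priv;	/* aligned to sizeof(void *) */
		struct sk_buff *skb = ieee80211_tx_dequeue(hw, txq);

		/* ... program skb and ftxq state into hardware ... */
	}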
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index e18e7fd43f47..7df28a4c23f9 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -247,19 +247,109 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
247 __put_unaligned_memmove64(swab64p(le64_src), be64_dst); 247 __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
248} 248}
249 249
250/* Basic interface to register ieee802154 device */ 250/**
251 * ieee802154_alloc_hw - Allocate a new hardware device
252 *
253 * This must be called once for each hardware device. The returned pointer
254 * must be used to refer to this device when calling other functions.
255 * mac802154 allocates a private data area for the driver pointed to by
 256 * @priv in &struct ieee802154_hw; the size of this area is given as
257 * @priv_data_len.
258 *
259 * @priv_data_len: length of private data
260 * @ops: callbacks for this device
261 *
262 * Return: A pointer to the new hardware device, or %NULL on error.
263 */
251struct ieee802154_hw * 264struct ieee802154_hw *
252ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops); 265ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
266
267/**
268 * ieee802154_free_hw - free hardware descriptor
269 *
270 * This function frees everything that was allocated, including the
271 * private data for the driver. You must call ieee802154_unregister_hw()
272 * before calling this function.
273 *
274 * @hw: the hardware to free
275 */
253void ieee802154_free_hw(struct ieee802154_hw *hw); 276void ieee802154_free_hw(struct ieee802154_hw *hw);
277
278/**
279 * ieee802154_register_hw - Register hardware device
280 *
281 * You must call this function before any other functions in
 282 * mac802154. Note that before the hardware can be registered, you
283 * need to fill the contained wpan_phy's information.
284 *
285 * @hw: the device to register as returned by ieee802154_alloc_hw()
286 *
287 * Return: 0 on success. An error code otherwise.
288 */
254int ieee802154_register_hw(struct ieee802154_hw *hw); 289int ieee802154_register_hw(struct ieee802154_hw *hw);
290
291/**
292 * ieee802154_unregister_hw - Unregister a hardware device
293 *
294 * This function instructs mac802154 to free allocated resources
295 * and unregister netdevices from the networking subsystem.
296 *
297 * @hw: the hardware to unregister
298 */
255void ieee802154_unregister_hw(struct ieee802154_hw *hw); 299void ieee802154_unregister_hw(struct ieee802154_hw *hw);
256 300
301/**
302 * ieee802154_rx - receive frame
303 *
304 * Use this function to hand received frames to mac802154. The receive
 305 * buffer in @skb must start with an IEEE 802.15.4 header. If a paged
 306 * @skb is used, the driver should put the ieee802154
307 * header of the frame on the linear part of the @skb to avoid memory
308 * allocation and/or memcpy by the stack.
309 *
310 * This function may not be called in IRQ context. Calls to this function
311 * for a single hardware must be synchronized against each other.
312 *
313 * @hw: the hardware this frame came in on
314 * @skb: the buffer to receive, owned by mac802154 after this call
315 */
257void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb); 316void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
317
318/**
319 * ieee802154_rx_irqsafe - receive frame
320 *
321 * Like ieee802154_rx() but can be called in IRQ context
 322 * (internally defers to a tasklet).
323 *
324 * @hw: the hardware this frame came in on
325 * @skb: the buffer to receive, owned by mac802154 after this call
326 * @lqi: link quality indicator
327 */
258void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, 328void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
259 u8 lqi); 329 u8 lqi);
260 330/**
331 * ieee802154_wake_queue - wake ieee802154 queue
332 * @hw: pointer as obtained from ieee802154_alloc_hw().
333 *
334 * Drivers should use this function instead of netif_wake_queue.
335 */
261void ieee802154_wake_queue(struct ieee802154_hw *hw); 336void ieee802154_wake_queue(struct ieee802154_hw *hw);
337
338/**
339 * ieee802154_stop_queue - stop ieee802154 queue
340 * @hw: pointer as obtained from ieee802154_alloc_hw().
341 *
342 * Drivers should use this function instead of netif_stop_queue.
343 */
262void ieee802154_stop_queue(struct ieee802154_hw *hw); 344void ieee802154_stop_queue(struct ieee802154_hw *hw);
345
346/**
347 * ieee802154_xmit_complete - frame transmission complete
348 *
349 * @hw: pointer as obtained from ieee802154_alloc_hw().
350 * @skb: buffer for transmission
351 * @ifs_handling: indicate interframe space handling
352 */
263void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, 353void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
264 bool ifs_handling); 354 bool ifs_handling);
265 355
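Taken together, the kernel-doc added above describes the usual driver lifecycle. A minimal sketch, with foo_priv/foo_ops and the probe/remove context assumed rather than taken from the patch:

	hw = ieee802154_alloc_hw(sizeof(struct foo_priv), &foo_ops);
	if (!hw)
		return -ENOMEM;

	/* fill in hw->phy (supported channels, etc.) before registering */
	ret = ieee802154_register_hw(hw);
	if (ret) {
		ieee802154_free_hw(hw);
		return ret;
	}

	/* from the receive interrupt: hand frames to the stack */
	ieee802154_rx_irqsafe(hw, skb, lqi);

	/* teardown mirrors bring-up */
	ieee802154_unregister_hw(hw);
	ieee802154_free_hw(hw);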
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ce55906b54a0..ac54c27a2bfd 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
160} 160}
161 161
162/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ 162/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
163static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) 163static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
164{ 164{
165 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { 165 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
166 struct sockaddr_in *out_in = (struct sockaddr_in *)out; 166 struct sockaddr_in *out_in = (struct sockaddr_in *)out;
@@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
173 out_in->sin6_family = AF_INET6; 173 out_in->sin6_family = AF_INET6;
174 memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16); 174 memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
175 } 175 }
176 return 0;
177} 176}
178 177
179static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, 178static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0e3ff30647d5..39ed2d2fbd51 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -105,7 +105,8 @@ enum ib_cm_data_size {
105 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, 105 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
106 IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, 106 IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
107 IB_CM_SIDR_REP_INFO_LENGTH = 72, 107 IB_CM_SIDR_REP_INFO_LENGTH = 72,
108 IB_CM_COMPARE_SIZE = 64 108 /* compare done u32 at a time */
109 IB_CM_COMPARE_SIZE = (64 / sizeof(u32))
109}; 110};
110 111
111struct ib_cm_id; 112struct ib_cm_id;
@@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
337#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) 338#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
338 339
339struct ib_cm_compare_data { 340struct ib_cm_compare_data {
340 u8 data[IB_CM_COMPARE_SIZE]; 341 u32 data[IB_CM_COMPARE_SIZE];
341 u8 mask[IB_CM_COMPARE_SIZE]; 342 u32 mask[IB_CM_COMPARE_SIZE];
342}; 343};
343 344
344/** 345/**
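With data[] and mask[] now u32 arrays, a masked comparison naturally proceeds one word at a time, as the new comment notes. An illustrative helper (the name is made up for this note, not part of ib_cm):

	static bool cm_masked_match(const struct ib_cm_compare_data *cmp,
				    const u32 *private_data)
	{
		int i;

		for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
			if ((private_data[i] ^ cmp->data[i]) & cmp->mask[i])
				return false;	/* a masked bit differs */
		return true;
	}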
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index 928b2775e992..fda31673a562 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -148,6 +148,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *);
148int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *); 148int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *);
149 149
150/** 150/**
151 * iwpm_remote_info_cb - Process remote connecting peer address info, which
152 * the port mapper has received from the connecting peer
153 *
154 * @cb: Contains the received message (payload and netlink header)
155 *
156 * Stores the IPv4/IPv6 address info in a hash table
157 */
158int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *);
159
160/**
151 * iwpm_mapping_error_cb - Process port mapper notification for error 161 * iwpm_mapping_error_cb - Process port mapper notification for error
152 * 162 *
153 * @skb: 163 * @skb:
@@ -175,6 +185,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
175int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *); 185int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
176 186
177/** 187/**
188 * iwpm_get_remote_info - Get the remote connecting peer address info
189 *
190 * @mapped_loc_addr: Mapped local address of the listening peer
191 * @mapped_rem_addr: Mapped remote address of the connecting peer
192 * @remote_addr: To store the remote address of the connecting peer
193 * @nl_client: The index of the netlink client
194 *
 195 * The remote address info is retrieved and provided to the client in
 196 * remote_addr. After that it is removed from the hash table.
197 */
198int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
199 struct sockaddr_storage *mapped_rem_addr,
200 struct sockaddr_storage *remote_addr, u8 nl_client);
201
202/**
178 * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address 203 * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
179 * info in a hash table 204 * info in a hash table
180 * @local_addr: Local ip/tcp address 205 * @local_addr: Local ip/tcp address
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 183eaab7c380..96e3f56519e7 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -36,5 +36,6 @@
36 for sequential scan */ 36 for sequential scan */
37#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ 37#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
38#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ 38#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
39#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
39 40
40#endif 41#endif
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index bc9abfe88c9a..139d4dd1cab8 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -31,4 +31,14 @@ struct mpls_label {
31#define MPLS_LS_TTL_MASK 0x000000FF 31#define MPLS_LS_TTL_MASK 0x000000FF
32#define MPLS_LS_TTL_SHIFT 0 32#define MPLS_LS_TTL_SHIFT 0
33 33
34/* Reserved labels */
35#define MPLS_LABEL_IPV4NULL 0 /* RFC3032 */
36#define MPLS_LABEL_RTALERT 1 /* RFC3032 */
37#define MPLS_LABEL_IPV6NULL 2 /* RFC3032 */
38#define MPLS_LABEL_IMPLNULL 3 /* RFC3032 */
39#define MPLS_LABEL_ENTROPY 7 /* RFC6790 */
40#define MPLS_LABEL_GAL 13 /* RFC5586 */
41#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */
42#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */
43
34#endif /* _UAPI_MPLS_H */ 44#endif /* _UAPI_MPLS_H */
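All of these sit in the 0..15 range that the RFCs reserve for special handling; a trivial illustrative check (the helper name is an assumption):

	static inline bool mpls_label_is_reserved(unsigned int label)
	{
		return label <= MPLS_LABEL_EXTENSION;	/* labels 0..15 are reserved */
	}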
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index de69170a30ce..6e4bb4270ca2 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -37,6 +37,7 @@ enum {
37 RDMA_NL_IWPM_ADD_MAPPING, 37 RDMA_NL_IWPM_ADD_MAPPING,
38 RDMA_NL_IWPM_QUERY_MAPPING, 38 RDMA_NL_IWPM_QUERY_MAPPING,
39 RDMA_NL_IWPM_REMOVE_MAPPING, 39 RDMA_NL_IWPM_REMOVE_MAPPING,
40 RDMA_NL_IWPM_REMOTE_INFO,
40 RDMA_NL_IWPM_HANDLE_ERR, 41 RDMA_NL_IWPM_HANDLE_ERR,
41 RDMA_NL_IWPM_MAPINFO, 42 RDMA_NL_IWPM_MAPINFO,
42 RDMA_NL_IWPM_MAPINFO_NUM, 43 RDMA_NL_IWPM_MAPINFO_NUM,
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 143ca5ffab7a..4478f4b4aae2 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,6 +191,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
191 struct gnttab_unmap_grant_ref *kunmap_ops, 191 struct gnttab_unmap_grant_ref *kunmap_ops,
192 struct page **pages, unsigned int count); 192 struct page **pages, unsigned int count);
193void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item); 193void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
194int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
194 195
195 196
196/* Perform a batch of grant map/copy operations. Retry every batch slot 197/* Perform a batch of grant map/copy operations. Retry every batch slot
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index c643e6a94c9a..0ce4f32017ea 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled);
13 13
14void xen_timer_resume(void); 14void xen_timer_resume(void);
15void xen_arch_resume(void); 15void xen_arch_resume(void);
16void xen_arch_suspend(void);
16 17
17void xen_resume_notifier_register(struct notifier_block *nb); 18void xen_resume_notifier_register(struct notifier_block *nb);
18void xen_resume_notifier_unregister(struct notifier_block *nb); 19void xen_resume_notifier_unregister(struct notifier_block *nb);
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 8369ffa5f33d..a95bbdb2a502 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -225,10 +225,11 @@ dev_t name_to_dev_t(const char *name)
225#endif 225#endif
226 226
227 if (strncmp(name, "/dev/", 5) != 0) { 227 if (strncmp(name, "/dev/", 5) != 0) {
228 unsigned maj, min; 228 unsigned maj, min, offset;
229 char dummy; 229 char dummy;
230 230
231 if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) { 231 if ((sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) ||
232 (sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset, &dummy) == 3)) {
232 res = MKDEV(maj, min); 233 res = MKDEV(maj, min);
233 if (maj != MAJOR(res) || min != MINOR(res)) 234 if (maj != MAJOR(res) || min != MINOR(res))
234 goto fail; 235 goto fail;
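The extra sscanf() pattern means the numeric form may carry a third colon-separated field; name_to_dev_t() parses it, but only maj/min feed MKDEV(). Illustrative inputs (values made up):

	/*	root=8:1	-> MKDEV(8, 1)
	 *	root=8:1:0:	-> MKDEV(8, 1); the offset field is accepted but unused here
	 */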
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
index 988dc58e8847..2feb6feca0cc 100644
--- a/kernel/irq/dummychip.c
+++ b/kernel/irq/dummychip.c
@@ -57,5 +57,6 @@ struct irq_chip dummy_irq_chip = {
57 .irq_ack = noop, 57 .irq_ack = noop,
58 .irq_mask = noop, 58 .irq_mask = noop,
59 .irq_unmask = noop, 59 .irq_unmask = noop,
60 .flags = IRQCHIP_SKIP_SET_WAKE,
60}; 61};
61EXPORT_SYMBOL_GPL(dummy_irq_chip); 62EXPORT_SYMBOL_GPL(dummy_irq_chip);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 233165da782f..8cf7304b2867 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -162,11 +162,14 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
162static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; 162static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
163module_param(kthread_prio, int, 0644); 163module_param(kthread_prio, int, 0644);
164 164
165/* Delay in jiffies for grace-period initialization delays. */ 165/* Delay in jiffies for grace-period initialization delays, debug only. */
166static int gp_init_delay = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) 166#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
167 ? CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY 167static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
168 : 0;
169module_param(gp_init_delay, int, 0644); 168module_param(gp_init_delay, int, 0644);
169#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
170static const int gp_init_delay;
171#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
172#define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */
170 173
171/* 174/*
172 * Track the rcutorture test sequence number and the update version 175 * Track the rcutorture test sequence number and the update version
@@ -1843,9 +1846,8 @@ static int rcu_gp_init(struct rcu_state *rsp)
1843 raw_spin_unlock_irq(&rnp->lock); 1846 raw_spin_unlock_irq(&rnp->lock);
1844 cond_resched_rcu_qs(); 1847 cond_resched_rcu_qs();
1845 ACCESS_ONCE(rsp->gp_activity) = jiffies; 1848 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1846 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) && 1849 if (gp_init_delay > 0 &&
1847 gp_init_delay > 0 && 1850 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
1848 !(rsp->gpnum % (rcu_num_nodes * 10)))
1849 schedule_timeout_uninterruptible(gp_init_delay); 1851 schedule_timeout_uninterruptible(gp_init_delay);
1850 } 1852 }
1851 1853
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 11dc22a6983b..637a09461c1d 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -117,11 +117,7 @@ static int __clockevents_set_state(struct clock_event_device *dev,
117 /* Transition with new state-specific callbacks */ 117 /* Transition with new state-specific callbacks */
118 switch (state) { 118 switch (state) {
119 case CLOCK_EVT_STATE_DETACHED: 119 case CLOCK_EVT_STATE_DETACHED:
120 /* 120 /* The clockevent device is getting replaced. Shut it down. */
121 * This is an internal state, which is guaranteed to go from
122 * SHUTDOWN to DETACHED. No driver interaction required.
123 */
124 return 0;
125 121
126 case CLOCK_EVT_STATE_SHUTDOWN: 122 case CLOCK_EVT_STATE_SHUTDOWN:
127 return dev->set_state_shutdown(dev); 123 return dev->set_state_shutdown(dev);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 692bf7184c8c..25a086bcb700 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -178,12 +178,13 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
178EXPORT_SYMBOL(ftrace_print_hex_seq); 178EXPORT_SYMBOL(ftrace_print_hex_seq);
179 179
180const char * 180const char *
181ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len, 181ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
182 size_t el_size) 182 size_t el_size)
183{ 183{
184 const char *ret = trace_seq_buffer_ptr(p); 184 const char *ret = trace_seq_buffer_ptr(p);
185 const char *prefix = ""; 185 const char *prefix = "";
186 void *ptr = (void *)buf; 186 void *ptr = (void *)buf;
187 size_t buf_len = count * el_size;
187 188
188 trace_seq_putc(p, '{'); 189 trace_seq_putc(p, '{');
189 190
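Renaming the parameter to count reflects the new contract: callers pass the number of elements and the element size, and the helper derives the byte length itself. A hedged call-site sketch (my_array is illustrative):

	ftrace_print_array_seq(p, my_array, ARRAY_SIZE(my_array), sizeof(my_array[0]));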
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 17670573dda8..ba2b0c87e65b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1281,6 +1281,7 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY
1281 int "How much to slow down RCU grace-period initialization" 1281 int "How much to slow down RCU grace-period initialization"
1282 range 0 5 1282 range 0 5
1283 default 3 1283 default 3
1284 depends on RCU_TORTURE_TEST_SLOW_INIT
1284 help 1285 help
1285 This option specifies the number of jiffies to wait between 1286 This option specifies the number of jiffies to wait between
1286 each rcu_node structure initialization. 1287 each rcu_node structure initialization.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fecaedc80a2..777eda7d1ab4 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -10,8 +10,11 @@ config KASAN
10 help 10 help
11 Enables kernel address sanitizer - runtime memory debugger, 11 Enables kernel address sanitizer - runtime memory debugger,
12 designed to find out-of-bounds accesses and use-after-free bugs. 12 designed to find out-of-bounds accesses and use-after-free bugs.
13 This is strictly debugging feature. It consumes about 1/8 13 This is strictly a debugging feature and it requires a gcc version
14 of available memory and brings about ~x3 performance slowdown. 14 of 4.9.2 or later. Detection of out of bounds accesses to stack or
15 global variables requires gcc 5.0 or later.
16 This feature consumes about 1/8 of available memory and brings about
17 ~x3 performance slowdown.
15 For better error detection enable CONFIG_STACKTRACE, 18 For better error detection enable CONFIG_STACKTRACE,
16 and add slub_debug=U to boot cmdline. 19 and add slub_debug=U to boot cmdline.
17 20
@@ -40,6 +43,7 @@ config KASAN_INLINE
40 memory accesses. This is faster than outline (in some workloads 43 memory accesses. This is faster than outline (in some workloads
41 it gives about x2 boost over outline instrumentation), but 44 it gives about x2 boost over outline instrumentation), but
42 make kernel's .text size much bigger. 45 make kernel's .text size much bigger.
46 This requires a gcc version of 5.0 or later.
43 47
44endchoice 48endchoice
45 49
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
deleted file mode 100644
index 3e3be40c6a6e..000000000000
--- a/lib/find_last_bit.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/* find_last_bit.c: fallback find next bit implementation
2 *
3 * Copyright (C) 2008 IBM Corporation
4 * Written by Rusty Russell <rusty@rustcorp.com.au>
5 * (Inspired by David Howell's find_next_bit implementation)
6 *
7 * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
8 * size and improve performance, 2015.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/bitops.h>
17#include <linux/bitmap.h>
18#include <linux/export.h>
19#include <linux/kernel.h>
20
21#ifndef find_last_bit
22
23unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
24{
25 if (size) {
26 unsigned long val = BITMAP_LAST_WORD_MASK(size);
27 unsigned long idx = (size-1) / BITS_PER_LONG;
28
29 do {
30 val &= addr[idx];
31 if (val)
32 return idx * BITS_PER_LONG + __fls(val);
33
34 val = ~0ul;
35 } while (idx--);
36 }
37 return size;
38}
39EXPORT_SYMBOL(find_last_bit);
40
41#endif
diff --git a/lib/string.c b/lib/string.c
index a5792019193c..bb3d4b6993c4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
607void memzero_explicit(void *s, size_t count) 607void memzero_explicit(void *s, size_t count)
608{ 608{
609 memset(s, 0, count); 609 memset(s, 0, count);
610 barrier(); 610 barrier_data(s);
611} 611}
612EXPORT_SYMBOL(memzero_explicit); 612EXPORT_SYMBOL(memzero_explicit);
613 613
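barrier_data() hands the buffer address to a compiler barrier with a memory clobber, so the preceding memset() cannot be optimized away even for a stack buffer that is about to go out of scope. A minimal sketch of the intended use:

	static void handle_secret(void)
	{
		u8 key[32];

		/* derive and use key ... */
		memzero_explicit(key, sizeof(key));	/* survives dead-store elimination */
	}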
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 329caf56df22..4ca5fe0042e1 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -34,13 +34,13 @@ static int hwpoison_inject(void *data, u64 val)
34 if (!hwpoison_filter_enable) 34 if (!hwpoison_filter_enable)
35 goto inject; 35 goto inject;
36 36
37 if (!PageLRU(p) && !PageHuge(p)) 37 if (!PageLRU(hpage) && !PageHuge(p))
38 shake_page(p, 0); 38 shake_page(hpage, 0);
39 /* 39 /*
40 * This implies unable to support non-LRU pages. 40 * This implies unable to support non-LRU pages.
41 */ 41 */
42 if (!PageLRU(p) && !PageHuge(p)) 42 if (!PageLRU(hpage) && !PageHuge(p))
43 return 0; 43 goto put_out;
44 44
45 /* 45 /*
46 * do a racy check with elevated page count, to make sure PG_hwpoison 46 * do a racy check with elevated page count, to make sure PG_hwpoison
@@ -52,11 +52,14 @@ static int hwpoison_inject(void *data, u64 val)
52 err = hwpoison_filter(hpage); 52 err = hwpoison_filter(hpage);
53 unlock_page(hpage); 53 unlock_page(hpage);
54 if (err) 54 if (err)
55 return 0; 55 goto put_out;
56 56
57inject: 57inject:
58 pr_info("Injecting memory failure at pfn %#lx\n", pfn); 58 pr_info("Injecting memory failure at pfn %#lx\n", pfn);
59 return memory_failure(pfn, 18, MF_COUNT_INCREASED); 59 return memory_failure(pfn, 18, MF_COUNT_INCREASED);
60put_out:
61 put_page(hpage);
62 return 0;
60} 63}
61 64
62static int hwpoison_unpoison(void *data, u64 val) 65static int hwpoison_unpoison(void *data, u64 val)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d9359b770cd9..501820c815b3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1187,10 +1187,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1187 * The check (unnecessarily) ignores LRU pages being isolated and 1187 * The check (unnecessarily) ignores LRU pages being isolated and
1188 * walked by the page reclaim code, however that's not a big loss. 1188 * walked by the page reclaim code, however that's not a big loss.
1189 */ 1189 */
1190 if (!PageHuge(p) && !PageTransTail(p)) { 1190 if (!PageHuge(p)) {
1191 if (!PageLRU(p)) 1191 if (!PageLRU(hpage))
1192 shake_page(p, 0); 1192 shake_page(hpage, 0);
1193 if (!PageLRU(p)) { 1193 if (!PageLRU(hpage)) {
1194 /* 1194 /*
1195 * shake_page could have turned it free. 1195 * shake_page could have turned it free.
1196 */ 1196 */
@@ -1777,12 +1777,12 @@ int soft_offline_page(struct page *page, int flags)
1777 } else if (ret == 0) { /* for free pages */ 1777 } else if (ret == 0) { /* for free pages */
1778 if (PageHuge(page)) { 1778 if (PageHuge(page)) {
1779 set_page_hwpoison_huge_page(hpage); 1779 set_page_hwpoison_huge_page(hpage);
1780 dequeue_hwpoisoned_huge_page(hpage); 1780 if (!dequeue_hwpoisoned_huge_page(hpage))
1781 atomic_long_add(1 << compound_order(hpage), 1781 atomic_long_add(1 << compound_order(hpage),
1782 &num_poisoned_pages); 1782 &num_poisoned_pages);
1783 } else { 1783 } else {
1784 SetPageHWPoison(page); 1784 if (!TestSetPageHWPoison(page))
1785 atomic_long_inc(&num_poisoned_pages); 1785 atomic_long_inc(&num_poisoned_pages);
1786 } 1786 }
1787 } 1787 }
1788 unset_migratetype_isolate(page, MIGRATE_MOVABLE); 1788 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5daf5568b9e1..eb59f7eea508 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
580 long x; 580 long x;
581 581
582 x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, 582 x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
583 limit - setpoint + 1); 583 (limit - setpoint) | 1);
584 pos_ratio = x; 584 pos_ratio = x;
585 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; 585 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
586 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; 586 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
807 * scale global setpoint to bdi's: 807 * scale global setpoint to bdi's:
808 * bdi_setpoint = setpoint * bdi_thresh / thresh 808 * bdi_setpoint = setpoint * bdi_thresh / thresh
809 */ 809 */
810 x = div_u64((u64)bdi_thresh << 16, thresh + 1); 810 x = div_u64((u64)bdi_thresh << 16, thresh | 1);
811 bdi_setpoint = setpoint * (u64)x >> 16; 811 bdi_setpoint = setpoint * (u64)x >> 16;
812 /* 812 /*
813 * Use span=(8*write_bw) in single bdi case as indicated by 813 * Use span=(8*write_bw) in single bdi case as indicated by
@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
822 822
823 if (bdi_dirty < x_intercept - span / 4) { 823 if (bdi_dirty < x_intercept - span / 4) {
824 pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), 824 pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
825 x_intercept - bdi_setpoint + 1); 825 (x_intercept - bdi_setpoint) | 1);
826 } else 826 } else
827 pos_ratio /= 4; 827 pos_ratio /= 4;
828 828
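The arithmetic point of switching from "+ 1" to "| 1" as the divide-by-zero guard: OR-ing in the low bit can never wrap, while adding one can. Worked for the edge cases:

	/*	x == 0:		x + 1 == 1,		x | 1 == 1
	 *	x == ULONG_MAX:	x + 1 == 0 (wraps),	x | 1 == ULONG_MAX
	 * For any large x, setting the low bit perturbs the ratio negligibly.
	 */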
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 476709bd068a..4663c3dad3f5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1557,7 +1557,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
1557{ 1557{
1558 BT_DBG("%s %p", hdev->name, hdev); 1558 BT_DBG("%s %p", hdev->name, hdev);
1559 1559
1560 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) { 1560 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1561 test_bit(HCI_UP, &hdev->flags)) {
1561 /* Execute vendor specific shutdown routine */ 1562 /* Execute vendor specific shutdown routine */
1562 if (hdev->shutdown) 1563 if (hdev->shutdown)
1563 hdev->shutdown(hdev); 1564 hdev->shutdown(hdev);
diff --git a/net/core/dev.c b/net/core/dev.c
index fd012bbe0486..90a568a150b4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5190,7 +5190,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
5190 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper)) 5190 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
5191 return -EBUSY; 5191 return -EBUSY;
5192 5192
5193 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper)) 5193 if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
5194 return -EEXIST; 5194 return -EEXIST;
5195 5195
5196 if (master && netdev_master_upper_dev_get(dev)) 5196 if (master && netdev_master_upper_dev_get(dev))
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index a665bf490c88..cbee75f2fc28 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -641,7 +641,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
641 641
642 id = peernet2id(net, peer); 642 id = peernet2id(net, peer);
643 err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 643 err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
644 RTM_GETNSID, net, id); 644 RTM_NEWNSID, net, id);
645 if (err < 0) 645 if (err < 0)
646 goto err_out; 646 goto err_out;
647 647
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 05dab2957cd4..4adfd4d5471b 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -3,7 +3,9 @@ obj-$(CONFIG_IEEE802154_SOCKET) += ieee802154_socket.o
3obj-y += 6lowpan/ 3obj-y += 6lowpan/
4 4
5ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \ 5ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \
6 header_ops.o sysfs.o nl802154.o 6 header_ops.o sysfs.o nl802154.o trace.o
7ieee802154_socket-y := socket.o 7ieee802154_socket-y := socket.o
8 8
9CFLAGS_trace.o := -I$(src)
10
9ccflags-y += -D__CHECK_ENDIAN__ 11ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 1b9d25f6e898..346c6665d25e 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -175,6 +175,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
175 int rc = -ENOBUFS; 175 int rc = -ENOBUFS;
176 struct net_device *dev; 176 struct net_device *dev;
177 int type = __IEEE802154_DEV_INVALID; 177 int type = __IEEE802154_DEV_INVALID;
178 unsigned char name_assign_type;
178 179
179 pr_debug("%s\n", __func__); 180 pr_debug("%s\n", __func__);
180 181
@@ -190,8 +191,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
190 if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] 191 if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
191 != '\0') 192 != '\0')
192 return -EINVAL; /* phy name should be null-terminated */ 193 return -EINVAL; /* phy name should be null-terminated */
194 name_assign_type = NET_NAME_USER;
193 } else { 195 } else {
194 devname = "wpan%d"; 196 devname = "wpan%d";
197 name_assign_type = NET_NAME_ENUM;
195 } 198 }
196 199
197 if (strlen(devname) >= IFNAMSIZ) 200 if (strlen(devname) >= IFNAMSIZ)
@@ -221,7 +224,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
221 } 224 }
222 225
223 dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname, 226 dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname,
224 type); 227 name_assign_type, type);
225 if (IS_ERR(dev)) { 228 if (IS_ERR(dev)) {
226 rc = PTR_ERR(dev); 229 rc = PTR_ERR(dev);
227 goto nla_put_failure; 230 goto nla_put_failure;
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index a4daf91b8d0a..f3c12f6a4a39 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -589,7 +589,7 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
589 589
590 return rdev_add_virtual_intf(rdev, 590 return rdev_add_virtual_intf(rdev,
591 nla_data(info->attrs[NL802154_ATTR_IFNAME]), 591 nla_data(info->attrs[NL802154_ATTR_IFNAME]),
592 type, extended_addr); 592 NET_NAME_USER, type, extended_addr);
593} 593}
594 594
595static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info) 595static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index 7c46732fad2b..7b5a9dd94fe5 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -4,13 +4,16 @@
4#include <net/cfg802154.h> 4#include <net/cfg802154.h>
5 5
6#include "core.h" 6#include "core.h"
7#include "trace.h"
7 8
8static inline struct net_device * 9static inline struct net_device *
9rdev_add_virtual_intf_deprecated(struct cfg802154_registered_device *rdev, 10rdev_add_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
10 const char *name, int type) 11 const char *name,
12 unsigned char name_assign_type,
13 int type)
11{ 14{
12 return rdev->ops->add_virtual_intf_deprecated(&rdev->wpan_phy, name, 15 return rdev->ops->add_virtual_intf_deprecated(&rdev->wpan_phy, name,
13 type); 16 name_assign_type, type);
14} 17}
15 18
16static inline void 19static inline void
@@ -22,75 +25,131 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
22 25
23static inline int 26static inline int
24rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name, 27rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
28 unsigned char name_assign_type,
25 enum nl802154_iftype type, __le64 extended_addr) 29 enum nl802154_iftype type, __le64 extended_addr)
26{ 30{
27 return rdev->ops->add_virtual_intf(&rdev->wpan_phy, name, type, 31 int ret;
32
33 trace_802154_rdev_add_virtual_intf(&rdev->wpan_phy, name, type,
28 extended_addr); 34 extended_addr);
35 ret = rdev->ops->add_virtual_intf(&rdev->wpan_phy, name,
36 name_assign_type, type,
37 extended_addr);
38 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
39 return ret;
29} 40}
30 41
31static inline int 42static inline int
32rdev_del_virtual_intf(struct cfg802154_registered_device *rdev, 43rdev_del_virtual_intf(struct cfg802154_registered_device *rdev,
33 struct wpan_dev *wpan_dev) 44 struct wpan_dev *wpan_dev)
34{ 45{
35 return rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev); 46 int ret;
47
48 trace_802154_rdev_del_virtual_intf(&rdev->wpan_phy, wpan_dev);
49 ret = rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev);
50 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
51 return ret;
36} 52}
37 53
38static inline int 54static inline int
39rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel) 55rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel)
40{ 56{
41 return rdev->ops->set_channel(&rdev->wpan_phy, page, channel); 57 int ret;
58
59 trace_802154_rdev_set_channel(&rdev->wpan_phy, page, channel);
60 ret = rdev->ops->set_channel(&rdev->wpan_phy, page, channel);
61 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
62 return ret;
42} 63}
43 64
44static inline int 65static inline int
45rdev_set_cca_mode(struct cfg802154_registered_device *rdev, 66rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
46 const struct wpan_phy_cca *cca) 67 const struct wpan_phy_cca *cca)
47{ 68{
48 return rdev->ops->set_cca_mode(&rdev->wpan_phy, cca); 69 int ret;
70
71 trace_802154_rdev_set_cca_mode(&rdev->wpan_phy, cca);
72 ret = rdev->ops->set_cca_mode(&rdev->wpan_phy, cca);
73 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
74 return ret;
49} 75}
50 76
51static inline int 77static inline int
52rdev_set_pan_id(struct cfg802154_registered_device *rdev, 78rdev_set_pan_id(struct cfg802154_registered_device *rdev,
53 struct wpan_dev *wpan_dev, __le16 pan_id) 79 struct wpan_dev *wpan_dev, __le16 pan_id)
54{ 80{
55 return rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id); 81 int ret;
82
83 trace_802154_rdev_set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
84 ret = rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
85 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
86 return ret;
56} 87}
57 88
58static inline int 89static inline int
59rdev_set_short_addr(struct cfg802154_registered_device *rdev, 90rdev_set_short_addr(struct cfg802154_registered_device *rdev,
60 struct wpan_dev *wpan_dev, __le16 short_addr) 91 struct wpan_dev *wpan_dev, __le16 short_addr)
61{ 92{
62 return rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr); 93 int ret;
94
95 trace_802154_rdev_set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
96 ret = rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
97 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
98 return ret;
63} 99}
64 100
65static inline int 101static inline int
66rdev_set_backoff_exponent(struct cfg802154_registered_device *rdev, 102rdev_set_backoff_exponent(struct cfg802154_registered_device *rdev,
67 struct wpan_dev *wpan_dev, u8 min_be, u8 max_be) 103 struct wpan_dev *wpan_dev, u8 min_be, u8 max_be)
68{ 104{
69 return rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev, 105 int ret;
106
107 trace_802154_rdev_set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
70 min_be, max_be); 108 min_be, max_be);
109 ret = rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
110 min_be, max_be);
111 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
112 return ret;
71} 113}
72 114
73static inline int 115static inline int
74rdev_set_max_csma_backoffs(struct cfg802154_registered_device *rdev, 116rdev_set_max_csma_backoffs(struct cfg802154_registered_device *rdev,
75 struct wpan_dev *wpan_dev, u8 max_csma_backoffs) 117 struct wpan_dev *wpan_dev, u8 max_csma_backoffs)
76{ 118{
77 return rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev, 119 int ret;
78 max_csma_backoffs); 120
121 trace_802154_rdev_set_csma_backoffs(&rdev->wpan_phy, wpan_dev,
122 max_csma_backoffs);
123 ret = rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev,
124 max_csma_backoffs);
125 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
126 return ret;
79} 127}
80 128
81static inline int 129static inline int
82rdev_set_max_frame_retries(struct cfg802154_registered_device *rdev, 130rdev_set_max_frame_retries(struct cfg802154_registered_device *rdev,
83 struct wpan_dev *wpan_dev, s8 max_frame_retries) 131 struct wpan_dev *wpan_dev, s8 max_frame_retries)
84{ 132{
85 return rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev, 133 int ret;
134
135 trace_802154_rdev_set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
86 max_frame_retries); 136 max_frame_retries);
137 ret = rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
138 max_frame_retries);
139 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
140 return ret;
87} 141}
88 142
89static inline int 143static inline int
90rdev_set_lbt_mode(struct cfg802154_registered_device *rdev, 144rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
91 struct wpan_dev *wpan_dev, bool mode) 145 struct wpan_dev *wpan_dev, bool mode)
92{ 146{
93 return rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode); 147 int ret;
148
149 trace_802154_rdev_set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
150 ret = rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
151 trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
152 return ret;
94} 153}
95 154
96#endif /* __CFG802154_RDEV_OPS */ 155#endif /* __CFG802154_RDEV_OPS */
diff --git a/net/ieee802154/trace.c b/net/ieee802154/trace.c
new file mode 100644
index 000000000000..95f997fad755
--- /dev/null
+++ b/net/ieee802154/trace.c
@@ -0,0 +1,7 @@
1#include <linux/module.h>
2
3#ifndef __CHECKER__
4#define CREATE_TRACE_POINTS
5#include "trace.h"
6
7#endif
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
new file mode 100644
index 000000000000..5ac25eb6ed17
--- /dev/null
+++ b/net/ieee802154/trace.h
@@ -0,0 +1,247 @@
1/* Based on net/wireless/tracing.h */
2
3#undef TRACE_SYSTEM
4#define TRACE_SYSTEM cfg802154
5
6#if !defined(__RDEV_CFG802154_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ)
7#define __RDEV_CFG802154_OPS_TRACE
8
9#include <linux/tracepoint.h>
10
11#include <net/cfg802154.h>
12
13#define MAXNAME 32
14#define WPAN_PHY_ENTRY __array(char, wpan_phy_name, MAXNAME)
15#define WPAN_PHY_ASSIGN strlcpy(__entry->wpan_phy_name, \
16 wpan_phy_name(wpan_phy), \
17 MAXNAME)
18#define WPAN_PHY_PR_FMT "%s"
19#define WPAN_PHY_PR_ARG __entry->wpan_phy_name
20
21#define WPAN_DEV_ENTRY __field(u32, identifier)
22#define WPAN_DEV_ASSIGN (__entry->identifier) = (!IS_ERR_OR_NULL(wpan_dev) \
23 ? wpan_dev->identifier : 0)
24#define WPAN_DEV_PR_FMT "wpan_dev(%u)"
25#define WPAN_DEV_PR_ARG (__entry->identifier)
26
27#define WPAN_CCA_ENTRY __field(enum nl802154_cca_modes, cca_mode) \
28 __field(enum nl802154_cca_opts, cca_opt)
29#define WPAN_CCA_ASSIGN \
30 do { \
31 (__entry->cca_mode) = cca->mode; \
32 (__entry->cca_opt) = cca->opt; \
33 } while (0)
34#define WPAN_CCA_PR_FMT "cca_mode: %d, cca_opt: %d"
35#define WPAN_CCA_PR_ARG __entry->cca_mode, __entry->cca_opt
36
37#define BOOL_TO_STR(bo) (bo) ? "true" : "false"
38
39/*************************************************************
40 * rdev->ops traces *
41 *************************************************************/
42
43TRACE_EVENT(802154_rdev_add_virtual_intf,
44 TP_PROTO(struct wpan_phy *wpan_phy, char *name,
45 enum nl802154_iftype type, __le64 extended_addr),
46 TP_ARGS(wpan_phy, name, type, extended_addr),
47 TP_STRUCT__entry(
48 WPAN_PHY_ENTRY
49 __string(vir_intf_name, name ? name : "<noname>")
50 __field(enum nl802154_iftype, type)
51 __field(__le64, extended_addr)
52 ),
53 TP_fast_assign(
54 WPAN_PHY_ASSIGN;
55 __assign_str(vir_intf_name, name ? name : "<noname>");
56 __entry->type = type;
57 __entry->extended_addr = extended_addr;
58 ),
59 TP_printk(WPAN_PHY_PR_FMT ", virtual intf name: %s, type: %d, ea %llx",
60 WPAN_PHY_PR_ARG, __get_str(vir_intf_name), __entry->type,
61 __le64_to_cpu(__entry->extended_addr))
62);
63
64TRACE_EVENT(802154_rdev_del_virtual_intf,
65 TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev),
66 TP_ARGS(wpan_phy, wpan_dev),
67 TP_STRUCT__entry(
68 WPAN_PHY_ENTRY
69 WPAN_DEV_ENTRY
70 ),
71 TP_fast_assign(
72 WPAN_PHY_ASSIGN;
73 WPAN_DEV_ASSIGN;
74 ),
75 TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT, WPAN_PHY_PR_ARG,
76 WPAN_DEV_PR_ARG)
77);
78
79TRACE_EVENT(802154_rdev_set_channel,
80 TP_PROTO(struct wpan_phy *wpan_phy, u8 page, u8 channel),
81 TP_ARGS(wpan_phy, page, channel),
82 TP_STRUCT__entry(
83 WPAN_PHY_ENTRY
84 __field(u8, page)
85 __field(u8, channel)
86 ),
87 TP_fast_assign(
88 WPAN_PHY_ASSIGN;
89 __entry->page = page;
90 __entry->channel = channel;
91 ),
92 TP_printk(WPAN_PHY_PR_FMT ", page: %d, channel: %d", WPAN_PHY_PR_ARG,
93 __entry->page, __entry->channel)
94);
95
96TRACE_EVENT(802154_rdev_set_cca_mode,
97 TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca),
98 TP_ARGS(wpan_phy, cca),
99 TP_STRUCT__entry(
100 WPAN_PHY_ENTRY
101 WPAN_CCA_ENTRY
102 ),
103 TP_fast_assign(
104 WPAN_PHY_ASSIGN;
105 WPAN_CCA_ASSIGN;
106 ),
107 TP_printk(WPAN_PHY_PR_FMT ", " WPAN_CCA_PR_FMT, WPAN_PHY_PR_ARG,
108 WPAN_CCA_PR_ARG)
109);
110
111DECLARE_EVENT_CLASS(802154_le16_template,
112 TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
113 __le16 le16arg),
114 TP_ARGS(wpan_phy, wpan_dev, le16arg),
115 TP_STRUCT__entry(
116 WPAN_PHY_ENTRY
117 WPAN_DEV_ENTRY
118 __field(__le16, le16arg)
119 ),
120 TP_fast_assign(
121 WPAN_PHY_ASSIGN;
122 WPAN_DEV_ASSIGN;
123 __entry->le16arg = le16arg;
124 ),
125 TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", pan id: 0x%04x",
126 WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG,
127 __le16_to_cpu(__entry->le16arg))
128);
129
130DEFINE_EVENT(802154_le16_template, 802154_rdev_set_pan_id,
131 TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
132 __le16 le16arg),
133 TP_ARGS(wpan_phy, wpan_dev, le16arg)
134);
135
136DEFINE_EVENT_PRINT(802154_le16_template, 802154_rdev_set_short_addr,
137 TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
138 __le16 le16arg),
139 TP_ARGS(wpan_phy, wpan_dev, le16arg),
140 TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", sa: 0x%04x",
141 WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG,
142 __le16_to_cpu(__entry->le16arg))
143);
144
145TRACE_EVENT(802154_rdev_set_backoff_exponent,
146 TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
147 u8 min_be, u8 max_be),
148 TP_ARGS(wpan_phy, wpan_dev, min_be, max_be),
149 TP_STRUCT__entry(
150 WPAN_PHY_ENTRY
151 WPAN_DEV_ENTRY
152 __field(u8, min_be)
153 __field(u8, max_be)
154 ),
155 TP_fast_assign(
156 WPAN_PHY_ASSIGN;
157 WPAN_DEV_ASSIGN;
158 __entry->min_be = min_be;
159 __entry->max_be = max_be;
160 ),
161
162 TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
163 ", min be: %d, max_be: %d", WPAN_PHY_PR_ARG,
164 WPAN_DEV_PR_ARG, __entry->min_be, __entry->max_be)
165);
166
167TRACE_EVENT(802154_rdev_set_csma_backoffs,
168 TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
169 u8 max_csma_backoffs),
170 TP_ARGS(wpan_phy, wpan_dev, max_csma_backoffs),
171 TP_STRUCT__entry(
172 WPAN_PHY_ENTRY
173 WPAN_DEV_ENTRY
174 __field(u8, max_csma_backoffs)
175 ),
176 TP_fast_assign(
177 WPAN_PHY_ASSIGN;
178 WPAN_DEV_ASSIGN;
179 __entry->max_csma_backoffs = max_csma_backoffs;
180 ),
181
182 TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
183 ", max csma backoffs: %d", WPAN_PHY_PR_ARG,
184 WPAN_DEV_PR_ARG, __entry->max_csma_backoffs)
185);
186
187TRACE_EVENT(802154_rdev_set_max_frame_retries,
188 TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
189 s8 max_frame_retries),
190 TP_ARGS(wpan_phy, wpan_dev, max_frame_retries),
191 TP_STRUCT__entry(
192 WPAN_PHY_ENTRY
193 WPAN_DEV_ENTRY
194 __field(s8, max_frame_retries)
195 ),
196 TP_fast_assign(
197 WPAN_PHY_ASSIGN;
198 WPAN_DEV_ASSIGN;
199 __entry->max_frame_retries = max_frame_retries;
200 ),
201
202 TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
203 ", max frame retries: %d", WPAN_PHY_PR_ARG,
204 WPAN_DEV_PR_ARG, __entry->max_frame_retries)
205);
206
207TRACE_EVENT(802154_rdev_set_lbt_mode,
208 TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
209 bool mode),
210 TP_ARGS(wpan_phy, wpan_dev, mode),
211 TP_STRUCT__entry(
212 WPAN_PHY_ENTRY
213 WPAN_DEV_ENTRY
214 __field(bool, mode)
215 ),
216 TP_fast_assign(
217 WPAN_PHY_ASSIGN;
218 WPAN_DEV_ASSIGN;
219 __entry->mode = mode;
220 ),
221 TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
222 ", lbt mode: %s", WPAN_PHY_PR_ARG,
223 WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
224);
225
226TRACE_EVENT(802154_rdev_return_int,
227 TP_PROTO(struct wpan_phy *wpan_phy, int ret),
228 TP_ARGS(wpan_phy, ret),
229 TP_STRUCT__entry(
230 WPAN_PHY_ENTRY
231 __field(int, ret)
232 ),
233 TP_fast_assign(
234 WPAN_PHY_ASSIGN;
235 __entry->ret = ret;
236 ),
237 TP_printk(WPAN_PHY_PR_FMT ", returned: %d", WPAN_PHY_PR_ARG,
238 __entry->ret)
239);
240
241#endif /* !__RDEV_CFG802154_OPS_TRACE || TRACE_HEADER_MULTI_READ */
242
243#undef TRACE_INCLUDE_PATH
244#define TRACE_INCLUDE_PATH .
245#undef TRACE_INCLUDE_FILE
246#define TRACE_INCLUDE_FILE trace
247#include <trace/define_trace.h>
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7fde1f265c90..c21777565c58 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -886,22 +886,45 @@ static int ip6_dst_lookup_tail(struct sock *sk,
886#endif 886#endif
887 int err; 887 int err;
888 888
889 if (!*dst) 889 /* The correct way to handle this would be to do
890 *dst = ip6_route_output(net, sk, fl6); 890 * ip6_route_get_saddr, and then ip6_route_output; however,
891 891 * the route-specific preferred source forces the
892 err = (*dst)->error; 892 * ip6_route_output call _before_ ip6_route_get_saddr.
893 if (err) 893 *
894 goto out_err_release; 894 * In source specific routing (no src=any default route),
895 * ip6_route_output will fail given src=any saddr, though, so
896 * that's why we try it again later.
897 */
898 if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
899 struct rt6_info *rt;
900 bool had_dst = *dst != NULL;
895 901
896 if (ipv6_addr_any(&fl6->saddr)) { 902 if (!had_dst)
897 struct rt6_info *rt = (struct rt6_info *) *dst; 903 *dst = ip6_route_output(net, sk, fl6);
904 rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
898 err = ip6_route_get_saddr(net, rt, &fl6->daddr, 905 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
899 sk ? inet6_sk(sk)->srcprefs : 0, 906 sk ? inet6_sk(sk)->srcprefs : 0,
900 &fl6->saddr); 907 &fl6->saddr);
901 if (err) 908 if (err)
902 goto out_err_release; 909 goto out_err_release;
910
911 /* If we had an erroneous initial result, pretend it
912 * never existed and let the SA-enabled version take
913 * over.
914 */
915 if (!had_dst && (*dst)->error) {
916 dst_release(*dst);
917 *dst = NULL;
918 }
903 } 919 }
904 920
921 if (!*dst)
922 *dst = ip6_route_output(net, sk, fl6);
923
924 err = (*dst)->error;
925 if (err)
926 goto out_err_release;
927
905#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 928#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
906 /* 929 /*
907 * Here if the dst entry we've looked up 930 * Here if the dst entry we've looked up
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 106dbe5140f1..6f4a35096bde 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2235,9 +2235,10 @@ int ip6_route_get_saddr(struct net *net,
2235 unsigned int prefs, 2235 unsigned int prefs,
2236 struct in6_addr *saddr) 2236 struct in6_addr *saddr)
2237{ 2237{
2238 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); 2238 struct inet6_dev *idev =
2239 rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
2239 int err = 0; 2240 int err = 0;
2240 if (rt->rt6i_prefsrc.plen) 2241 if (rt && rt->rt6i_prefsrc.plen)
2241 *saddr = rt->rt6i_prefsrc.addr; 2242 *saddr = rt->rt6i_prefsrc.addr;
2242 else 2243 else
2243 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, 2244 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index dc2d7133c4f6..4ee8fea263ed 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -816,13 +816,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
816 * (because if we remove a STA after ops->remove_interface() 816 * (because if we remove a STA after ops->remove_interface()
817 * the driver will have removed the vif info already!) 817 * the driver will have removed the vif info already!)
818 * 818 *
819 * This is relevant only in WDS mode, in all other modes we've 819 * In WDS mode a station must exist here and be flushed, for
820 * already removed all stations when disconnecting or similar, 820 * AP_VLANs stations may exist since there's nothing else that
821 * so warn otherwise. 821 * would have removed them, but in other modes there shouldn't
822 * be any stations.
822 */ 823 */
823 flushed = sta_info_flush(sdata); 824 flushed = sta_info_flush(sdata);
824 WARN_ON_ONCE((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) || 825 WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
825 (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)); 826 ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
827 (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
826 828
827 /* don't count this interface for allmulti while it is down */ 829 /* don't count this interface for allmulti while it is down */
828 if (sdata->flags & IEEE80211_SDATA_ALLMULTI) 830 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index aec15d746aea..ce0c1662de42 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -66,6 +66,7 @@
66 66
67static const struct rhashtable_params sta_rht_params = { 67static const struct rhashtable_params sta_rht_params = {
68 .nelem_hint = 3, /* start small */ 68 .nelem_hint = 3, /* start small */
69 .automatic_shrinking = true,
69 .head_offset = offsetof(struct sta_info, hash_node), 70 .head_offset = offsetof(struct sta_info, hash_node),
70 .key_offset = offsetof(struct sta_info, sta.addr), 71 .key_offset = offsetof(struct sta_info, sta.addr),
71 .key_len = ETH_ALEN, 72 .key_len = ETH_ALEN,
@@ -158,8 +159,24 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
158 const u8 *addr) 159 const u8 *addr)
159{ 160{
160 struct ieee80211_local *local = sdata->local; 161 struct ieee80211_local *local = sdata->local;
162 struct sta_info *sta;
163 struct rhash_head *tmp;
164 const struct bucket_table *tbl;
165
166 rcu_read_lock();
167 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
161 168
162 return rhashtable_lookup_fast(&local->sta_hash, addr, sta_rht_params); 169 for_each_sta_info(local, tbl, addr, sta, tmp) {
170 if (sta->sdata == sdata) {
171 rcu_read_unlock();
172 /* this is safe as the caller must already hold
173 * another rcu read section or the mutex
174 */
175 return sta;
176 }
177 }
178 rcu_read_unlock();
179 return NULL;
163} 180}
164 181
165/* 182/*
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 5d9f68c75e5f..70be9c799f8a 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -22,13 +22,14 @@
22 22
23static struct net_device * 23static struct net_device *
24ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy, 24ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy,
25 const char *name, int type) 25 const char *name,
26 unsigned char name_assign_type, int type)
26{ 27{
27 struct ieee802154_local *local = wpan_phy_priv(wpan_phy); 28 struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
28 struct net_device *dev; 29 struct net_device *dev;
29 30
30 rtnl_lock(); 31 rtnl_lock();
31 dev = ieee802154_if_add(local, name, type, 32 dev = ieee802154_if_add(local, name, name_assign_type, type,
32 cpu_to_le64(0x0000000000000000ULL)); 33 cpu_to_le64(0x0000000000000000ULL));
33 rtnl_unlock(); 34 rtnl_unlock();
34 35
@@ -45,12 +46,14 @@ static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
45 46
46static int 47static int
47ieee802154_add_iface(struct wpan_phy *phy, const char *name, 48ieee802154_add_iface(struct wpan_phy *phy, const char *name,
49 unsigned char name_assign_type,
48 enum nl802154_iftype type, __le64 extended_addr) 50 enum nl802154_iftype type, __le64 extended_addr)
49{ 51{
50 struct ieee802154_local *local = wpan_phy_priv(phy); 52 struct ieee802154_local *local = wpan_phy_priv(phy);
51 struct net_device *err; 53 struct net_device *err;
52 54
53 err = ieee802154_if_add(local, name, type, extended_addr); 55 err = ieee802154_if_add(local, name, name_assign_type, type,
56 extended_addr);
54 return PTR_ERR_OR_ZERO(err); 57 return PTR_ERR_OR_ZERO(err);
55} 58}
56 59
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index bebd70ffc7a3..127ba18386fc 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -182,7 +182,8 @@ void ieee802154_iface_exit(void);
182void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata); 182void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata);
183struct net_device * 183struct net_device *
184ieee802154_if_add(struct ieee802154_local *local, const char *name, 184ieee802154_if_add(struct ieee802154_local *local, const char *name,
185 enum nl802154_iftype type, __le64 extended_addr); 185 unsigned char name_assign_type, enum nl802154_iftype type,
186 __le64 extended_addr);
186void ieee802154_remove_interfaces(struct ieee802154_local *local); 187void ieee802154_remove_interfaces(struct ieee802154_local *local);
187 188
188#endif /* __IEEE802154_I_H */ 189#endif /* __IEEE802154_I_H */
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 38b56f9d9386..91b75abbd1a1 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -522,7 +522,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
522 522
523struct net_device * 523struct net_device *
524ieee802154_if_add(struct ieee802154_local *local, const char *name, 524ieee802154_if_add(struct ieee802154_local *local, const char *name,
525 enum nl802154_iftype type, __le64 extended_addr) 525 unsigned char name_assign_type, enum nl802154_iftype type,
526 __le64 extended_addr)
526{ 527{
527 struct net_device *ndev = NULL; 528 struct net_device *ndev = NULL;
528 struct ieee802154_sub_if_data *sdata = NULL; 529 struct ieee802154_sub_if_data *sdata = NULL;
@@ -531,7 +532,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
531 ASSERT_RTNL(); 532 ASSERT_RTNL();
532 533
533 ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, name, 534 ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, name,
534 NET_NAME_UNKNOWN, ieee802154_if_setup); 535 name_assign_type, ieee802154_if_setup);
535 if (!ndev) 536 if (!ndev)
536 return ERR_PTR(-ENOMEM); 537 return ERR_PTR(-ENOMEM);
537 538
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index dcf73958133a..5b2be12832e6 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -134,7 +134,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
134 for (i = 0; i < ARRAY_SIZE(key->tfm); i++) { 134 for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
135 key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0, 135 key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
136 CRYPTO_ALG_ASYNC); 136 CRYPTO_ALG_ASYNC);
137 if (!key->tfm[i]) 137 if (IS_ERR(key->tfm[i]))
138 goto err_tfm; 138 goto err_tfm;
139 if (crypto_aead_setkey(key->tfm[i], template->key, 139 if (crypto_aead_setkey(key->tfm[i], template->key,
140 IEEE802154_LLSEC_KEY_SIZE)) 140 IEEE802154_LLSEC_KEY_SIZE))
@@ -144,7 +144,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
144 } 144 }
145 145
146 key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC); 146 key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
147 if (!key->tfm0) 147 if (IS_ERR(key->tfm0))
148 goto err_tfm; 148 goto err_tfm;
149 149
150 if (crypto_blkcipher_setkey(key->tfm0, template->key, 150 if (crypto_blkcipher_setkey(key->tfm0, template->key,
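
The llsec.c change above is needed because crypto_alloc_aead() and crypto_alloc_blkcipher() signal failure with an ERR_PTR-encoded pointer rather than NULL, so the old NULL checks could pass an error pointer straight on to the setkey calls. A minimal sketch of the intended check, assuming the usual <linux/err.h> helpers; alloc_ccm_tfm() is a hypothetical wrapper, not code from the patch:

#include <linux/crypto.h>
#include <linux/err.h>

static int alloc_ccm_tfm(struct crypto_aead **out)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))		/* failure is an error pointer, never NULL */
		return PTR_ERR(tfm);

	*out = tfm;
	return 0;
}
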
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 8500378c8318..08cb32dc8fd3 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -161,18 +161,21 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
161 161
162 rtnl_lock(); 162 rtnl_lock();
163 163
164 dev = ieee802154_if_add(local, "wpan%d", NL802154_IFTYPE_NODE, 164 dev = ieee802154_if_add(local, "wpan%d", NET_NAME_ENUM,
165 NL802154_IFTYPE_NODE,
165 cpu_to_le64(0x0000000000000000ULL)); 166 cpu_to_le64(0x0000000000000000ULL));
166 if (IS_ERR(dev)) { 167 if (IS_ERR(dev)) {
167 rtnl_unlock(); 168 rtnl_unlock();
168 rc = PTR_ERR(dev); 169 rc = PTR_ERR(dev);
169 goto out_wq; 170 goto out_phy;
170 } 171 }
171 172
172 rtnl_unlock(); 173 rtnl_unlock();
173 174
174 return 0; 175 return 0;
175 176
177out_phy:
178 wpan_phy_unregister(local->phy);
176out_wq: 179out_wq:
177 destroy_workqueue(local->workqueue); 180 destroy_workqueue(local->workqueue);
178out: 181out:
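
The mac802154 hunks above do one thing: thread a name_assign_type value from the cfg layer down to alloc_netdev(), so the default interface created in ieee802154_register_hw() is tagged NET_NAME_ENUM instead of NET_NAME_UNKNOWN. A minimal sketch of the allocator's calling convention, with hypothetical my_priv/my_if_setup names that are not from the patch:

#include <linux/netdevice.h>
#include <linux/err.h>

/* hypothetical per-interface private data */
struct my_priv {
	int placeholder;
};

/* hypothetical setup callback: fill in ops, MTU, address length, ... */
static void my_if_setup(struct net_device *dev)
{
}

static struct net_device *my_if_add(const char *name,
				    unsigned char name_assign_type)
{
	struct net_device *ndev;

	/* name_assign_type records how the name was chosen:
	 * NET_NAME_ENUM for a kernel-enumerated "wpan%d"-style template,
	 * NET_NAME_USER when userspace supplied the exact name.
	 */
	ndev = alloc_netdev(sizeof(struct my_priv), name,
			    name_assign_type, my_if_setup);
	if (!ndev)
		return ERR_PTR(-ENOMEM);

	return ndev;
}
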
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 954810c76a86..7b3f732269e4 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -647,7 +647,7 @@ int nla_get_labels(const struct nlattr *nla,
647 return -EINVAL; 647 return -EINVAL;
648 648
649 switch (dec.label) { 649 switch (dec.label) {
650 case LABEL_IMPLICIT_NULL: 650 case MPLS_LABEL_IMPLNULL:
651 /* RFC3032: This is a label that an LSR may 651 /* RFC3032: This is a label that an LSR may
652 * assign and distribute, but which never 652 * assign and distribute, but which never
653 * actually appears in the encapsulation. 653 * actually appears in the encapsulation.
@@ -935,7 +935,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
935 } 935 }
936 936
937 /* In case the predefined labels need to be populated */ 937 /* In case the predefined labels need to be populated */
938 if (limit > LABEL_IPV4_EXPLICIT_NULL) { 938 if (limit > MPLS_LABEL_IPV4NULL) {
939 struct net_device *lo = net->loopback_dev; 939 struct net_device *lo = net->loopback_dev;
940 rt0 = mpls_rt_alloc(lo->addr_len); 940 rt0 = mpls_rt_alloc(lo->addr_len);
941 if (!rt0) 941 if (!rt0)
@@ -945,7 +945,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
945 rt0->rt_via_table = NEIGH_LINK_TABLE; 945 rt0->rt_via_table = NEIGH_LINK_TABLE;
946 memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len); 946 memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
947 } 947 }
948 if (limit > LABEL_IPV6_EXPLICIT_NULL) { 948 if (limit > MPLS_LABEL_IPV6NULL) {
949 struct net_device *lo = net->loopback_dev; 949 struct net_device *lo = net->loopback_dev;
950 rt2 = mpls_rt_alloc(lo->addr_len); 950 rt2 = mpls_rt_alloc(lo->addr_len);
951 if (!rt2) 951 if (!rt2)
@@ -973,15 +973,15 @@ static int resize_platform_label_table(struct net *net, size_t limit)
973 memcpy(labels, old, cp_size); 973 memcpy(labels, old, cp_size);
974 974
975 /* If needed set the predefined labels */ 975 /* If needed set the predefined labels */
976 if ((old_limit <= LABEL_IPV6_EXPLICIT_NULL) && 976 if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
977 (limit > LABEL_IPV6_EXPLICIT_NULL)) { 977 (limit > MPLS_LABEL_IPV6NULL)) {
978 RCU_INIT_POINTER(labels[LABEL_IPV6_EXPLICIT_NULL], rt2); 978 RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
979 rt2 = NULL; 979 rt2 = NULL;
980 } 980 }
981 981
982 if ((old_limit <= LABEL_IPV4_EXPLICIT_NULL) && 982 if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
983 (limit > LABEL_IPV4_EXPLICIT_NULL)) { 983 (limit > MPLS_LABEL_IPV4NULL)) {
984 RCU_INIT_POINTER(labels[LABEL_IPV4_EXPLICIT_NULL], rt0); 984 RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
985 rt0 = NULL; 985 rt0 = NULL;
986 } 986 }
987 987
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 693877d69606..b064c345042c 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -1,16 +1,6 @@
1#ifndef MPLS_INTERNAL_H 1#ifndef MPLS_INTERNAL_H
2#define MPLS_INTERNAL_H 2#define MPLS_INTERNAL_H
3 3
4#define LABEL_IPV4_EXPLICIT_NULL 0 /* RFC3032 */
5#define LABEL_ROUTER_ALERT_LABEL 1 /* RFC3032 */
6#define LABEL_IPV6_EXPLICIT_NULL 2 /* RFC3032 */
7#define LABEL_IMPLICIT_NULL 3 /* RFC3032 */
8#define LABEL_ENTROPY_INDICATOR 7 /* RFC6790 */
9#define LABEL_GAL 13 /* RFC5586 */
10#define LABEL_OAM_ALERT 14 /* RFC3429 */
11#define LABEL_EXTENSION 15 /* RFC7274 */
12
13
14struct mpls_shim_hdr { 4struct mpls_shim_hdr {
15 __be32 label_stack_entry; 5 __be32 label_stack_entry;
16}; 6};
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index e9b9559731bb..164ded7050b8 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -3179,7 +3179,6 @@ static const struct rhashtable_params netlink_rhashtable_params = {
3179 .key_len = netlink_compare_arg_len, 3179 .key_len = netlink_compare_arg_len,
3180 .obj_hashfn = netlink_hash, 3180 .obj_hashfn = netlink_hash,
3181 .obj_cmpfn = netlink_compare, 3181 .obj_cmpfn = netlink_compare,
3182 .max_size = 65536,
3183 .automatic_shrinking = true, 3182 .automatic_shrinking = true,
3184}; 3183};
3185 3184
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 94713276a1d9..12c5dde8e344 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2311,11 +2311,14 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2311 tlen = dev->needed_tailroom; 2311 tlen = dev->needed_tailroom;
2312 skb = sock_alloc_send_skb(&po->sk, 2312 skb = sock_alloc_send_skb(&po->sk,
2313 hlen + tlen + sizeof(struct sockaddr_ll), 2313 hlen + tlen + sizeof(struct sockaddr_ll),
2314 0, &err); 2314 !need_wait, &err);
2315 2315
2316 if (unlikely(skb == NULL)) 2316 if (unlikely(skb == NULL)) {
2317 /* we assume the socket was initially writeable ... */
2318 if (likely(len_sum > 0))
2319 err = len_sum;
2317 goto out_status; 2320 goto out_status;
2318 2321 }
2319 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2322 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2320 addr, hlen); 2323 addr, hlen);
2321 if (tp_len > dev->mtu + dev->hard_header_len) { 2324 if (tp_len > dev->mtu + dev->hard_header_len) {
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 14f041398ca1..da6da57e5f36 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -126,7 +126,10 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
126 struct rds_transport *loop_trans; 126 struct rds_transport *loop_trans;
127 unsigned long flags; 127 unsigned long flags;
128 int ret; 128 int ret;
129 struct rds_transport *otrans = trans;
129 130
131 if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
132 goto new_conn;
130 rcu_read_lock(); 133 rcu_read_lock();
131 conn = rds_conn_lookup(head, laddr, faddr, trans); 134 conn = rds_conn_lookup(head, laddr, faddr, trans);
132 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && 135 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
@@ -142,6 +145,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
142 if (conn) 145 if (conn)
143 goto out; 146 goto out;
144 147
148new_conn:
145 conn = kmem_cache_zalloc(rds_conn_slab, gfp); 149 conn = kmem_cache_zalloc(rds_conn_slab, gfp);
146 if (!conn) { 150 if (!conn) {
147 conn = ERR_PTR(-ENOMEM); 151 conn = ERR_PTR(-ENOMEM);
@@ -230,13 +234,22 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
230 /* Creating normal conn */ 234 /* Creating normal conn */
231 struct rds_connection *found; 235 struct rds_connection *found;
232 236
233 found = rds_conn_lookup(head, laddr, faddr, trans); 237 if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
238 found = NULL;
239 else
240 found = rds_conn_lookup(head, laddr, faddr, trans);
234 if (found) { 241 if (found) {
235 trans->conn_free(conn->c_transport_data); 242 trans->conn_free(conn->c_transport_data);
236 kmem_cache_free(rds_conn_slab, conn); 243 kmem_cache_free(rds_conn_slab, conn);
237 conn = found; 244 conn = found;
238 } else { 245 } else {
239 hlist_add_head_rcu(&conn->c_hash_node, head); 246 if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
247 (otrans->t_type != RDS_TRANS_TCP)) {
248 /* Only the active side should be added to
249 * reconnect list for TCP.
250 */
251 hlist_add_head_rcu(&conn->c_hash_node, head);
252 }
240 rds_cong_add_conn(conn); 253 rds_cong_add_conn(conn);
241 rds_conn_count++; 254 rds_conn_count++;
242 } 255 }
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 31b74f5e61ad..8a09ee7db3c1 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -183,8 +183,17 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
183 183
184 /* If the peer gave us the last packet it saw, process this as if 184 /* If the peer gave us the last packet it saw, process this as if
185 * we had received a regular ACK. */ 185 * we had received a regular ACK. */
186 if (dp && dp->dp_ack_seq) 186 if (dp) {
187 rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL); 187 /* dp structure start is not guaranteed to be 8 bytes aligned.
188 * Since dp_ack_seq is 64-bit extended load operations can be
189 * used so go through get_unaligned to avoid unaligned errors.
190 */
191 __be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);
192
193 if (dp_ack_seq)
194 rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
195 NULL);
196 }
188 197
189 rds_connect_complete(conn); 198 rds_connect_complete(conn);
190} 199}
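
The ib_cm.c hunk reads dp_ack_seq through get_unaligned() because the private data arriving off the wire need not start on an 8-byte boundary, and a plain 64-bit load from such a field can fault on strict-alignment architectures. A minimal sketch of the same pattern, using a hypothetical wire-format struct rather than the real RDS layout:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* hypothetical on-the-wire header; not the RDS private-data layout */
struct wire_hdr {
	u8	flags;
	__be64	ack_seq;	/* can land at a non-8-byte-aligned offset */
} __packed;

static u64 read_ack_seq(const struct wire_hdr *hdr)
{
	/* fetch the field without assuming natural alignment */
	__be64 raw = get_unaligned(&hdr->ack_seq);

	return be64_to_cpu(raw);
}
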
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index f9f564a6c960..973109c7b8e8 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -62,6 +62,7 @@ void rds_tcp_state_change(struct sock *sk)
62 case TCP_ESTABLISHED: 62 case TCP_ESTABLISHED:
63 rds_connect_complete(conn); 63 rds_connect_complete(conn);
64 break; 64 break;
65 case TCP_CLOSE_WAIT:
65 case TCP_CLOSE: 66 case TCP_CLOSE:
66 rds_conn_drop(conn); 67 rds_conn_drop(conn);
67 default: 68 default:
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 23ab4dcd1d9f..0da49e34495f 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -45,12 +45,45 @@ static void rds_tcp_accept_worker(struct work_struct *work);
45static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker); 45static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
46static struct socket *rds_tcp_listen_sock; 46static struct socket *rds_tcp_listen_sock;
47 47
48static int rds_tcp_keepalive(struct socket *sock)
49{
50 /* values below based on xs_udp_default_timeout */
51 int keepidle = 5; /* send a probe 'keepidle' secs after last data */
52 int keepcnt = 5; /* number of unack'ed probes before declaring dead */
53 int keepalive = 1;
54 int ret = 0;
55
56 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
57 (char *)&keepalive, sizeof(keepalive));
58 if (ret < 0)
59 goto bail;
60
61 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
62 (char *)&keepcnt, sizeof(keepcnt));
63 if (ret < 0)
64 goto bail;
65
66 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
67 (char *)&keepidle, sizeof(keepidle));
68 if (ret < 0)
69 goto bail;
70
71 /* KEEPINTVL is the interval between successive probes. We follow
72 * the model in xs_tcp_finish_connecting() and re-use keepidle.
73 */
74 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
75 (char *)&keepidle, sizeof(keepidle));
76bail:
77 return ret;
78}
79
48static int rds_tcp_accept_one(struct socket *sock) 80static int rds_tcp_accept_one(struct socket *sock)
49{ 81{
50 struct socket *new_sock = NULL; 82 struct socket *new_sock = NULL;
51 struct rds_connection *conn; 83 struct rds_connection *conn;
52 int ret; 84 int ret;
53 struct inet_sock *inet; 85 struct inet_sock *inet;
86 struct rds_tcp_connection *rs_tcp;
54 87
55 ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, 88 ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
56 sock->sk->sk_protocol, &new_sock); 89 sock->sk->sk_protocol, &new_sock);
@@ -63,6 +96,10 @@ static int rds_tcp_accept_one(struct socket *sock)
63 if (ret < 0) 96 if (ret < 0)
64 goto out; 97 goto out;
65 98
99 ret = rds_tcp_keepalive(new_sock);
100 if (ret < 0)
101 goto out;
102
66 rds_tcp_tune(new_sock); 103 rds_tcp_tune(new_sock);
67 104
68 inet = inet_sk(new_sock->sk); 105 inet = inet_sk(new_sock->sk);
@@ -77,6 +114,15 @@ static int rds_tcp_accept_one(struct socket *sock)
77 ret = PTR_ERR(conn); 114 ret = PTR_ERR(conn);
78 goto out; 115 goto out;
79 } 116 }
117 /* An incoming SYN request came in, and TCP just accepted it.
118 * We always create a new conn for listen side of TCP, and do not
119 * add it to the c_hash_list.
120 *
121 * If the client reboots, this conn will need to be cleaned up.
122 * rds_tcp_state_change() will do that cleanup
123 */
124 rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
125 WARN_ON(!rs_tcp || rs_tcp->t_sock);
80 126
81 /* 127 /*
82 * see the comment above rds_queue_delayed_reconnect() 128 * see the comment above rds_queue_delayed_reconnect()
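
rds_tcp_keepalive() above arms the accepted socket so that a peer which reboots without closing the connection is eventually detected and the stale conn can be cleaned up in rds_tcp_state_change(). The same four knobs exist for ordinary sockets; a standalone userspace analogue (not part of the patch), reusing the patch's 5-second idle and probe-count values:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int enable_keepalive(int fd)
{
	int on = 1, idle = 5, cnt = 5, intvl = 5;

	/* turn keepalive probing on for this socket */
	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
		return -1;
	/* start probing 'idle' seconds after the last data */
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0)
		return -1;
	/* probe every 'intvl' seconds ... */
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) < 0)
		return -1;
	/* ... and give up after 'cnt' unanswered probes */
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}
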
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8b0470e418dc..b6ef9a04de06 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -308,12 +308,11 @@ replay:
308 case RTM_DELTFILTER: 308 case RTM_DELTFILTER:
309 err = tp->ops->delete(tp, fh); 309 err = tp->ops->delete(tp, fh);
310 if (err == 0) { 310 if (err == 0) {
311 tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); 311 struct tcf_proto *next = rtnl_dereference(tp->next);
312 if (tcf_destroy(tp, false)) {
313 struct tcf_proto *next = rtnl_dereference(tp->next);
314 312
313 tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
314 if (tcf_destroy(tp, false))
315 RCU_INIT_POINTER(*back, next); 315 RCU_INIT_POINTER(*back, next);
316 }
317 } 316 }
318 goto errout; 317 goto errout;
319 case RTM_GETTFILTER: 318 case RTM_GETTFILTER:
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 1474b6560fac..535007d5f0b5 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -171,7 +171,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
171 171
172 sch->limit = DEFAULT_CODEL_LIMIT; 172 sch->limit = DEFAULT_CODEL_LIMIT;
173 173
174 codel_params_init(&q->params); 174 codel_params_init(&q->params, sch);
175 codel_vars_init(&q->vars); 175 codel_vars_init(&q->vars);
176 codel_stats_init(&q->stats); 176 codel_stats_init(&q->stats);
177 177
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 778739786b32..d75993f89fac 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -391,7 +391,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
391 q->perturbation = prandom_u32(); 391 q->perturbation = prandom_u32();
392 INIT_LIST_HEAD(&q->new_flows); 392 INIT_LIST_HEAD(&q->new_flows);
393 INIT_LIST_HEAD(&q->old_flows); 393 INIT_LIST_HEAD(&q->old_flows);
394 codel_params_init(&q->cparams); 394 codel_params_init(&q->cparams, sch);
395 codel_stats_init(&q->cstats); 395 codel_stats_init(&q->cstats);
396 q->cparams.ecn = true; 396 q->cparams.ecn = true;
397 397
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 826e2994152b..abb9f2fec28f 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -230,7 +230,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
230 break; 230 break;
231 } 231 }
232 232
233 if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { 233 if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
234 q->backlog += qdisc_pkt_len(skb); 234 q->backlog += qdisc_pkt_len(skb);
235 return qdisc_enqueue_tail(skb, sch); 235 return qdisc_enqueue_tail(skb, sch);
236 } 236 }
@@ -573,7 +573,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
573 573
574 opt.limit = q->limit; 574 opt.limit = q->limit;
575 opt.DP = q->DP; 575 opt.DP = q->DP;
576 opt.backlog = q->backlog; 576 opt.backlog = gred_backlog(table, q, sch);
577 opt.prio = q->prio; 577 opt.prio = q->prio;
578 opt.qth_min = q->parms.qth_min >> q->parms.Wlog; 578 opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
579 opt.qth_max = q->parms.qth_max >> q->parms.Wlog; 579 opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 1ec19f6f0c2b..eeeba5adee6d 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
793{ 793{
794 u32 value_follows; 794 u32 value_follows;
795 int err; 795 int err;
796 struct page *scratch;
797
798 scratch = alloc_page(GFP_KERNEL);
799 if (!scratch)
800 return -ENOMEM;
801 xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
796 802
797 /* res->status */ 803 /* res->status */
798 err = gssx_dec_status(xdr, &res->status); 804 err = gssx_dec_status(xdr, &res->status);
799 if (err) 805 if (err)
800 return err; 806 goto out_free;
801 807
802 /* res->context_handle */ 808 /* res->context_handle */
803 err = gssx_dec_bool(xdr, &value_follows); 809 err = gssx_dec_bool(xdr, &value_follows);
804 if (err) 810 if (err)
805 return err; 811 goto out_free;
806 if (value_follows) { 812 if (value_follows) {
807 err = gssx_dec_ctx(xdr, res->context_handle); 813 err = gssx_dec_ctx(xdr, res->context_handle);
808 if (err) 814 if (err)
809 return err; 815 goto out_free;
810 } else { 816 } else {
811 res->context_handle = NULL; 817 res->context_handle = NULL;
812 } 818 }
@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
814 /* res->output_token */ 820 /* res->output_token */
815 err = gssx_dec_bool(xdr, &value_follows); 821 err = gssx_dec_bool(xdr, &value_follows);
816 if (err) 822 if (err)
817 return err; 823 goto out_free;
818 if (value_follows) { 824 if (value_follows) {
819 err = gssx_dec_buffer(xdr, res->output_token); 825 err = gssx_dec_buffer(xdr, res->output_token);
820 if (err) 826 if (err)
821 return err; 827 goto out_free;
822 } else { 828 } else {
823 res->output_token = NULL; 829 res->output_token = NULL;
824 } 830 }
@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
826 /* res->delegated_cred_handle */ 832 /* res->delegated_cred_handle */
827 err = gssx_dec_bool(xdr, &value_follows); 833 err = gssx_dec_bool(xdr, &value_follows);
828 if (err) 834 if (err)
829 return err; 835 goto out_free;
830 if (value_follows) { 836 if (value_follows) {
831 /* we do not support upcall servers sending this data. */ 837 /* we do not support upcall servers sending this data. */
832 return -EINVAL; 838 err = -EINVAL;
839 goto out_free;
833 } 840 }
834 841
835 /* res->options */ 842 /* res->options */
836 err = gssx_dec_option_array(xdr, &res->options); 843 err = gssx_dec_option_array(xdr, &res->options);
837 844
845out_free:
846 __free_page(scratch);
838 return err; 847 return err;
839} 848}
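
Besides adding the scratch page, the gssx_dec_accept_sec_context() hunks convert every early return into goto out_free, so the page registered with xdr_set_scratch_buffer() is released on all paths. A minimal sketch of that single-exit cleanup shape, with hypothetical decode steps standing in for the real gssx_dec_*() calls:

#include <linux/gfp.h>
#include <linux/errno.h>

/* hypothetical decode steps standing in for gssx_dec_*() */
static int decode_step_one(void) { return 0; }
static int decode_step_two(void) { return 0; }

static int decode_with_scratch(void)
{
	struct page *scratch;
	int err;

	scratch = alloc_page(GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;

	err = decode_step_one();
	if (err)
		goto out_free;

	err = decode_step_two();
out_free:
	__free_page(scratch);	/* single exit: runs on success and on every error */
	return err;
}
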
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index d8fe29fc19a4..8bd960658463 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -16,7 +16,7 @@ MAKEFLAGS += --no-print-directory
16LIBFILE = $(OUTPUT)libapi.a 16LIBFILE = $(OUTPUT)libapi.a
17 17
18CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) 18CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
19CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 -fPIC 19CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
20CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 20CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
21 21
22RM = rm -f 22RM = rm -f
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index e0917c0f5d9f..29f94f6f0d9e 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -3865,7 +3865,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
3865 } else if (el_size == 4) { 3865 } else if (el_size == 4) {
3866 trace_seq_printf(s, "%u", *(uint32_t *)num); 3866 trace_seq_printf(s, "%u", *(uint32_t *)num);
3867 } else if (el_size == 8) { 3867 } else if (el_size == 8) {
3868 trace_seq_printf(s, "%lu", *(uint64_t *)num); 3868 trace_seq_printf(s, "%"PRIu64, *(uint64_t *)num);
3869 } else { 3869 } else {
3870 trace_seq_printf(s, "BAD SIZE:%d 0x%x", 3870 trace_seq_printf(s, "BAD SIZE:%d 0x%x",
3871 el_size, *(uint8_t *)num); 3871 el_size, *(uint8_t *)num);
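
The event-parse.c hunk (like the similar builtin-kmem.c one further down) replaces "%lu" with PRIu64 because uint64_t is unsigned long only on LP64 targets; on 32-bit builds it is unsigned long long, so the old format string mismatches the argument and typically prints garbage. A self-contained illustration, assuming nothing beyond <inttypes.h>:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t v = 18446744073709551615ULL;	/* UINT64_MAX */

	/* PRIu64 expands to the correct length modifier for this ABI */
	printf("%" PRIu64 "\n", v);
	return 0;
}
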
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index bedff6b5b3cf..ad0d9b5342fb 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -132,6 +132,9 @@ int bench_futex_requeue(int argc, const char **argv,
132 if (!fshared) 132 if (!fshared)
133 futex_flag = FUTEX_PRIVATE_FLAG; 133 futex_flag = FUTEX_PRIVATE_FLAG;
134 134
135 if (nrequeue > nthreads)
136 nrequeue = nthreads;
137
135 printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), " 138 printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
136 "%d at a time.\n\n", getpid(), nthreads, 139 "%d at a time.\n\n", getpid(), nthreads,
137 fshared ? "shared":"private", &futex1, &futex2, nrequeue); 140 fshared ? "shared":"private", &futex1, &futex2, nrequeue);
@@ -161,20 +164,18 @@ int bench_futex_requeue(int argc, const char **argv,
161 164
162 /* Ok, all threads are patiently blocked, start requeueing */ 165 /* Ok, all threads are patiently blocked, start requeueing */
163 gettimeofday(&start, NULL); 166 gettimeofday(&start, NULL);
164 for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue) { 167 while (nrequeued < nthreads) {
165 /* 168 /*
166 * Do not wakeup any tasks blocked on futex1, allowing 169 * Do not wakeup any tasks blocked on futex1, allowing
167 * us to really measure futex_wait functionality. 170 * us to really measure futex_wait functionality.
168 */ 171 */
169 futex_cmp_requeue(&futex1, 0, &futex2, 0, 172 nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
170 nrequeue, futex_flag); 173 nrequeue, futex_flag);
171 } 174 }
175
172 gettimeofday(&end, NULL); 176 gettimeofday(&end, NULL);
173 timersub(&end, &start, &runtime); 177 timersub(&end, &start, &runtime);
174 178
175 if (nrequeued > nthreads)
176 nrequeued = nthreads;
177
178 update_stats(&requeued_stats, nrequeued); 179 update_stats(&requeued_stats, nrequeued);
179 update_stats(&requeuetime_stats, runtime.tv_usec); 180 update_stats(&requeuetime_stats, runtime.tv_usec);
180 181
@@ -184,7 +185,7 @@ int bench_futex_requeue(int argc, const char **argv,
184 } 185 }
185 186
186 /* everybody should be blocked on futex2, wake'em up */ 187 /* everybody should be blocked on futex2, wake'em up */
187 nrequeued = futex_wake(&futex2, nthreads, futex_flag); 188 nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
188 if (nthreads != nrequeued) 189 if (nthreads != nrequeued)
189 warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads); 190 warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
190 191
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index ebfa163b80b5..ba5efa4710b5 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -180,7 +180,7 @@ static const struct option options[] = {
180 OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"), 180 OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
181 OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"), 181 OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"),
182 OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"), 182 OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
183 OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "bzero the initial allocations"), 183 OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "quiet mode"),
184 OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"), 184 OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
185 185
186 /* Special option string parsing callbacks: */ 186 /* Special option string parsing callbacks: */
@@ -828,6 +828,9 @@ static int count_process_nodes(int process_nr)
828 td = g->threads + task_nr; 828 td = g->threads + task_nr;
829 829
830 node = numa_node_of_cpu(td->curr_cpu); 830 node = numa_node_of_cpu(td->curr_cpu);
831 if (node < 0) /* curr_cpu was likely still -1 */
832 return 0;
833
831 node_present[node] = 1; 834 node_present[node] = 1;
832 } 835 }
833 836
@@ -882,6 +885,11 @@ static void calc_convergence_compression(int *strong)
882 for (p = 0; p < g->p.nr_proc; p++) { 885 for (p = 0; p < g->p.nr_proc; p++) {
883 unsigned int nodes = count_process_nodes(p); 886 unsigned int nodes = count_process_nodes(p);
884 887
888 if (!nodes) {
889 *strong = 0;
890 return;
891 }
892
885 nodes_min = min(nodes, nodes_min); 893 nodes_min = min(nodes, nodes_min);
886 nodes_max = max(nodes, nodes_max); 894 nodes_max = max(nodes, nodes_max);
887 } 895 }
@@ -1395,7 +1403,7 @@ static void print_res(const char *name, double val,
1395 if (!name) 1403 if (!name)
1396 name = "main,"; 1404 name = "main,";
1397 1405
1398 if (g->p.show_quiet) 1406 if (!g->p.show_quiet)
1399 printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short); 1407 printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
1400 else 1408 else
1401 printf(" %14.3f %s\n", val, txt_long); 1409 printf(" %14.3f %s\n", val, txt_long);
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 63ea01349b6e..1634186d537c 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -319,7 +319,7 @@ static int page_stat_cmp(struct page_stat *a, struct page_stat *b)
319 return 0; 319 return 0;
320} 320}
321 321
322static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool create) 322static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool create)
323{ 323{
324 struct rb_node **node = &page_alloc_tree.rb_node; 324 struct rb_node **node = &page_alloc_tree.rb_node;
325 struct rb_node *parent = NULL; 325 struct rb_node *parent = NULL;
@@ -331,7 +331,7 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool cre
331 parent = *node; 331 parent = *node;
332 data = rb_entry(*node, struct page_stat, node); 332 data = rb_entry(*node, struct page_stat, node);
333 333
334 cmp = page_stat_cmp(data, stat); 334 cmp = page_stat_cmp(data, pstat);
335 if (cmp < 0) 335 if (cmp < 0)
336 node = &parent->rb_left; 336 node = &parent->rb_left;
337 else if (cmp > 0) 337 else if (cmp > 0)
@@ -345,10 +345,10 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool cre
345 345
346 data = zalloc(sizeof(*data)); 346 data = zalloc(sizeof(*data));
347 if (data != NULL) { 347 if (data != NULL) {
348 data->page = stat->page; 348 data->page = pstat->page;
349 data->order = stat->order; 349 data->order = pstat->order;
350 data->gfp_flags = stat->gfp_flags; 350 data->gfp_flags = pstat->gfp_flags;
351 data->migrate_type = stat->migrate_type; 351 data->migrate_type = pstat->migrate_type;
352 352
353 rb_link_node(&data->node, parent, node); 353 rb_link_node(&data->node, parent, node);
354 rb_insert_color(&data->node, &page_alloc_tree); 354 rb_insert_color(&data->node, &page_alloc_tree);
@@ -375,7 +375,7 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
375 unsigned int migrate_type = perf_evsel__intval(evsel, sample, 375 unsigned int migrate_type = perf_evsel__intval(evsel, sample,
376 "migratetype"); 376 "migratetype");
377 u64 bytes = kmem_page_size << order; 377 u64 bytes = kmem_page_size << order;
378 struct page_stat *stat; 378 struct page_stat *pstat;
379 struct page_stat this = { 379 struct page_stat this = {
380 .order = order, 380 .order = order,
381 .gfp_flags = gfp_flags, 381 .gfp_flags = gfp_flags,
@@ -401,21 +401,21 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
401 * This is to find the current page (with correct gfp flags and 401 * This is to find the current page (with correct gfp flags and
402 * migrate type) at free event. 402 * migrate type) at free event.
403 */ 403 */
404 stat = search_page(page, true); 404 pstat = search_page(page, true);
405 if (stat == NULL) 405 if (pstat == NULL)
406 return -ENOMEM; 406 return -ENOMEM;
407 407
408 stat->order = order; 408 pstat->order = order;
409 stat->gfp_flags = gfp_flags; 409 pstat->gfp_flags = gfp_flags;
410 stat->migrate_type = migrate_type; 410 pstat->migrate_type = migrate_type;
411 411
412 this.page = page; 412 this.page = page;
413 stat = search_page_alloc_stat(&this, true); 413 pstat = search_page_alloc_stat(&this, true);
414 if (stat == NULL) 414 if (pstat == NULL)
415 return -ENOMEM; 415 return -ENOMEM;
416 416
417 stat->nr_alloc++; 417 pstat->nr_alloc++;
418 stat->alloc_bytes += bytes; 418 pstat->alloc_bytes += bytes;
419 419
420 order_stats[order][migrate_type]++; 420 order_stats[order][migrate_type]++;
421 421
@@ -428,7 +428,7 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
428 u64 page; 428 u64 page;
429 unsigned int order = perf_evsel__intval(evsel, sample, "order"); 429 unsigned int order = perf_evsel__intval(evsel, sample, "order");
430 u64 bytes = kmem_page_size << order; 430 u64 bytes = kmem_page_size << order;
431 struct page_stat *stat; 431 struct page_stat *pstat;
432 struct page_stat this = { 432 struct page_stat this = {
433 .order = order, 433 .order = order,
434 }; 434 };
@@ -441,8 +441,8 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
441 nr_page_frees++; 441 nr_page_frees++;
442 total_page_free_bytes += bytes; 442 total_page_free_bytes += bytes;
443 443
444 stat = search_page(page, false); 444 pstat = search_page(page, false);
445 if (stat == NULL) { 445 if (pstat == NULL) {
446 pr_debug2("missing free at page %"PRIx64" (order: %d)\n", 446 pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
447 page, order); 447 page, order);
448 448
@@ -453,18 +453,18 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
453 } 453 }
454 454
455 this.page = page; 455 this.page = page;
456 this.gfp_flags = stat->gfp_flags; 456 this.gfp_flags = pstat->gfp_flags;
457 this.migrate_type = stat->migrate_type; 457 this.migrate_type = pstat->migrate_type;
458 458
459 rb_erase(&stat->node, &page_tree); 459 rb_erase(&pstat->node, &page_tree);
460 free(stat); 460 free(pstat);
461 461
462 stat = search_page_alloc_stat(&this, false); 462 pstat = search_page_alloc_stat(&this, false);
463 if (stat == NULL) 463 if (pstat == NULL)
464 return -ENOENT; 464 return -ENOENT;
465 465
466 stat->nr_free++; 466 pstat->nr_free++;
467 stat->free_bytes += bytes; 467 pstat->free_bytes += bytes;
468 468
469 return 0; 469 return 0;
470} 470}
@@ -640,9 +640,9 @@ static void print_page_summary(void)
640 nr_page_frees, total_page_free_bytes / 1024); 640 nr_page_frees, total_page_free_bytes / 1024);
641 printf("\n"); 641 printf("\n");
642 642
643 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests", 643 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
644 nr_alloc_freed, (total_alloc_freed_bytes) / 1024); 644 nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
645 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc-only requests", 645 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
646 nr_page_allocs - nr_alloc_freed, 646 nr_page_allocs - nr_alloc_freed,
647 (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024); 647 (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
648 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests", 648 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 476cdf7afcca..b63aeda719be 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -329,7 +329,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
329 fprintf(stdout, "\n\n"); 329 fprintf(stdout, "\n\n");
330 } 330 }
331 331
332 if (sort_order == default_sort_order && 332 if (sort_order == NULL &&
333 parent_pattern == default_parent_pattern) { 333 parent_pattern == default_parent_pattern) {
334 fprintf(stdout, "#\n# (%s)\n#\n", help); 334 fprintf(stdout, "#\n# (%s)\n#\n", help);
335 335
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1cb3436276d1..6a4d5d41c671 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -733,7 +733,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
733"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 733"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
734"Check /proc/sys/kernel/kptr_restrict.\n\n" 734"Check /proc/sys/kernel/kptr_restrict.\n\n"
735"Kernel%s samples will not be resolved.\n", 735"Kernel%s samples will not be resolved.\n",
736 !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? 736 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
737 " modules" : ""); 737 " modules" : "");
738 if (use_browser <= 0) 738 if (use_browser <= 0)
739 sleep(5); 739 sleep(5);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index e124741be187..e122970361f2 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2241,10 +2241,11 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
2241 if (err < 0) 2241 if (err < 0)
2242 goto out_error_mmap; 2242 goto out_error_mmap;
2243 2243
2244 if (!target__none(&trace->opts.target))
2245 perf_evlist__enable(evlist);
2246
2244 if (forks) 2247 if (forks)
2245 perf_evlist__start_workload(evlist); 2248 perf_evlist__start_workload(evlist);
2246 else
2247 perf_evlist__enable(evlist);
2248 2249
2249 trace->multiple_threads = evlist->threads->map[0] == -1 || 2250 trace->multiple_threads = evlist->threads->map[0] == -1 ||
2250 evlist->threads->nr > 1 || 2251 evlist->threads->nr > 1 ||
@@ -2272,6 +2273,11 @@ next_event:
2272 2273
2273 if (interrupted) 2274 if (interrupted)
2274 goto out_disable; 2275 goto out_disable;
2276
2277 if (done && !draining) {
2278 perf_evlist__disable(evlist);
2279 draining = true;
2280 }
2275 } 2281 }
2276 } 2282 }
2277 2283
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index d8bb616ff57c..d05b77cf35f7 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1084,6 +1084,8 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
1084 * 1084 *
1085 * TODO:Group name support 1085 * TODO:Group name support
1086 */ 1086 */
1087 if (!arg)
1088 return -EINVAL;
1087 1089
1088 ptr = strpbrk(arg, ";=@+%"); 1090 ptr = strpbrk(arg, ";=@+%");
1089 if (ptr && *ptr == '=') { /* Event name */ 1091 if (ptr && *ptr == '=') { /* Event name */
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index b5bf9d5efeaf..2a76e14db732 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -578,10 +578,12 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
578 /* Search child die for local variables and parameters. */ 578 /* Search child die for local variables and parameters. */
579 if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { 579 if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
580 /* Search again in global variables */ 580 /* Search again in global variables */
581 if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) 581 if (!die_find_variable_at(&pf->cu_die, pf->pvar->var,
582 0, &vr_die)) {
582 pr_warning("Failed to find '%s' in this function.\n", 583 pr_warning("Failed to find '%s' in this function.\n",
583 pf->pvar->var); 584 pf->pvar->var);
584 ret = -ENOENT; 585 ret = -ENOENT;
586 }
585 } 587 }
586 if (ret >= 0) 588 if (ret >= 0)
587 ret = convert_variable(&vr_die, pf); 589 ret = convert_variable(&vr_die, pf);
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 5a161175bbd4..a9099d9f8f39 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -26,7 +26,7 @@ override define EMIT_TESTS
26 $(MAKE) -s -C ebb emit_tests 26 $(MAKE) -s -C ebb emit_tests
27endef 27endef
28 28
29DEFAULT_INSTALL := $(INSTALL_RULE) 29DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
30override define INSTALL_RULE 30override define INSTALL_RULE
31 $(DEFAULT_INSTALL_RULE) 31 $(DEFAULT_INSTALL_RULE)
32 $(MAKE) -C ebb install 32 $(MAKE) -C ebb install
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 1b616fa79e93..6bff955e1d55 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -1,4 +1,4 @@
1TEST_PROGS := tm-resched-dscr tm-syscall 1TEST_PROGS := tm-resched-dscr
2 2
3all: $(TEST_PROGS) 3all: $(TEST_PROGS)
4 4