author	Ralf Baechle <ralf@linux-mips.org>	2014-06-04 16:53:02 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2014-06-04 16:53:02 -0400
commit	f8647b506d7116a1a3accd8d618184096e85f50b (patch)
tree	f10bc7201fda2a36c035548a0ea62210ad57adb6
parent	1d421ca9d7edbac1eb118631ee039d50ab54771e (diff)
parent	f7a89f1b8eb598ac5da61c9795b3d847baa73d12 (diff)
Merge branch '3.15-fixes' into mips-for-linux-next
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci | 2
-rw-r--r--  Documentation/DocBook/drm.tmpl | 12
-rw-r--r--  Documentation/DocBook/media/Makefile | 2
-rw-r--r--  Documentation/debugging-via-ohci1394.txt | 13
-rw-r--r--  Documentation/device-mapper/thin-provisioning.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/mdio-gpio.txt | 2
-rw-r--r--  Documentation/email-clients.txt | 15
-rw-r--r--  Documentation/filesystems/proc.txt | 5
-rw-r--r--  Documentation/hwmon/sysfs-interface | 14
-rw-r--r--  Documentation/java.txt | 8
-rw-r--r--  Documentation/networking/filter.txt | 2
-rw-r--r--  Documentation/networking/packet_mmap.txt | 2
-rw-r--r--  Documentation/virtual/kvm/api.txt | 2
-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/armada-380.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/armada-385.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/at91sam9260.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/exynos4412-trats2.dts | 2
-rw-r--r--  arch/arm/boot/dts/exynos5250-arndale.dts | 1
-rw-r--r--  arch/arm/boot/dts/exynos5420-arndale-octa.dts | 12
-rw-r--r--  arch/arm/boot/dts/exynos5420.dtsi | 24
-rw-r--r--  arch/arm/boot/dts/imx53-mba53.dts | 2
-rw-r--r--  arch/arm/boot/dts/ste-ccu8540.dts | 1
-rw-r--r--  arch/arm/common/bL_switcher.c | 10
-rw-r--r--  arch/arm/configs/exynos_defconfig | 1
-rw-r--r--  arch/arm/include/asm/trusted_foundations.h | 2
-rw-r--r--  arch/arm/include/asm/uaccess.h | 3
-rw-r--r--  arch/arm/kernel/entry-header.S | 4
-rw-r--r--  arch/arm/kernel/unwind.c | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9260_devices.c | 8
-rw-r--r--  arch/arm/mach-exynos/firmware.c | 15
-rw-r--r--  arch/arm/mach-imx/devices/platform-ipu-core.c | 2
-rw-r--r--  arch/arm/mach-mvebu/mvebu-soc-id.c | 13
-rw-r--r--  arch/arm/mach-omap2/board-flash.c | 2
-rw-r--r--  arch/arm/mach-omap2/cclock3xxx_data.c | 3
-rw-r--r--  arch/arm/mach-omap2/cpuidle44xx.c | 25
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 2
-rw-r--r--  arch/arm/mm/proc-v7m.S | 8
-rw-r--r--  arch/arm/plat-omap/dma.c | 10
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 2
-rw-r--r--  arch/ia64/include/asm/unistd.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/ia64/kernel/entry.S | 1
-rw-r--r--  arch/m68k/include/asm/unistd.h | 2
-rw-r--r--  arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 1
-rw-r--r--  arch/mips/Makefile | 4
-rw-r--r--  arch/mips/include/asm/cpu-info.h | 4
-rw-r--r--  arch/mips/include/asm/msa.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h | 6
-rw-r--r--  arch/mips/kernel/branch.c | 8
-rw-r--r--  arch/mips/kernel/ptrace.c | 14
-rw-r--r--  arch/mips/kernel/traps.c | 6
-rw-r--r--  arch/mips/loongson/common/cs5536/cs5536_mfgpt.c | 11
-rw-r--r--  arch/mips/mm/c-octeon.c | 2
-rw-r--r--  arch/mips/mm/page.c | 4
-rw-r--r--  arch/mips/mti-malta/malta-memory.c | 2
-rw-r--r--  arch/mips/pci/pci-rc32434.c | 1
-rw-r--r--  arch/parisc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 1
-rw-r--r--  arch/powerpc/Makefile | 4
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 7
-rw-r--r--  arch/powerpc/include/asm/sections.h | 11
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 1
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/kernel/kvm.c | 2
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 2
-rw-r--r--  arch/powerpc/kernel/time.c | 3
-rw-r--r--  arch/powerpc/kvm/book3s.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 104
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 6
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-ioda.c | 3
-rw-r--r--  arch/s390/crypto/aes_s390.c | 3
-rw-r--r--  arch/s390/crypto/des_s390.c | 3
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 1
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 6
-rw-r--r--  arch/sparc/kernel/sysfs.c | 2
-rw-r--r--  arch/sparc/lib/NG2memcpy.S | 1
-rw-r--r--  arch/sparc/mm/fault_64.c | 16
-rw-r--r--  arch/sparc/mm/tsb.c | 14
-rw-r--r--  arch/x86/include/asm/page_64_types.h | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/kvm/vmx.c | 7
-rw-r--r--  arch/x86/kvm/x86.c | 6
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 2
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 17
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/ac.c | 117
-rw-r--r--  drivers/acpi/acpi_platform.c | 1
-rw-r--r--  drivers/acpi/acpi_processor.c | 1
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 4
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 7
-rw-r--r--  drivers/acpi/battery.c | 329
-rw-r--r--  drivers/acpi/blacklist.c | 21
-rw-r--r--  drivers/acpi/cm_sbs.c | 105
-rw-r--r--  drivers/acpi/thermal.c | 2
-rw-r--r--  drivers/acpi/video.c | 16
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci.c | 15
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/ahci_imx.c | 179
-rw-r--r--  drivers/ata/libahci.c | 7
-rw-r--r--  drivers/ata/libata-core.c | 9
-rw-r--r--  drivers/block/virtio_blk.c | 4
-rw-r--r--  drivers/char/random.c | 7
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 8
-rw-r--r--  drivers/clk/bcm/clk-kona-setup.c | 33
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 64
-rw-r--r--  drivers/clk/bcm/clk-kona.h | 28
-rw-r--r--  drivers/clk/clk-divider.c | 37
-rw-r--r--  drivers/clk/clk.c | 74
-rw-r--r--  drivers/clk/shmobile/clk-mstp.c | 9
-rw-r--r--  drivers/clk/socfpga/clk-pll.c | 7
-rw-r--r--  drivers/clk/socfpga/clk.c | 23
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 4
-rw-r--r--  drivers/clk/tegra/clk-pll.c | 66
-rw-r--r--  drivers/clocksource/tcb_clksrc.c | 8
-rw-r--r--  drivers/clocksource/timer-marco.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 16
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 6
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 34
-rw-r--r--  drivers/crypto/caam/error.c | 10
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/dma/dw/core.c | 11
-rw-r--r--  drivers/dma/mv_xor.c | 8
-rw-r--r--  drivers/dma/sa11x0-dma.c | 4
-rw-r--r--  drivers/firewire/core.h | 4
-rw-r--r--  drivers/firewire/ohci.c | 2
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 1
-rw-r--r--  drivers/gpio/gpio-ich.c | 4
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 365
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 130
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 4
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 67
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 8
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 38
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 2
-rw-r--r--  drivers/input/keyboard/Kconfig | 2
-rw-r--r--  drivers/input/keyboard/pxa27x_keypad.c | 7
-rw-r--r--  drivers/input/mouse/Kconfig | 2
-rw-r--r--  drivers/input/mouse/synaptics.c | 166
-rw-r--r--  drivers/input/serio/ambakmi.c | 3
-rw-r--r--  drivers/input/touchscreen/Kconfig | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 61
-rw-r--r--  drivers/md/dm-mpath.c | 12
-rw-r--r--  drivers/md/dm-thin.c | 29
-rw-r--r--  drivers/media/i2c/ov7670.c | 2
-rw-r--r--  drivers/media/i2c/s5c73m3/s5c73m3-core.c | 2
-rw-r--r--  drivers/media/media-device.c | 1
-rw-r--r--  drivers/media/platform/davinci/vpbe_display.c | 16
-rw-r--r--  drivers/media/platform/davinci/vpfe_capture.c | 2
-rw-r--r--  drivers/media/platform/davinci/vpif_capture.c | 34
-rw-r--r--  drivers/media/platform/davinci/vpif_display.c | 35
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-core.c | 2
-rw-r--r--  drivers/media/tuners/fc2580.c | 6
-rw-r--r--  drivers/media/tuners/fc2580_priv.h | 1
-rw-r--r--  drivers/media/usb/dvb-usb-v2/Makefile | 1
-rw-r--r--  drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 48
-rw-r--r--  drivers/media/usb/gspca/sonixb.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 12
-rw-r--r--  drivers/net/bonding/bond_alb.c | 54
-rw-r--r--  drivers/net/bonding/bond_main.c | 134
-rw-r--r--  drivers/net/bonding/bond_options.c | 1
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/can/c_can/Kconfig | 7
-rw-r--r--  drivers/net/can/c_can/c_can.c | 36
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 14
-rw-r--r--  drivers/net/ethernet/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 110
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 181
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 47
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 108
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 133
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 706
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/jme.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 54
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 57
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 17
-rw-r--r--  drivers/net/macvlan.c | 18
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 4
-rw-r--r--  drivers/net/phy/phy.c | 16
-rw-r--r--  drivers/net/phy/phy_device.c | 4
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/net/xen-netback/common.h | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 30
-rw-r--r--  drivers/net/xen-netback/netback.c | 102
-rw-r--r--  drivers/of/base.c | 4
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 4
-rw-r--r--  drivers/pci/pci.c | 5
-rw-r--r--  drivers/pinctrl/vt8500/pinctrl-wmt.c | 23
-rw-r--r--  drivers/ptp/Kconfig | 3
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 3
-rw-r--r--  drivers/sh/Makefile | 14
-rw-r--r--  drivers/sh/pm_runtime.c | 20
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c | 16
-rw-r--r--  drivers/spi/spi-qup.c | 2
-rw-r--r--  drivers/spi/spi.c | 124
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 7
-rw-r--r--  drivers/staging/imx-drm/imx-tve.c | 2
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c | 13
-rw-r--r--  drivers/staging/media/sn9c102/sn9c102_devtable.h | 2
-rw-r--r--  drivers/staging/rtl8723au/os_dep/os_intfs.c | 2
-rw-r--r--  drivers/staging/rtl8723au/os_dep/usb_ops_linux.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 28
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 1
-rw-r--r--  drivers/target/target_core_device.c | 12
-rw-r--r--  drivers/target/target_core_transport.c | 2
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 8
-rw-r--r--  fs/afs/cmservice.c | 19
-rw-r--r--  fs/afs/internal.h | 2
-rw-r--r--  fs/afs/rxrpc.c | 86
-rw-r--r--  fs/btrfs/ioctl.c | 6
-rw-r--r--  fs/btrfs/send.c | 2
-rw-r--r--  fs/dcache.c | 153
-rw-r--r--  fs/kernfs/file.c | 17
-rw-r--r--  fs/nfsd/nfs4acl.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 15
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 8
-rw-r--r--  fs/splice.c | 6
-rw-r--r--  fs/sysfs/file.c | 3
-rw-r--r--  fs/sysfs/mount.c | 3
-rw-r--r--  fs/xfs/xfs_export.c | 2
-rw-r--r--  fs/xfs/xfs_file.c | 8
-rw-r--r--  fs/xfs/xfs_iops.c | 12
-rw-r--r--  fs/xfs/xfs_qm.c | 26
-rw-r--r--  fs/xfs/xfs_super.c | 4
-rw-r--r--  include/linux/amba/bus.h | 1
-rw-r--r--  include/linux/cgroup.h | 15
-rw-r--r--  include/linux/dmaengine.h | 1
-rw-r--r--  include/linux/if_macvlan.h | 1
-rw-r--r--  include/linux/if_vlan.h | 15
-rw-r--r--  include/linux/kernfs.h | 19
-rw-r--r--  include/linux/mlx4/qp.h | 11
-rw-r--r--  include/linux/net.h | 15
-rw-r--r--  include/linux/netdevice.h | 34
-rw-r--r--  include/linux/of.h | 2
-rw-r--r--  include/linux/of_mdio.h | 7
-rw-r--r--  include/linux/omap-dma.h | 2
-rw-r--r--  include/linux/perf_event.h | 2
-rw-r--r--  include/linux/rtnetlink.h | 5
-rw-r--r--  include/linux/sched.h | 9
-rw-r--r--  include/net/cfg80211.h | 12
-rw-r--r--  include/net/ip6_route.h | 1
-rw-r--r--  include/net/netns/ipv4.h | 9
-rw-r--r--  include/uapi/asm-generic/unistd.h | 4
-rw-r--r--  include/uapi/linux/nl80211.h | 4
-rw-r--r--  kernel/cgroup.c | 2
-rw-r--r--  kernel/cgroup_freezer.c | 116
-rw-r--r--  kernel/cpu.c | 6
-rw-r--r--  kernel/events/core.c | 174
-rw-r--r--  kernel/futex.c | 52
-rw-r--r--  kernel/kexec.c | 8
-rw-r--r--  kernel/locking/rtmutex.c | 32
-rw-r--r--  kernel/sched/core.c | 70
-rw-r--r--  kernel/sched/cpudeadline.c | 37
-rw-r--r--  kernel/sched/cpudeadline.h | 6
-rw-r--r--  kernel/sched/cpupri.c | 10
-rw-r--r--  kernel/sched/cpupri.h | 2
-rw-r--r--  kernel/sched/cputime.c | 32
-rw-r--r--  kernel/sched/deadline.c | 5
-rw-r--r--  kernel/sched/fair.c | 16
-rw-r--r--  mm/filemap.c | 6
-rw-r--r--  mm/madvise.c | 2
-rw-r--r--  mm/memcontrol.c | 27
-rw-r--r--  mm/memory-failure.c | 17
-rw-r--r--  net/8021q/vlan.c | 1
-rw-r--r--  net/8021q/vlan_dev.c | 52
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 2
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 3
-rw-r--r--  net/batman-adv/fragmentation.c | 11
-rw-r--r--  net/batman-adv/gateway_client.c | 11
-rw-r--r--  net/batman-adv/hard-interface.c | 2
-rw-r--r--  net/batman-adv/originator.c | 62
-rw-r--r--  net/bridge/br_netfilter.c | 4
-rw-r--r--  net/ceph/messenger.c | 20
-rw-r--r--  net/ceph/osdmap.c | 5
-rw-r--r--  net/core/dev.c | 102
-rw-r--r--  net/core/neighbour.c | 4
-rw-r--r--  net/core/net_namespace.c | 2
-rw-r--r--  net/core/rtnetlink.c | 33
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/core/utils.c | 8
-rw-r--r--  net/dsa/dsa.c | 3
-rw-r--r--  net/ipv4/af_inet.c | 36
-rw-r--r--  net/ipv4/fib_semantics.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 8
-rw-r--r--  net/ipv4/ip_forward.c | 54
-rw-r--r--  net/ipv4/ip_fragment.c | 5
-rw-r--r--  net/ipv4/ip_output.c | 51
-rw-r--r--  net/ipv4/ip_tunnel.c | 4
-rw-r--r--  net/ipv4/ip_vti.c | 5
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 5
-rw-r--r--  net/ipv4/ping.c | 6
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 42
-rw-r--r--  net/ipv4/xfrm4_output.c | 32
-rw-r--r--  net/ipv4/xfrm4_protocol.c | 19
-rw-r--r--  net/ipv6/ip6_offload.c | 6
-rw-r--r--  net/ipv6/ip6_output.c | 8
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/ip6_vti.c | 8
-rw-r--r--  net/ipv6/ndisc.c | 7
-rw-r--r--  net/ipv6/netfilter.c | 6
-rw-r--r--  net/ipv6/route.c | 24
-rw-r--r--  net/ipv6/tcpv6_offload.c | 2
-rw-r--r--  net/ipv6/xfrm6_output.c | 22
-rw-r--r--  net/ipv6/xfrm6_protocol.c | 11
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 1
-rw-r--r--  net/mac80211/mlme.c | 20
-rw-r--r--  net/mac80211/offchannel.c | 27
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mac80211/sta_info.c | 3
-rw-r--r--  net/mac80211/status.c | 5
-rw-r--r--  net/mac80211/trace.h | 4
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/mac80211/vht.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--  net/netfilter/nf_tables_core.c | 49
-rw-r--r--  net/netfilter/nfnetlink.c | 8
-rw-r--r--  net/rxrpc/ar-key.c | 2
-rw-r--r--  net/sched/cls_tcindex.c | 30
-rw-r--r--  net/wireless/scan.c | 12
-rw-r--r--  net/wireless/sme.c | 2
-rwxr-xr-x  scripts/checksyscalls.sh | 5
-rw-r--r--  sound/core/pcm_dmaengine.c | 6
-rw-r--r--  sound/pci/hda/hda_intel.c | 3
-rw-r--r--  tools/Makefile | 6
-rw-r--r--  tools/lib/lockdep/Makefile | 5
396 files changed, 5489 insertions(+), 2523 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index a3c5a6685036..ab8d76dfaa80 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -117,7 +117,7 @@ Description:
 
 What:		/sys/bus/pci/devices/.../vpd
 Date:		February 2008
-Contact:	Ben Hutchings <bhutchings@solarflare.com>
+Contact:	Ben Hutchings <bwh@kernel.org>
 Description:
 	A file named vpd in a device directory will be a
 	binary file containing the Vital Product Data for the
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 677a02553ec0..ba60d93c1855 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -79,7 +79,7 @@
   <partintro>
     <para>
       This first part of the DRM Developer's Guide documents core DRM code,
-      helper libraries for writting drivers and generic userspace interfaces
+      helper libraries for writing drivers and generic userspace interfaces
       exposed by DRM drivers.
     </para>
   </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
     providing a solution to every graphics memory-related problems, GEM
     identified common code between drivers and created a support library to
     share it. GEM has simpler initialization and execution requirements than
-    TTM, but has no video RAM management capabitilies and is thus limited to
+    TTM, but has no video RAM management capabilities and is thus limited to
     UMA devices.
   </para>
   <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
     vice versa. Drivers must use the kernel dma-buf buffer sharing framework
     to manage the PRIME file descriptors. Similar to the mode setting
     API PRIME is agnostic to the underlying buffer object manager, as
-    long as handles are 32bit unsinged integers.
+    long as handles are 32bit unsigned integers.
   </para>
   <para>
     While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
     first create properties and then create and associate individual instances
     of those properties to objects. A property can be instantiated multiple
     times and associated with different objects. Values are stored in property
-    instances, and all other property information are stored in the propery
+    instances, and all other property information are stored in the property
     and shared between all instances of the property.
   </para>
   <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
   <sect1>
     <title>Legacy Support Code</title>
     <para>
-      The section very brievely covers some of the old legacy support code which
+      The section very briefly covers some of the old legacy support code which
      is only used by old DRM drivers which have done a so-called shadow-attach
      to the underlying device instead of registering as a real driver. This
-      also includes some of the old generic buffer mangement and command
+      also includes some of the old generic buffer management and command
      submission code. Do not use any of this in new and modern drivers.
    </para>
 
diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
index f9fd615427fb..1d27f0a1abd1 100644
--- a/Documentation/DocBook/media/Makefile
+++ b/Documentation/DocBook/media/Makefile
@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
 #
 
 install_media_images = \
-	$(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+	$(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
 	$(Q)base64 -d $< >$@
diff --git a/Documentation/debugging-via-ohci1394.txt b/Documentation/debugging-via-ohci1394.txt
index fa0151a712f9..5c9a567b3fac 100644
--- a/Documentation/debugging-via-ohci1394.txt
+++ b/Documentation/debugging-via-ohci1394.txt
@@ -25,9 +25,11 @@ using data transfer rates in the order of 10MB/s or more.
 With most FireWire controllers, memory access is limited to the low 4 GB
 of physical address space. This can be a problem on IA64 machines where
 memory is located mostly above that limit, but it is rarely a problem on
-more common hardware such as x86, x86-64 and PowerPC. However, at least
-Agere/LSI FW643e and FW643e2 controllers are known to support access to
-physical addresses above 4 GB.
+more common hardware such as x86, x86-64 and PowerPC.
+
+At least LSI FW643e and FW643e2 controllers are known to support access to
+physical addresses above 4 GB, but this feature is currently not enabled by
+Linux.
 
 Together with a early initialization of the OHCI-1394 controller for debugging,
 this facility proved most useful for examining long debugs logs in the printk
@@ -101,8 +103,9 @@ Step-by-step instructions for using firescope with early OHCI initialization:
    compliant, they are based on TI PCILynx chips and require drivers for Win-
    dows operating systems.
 
-   The mentioned kernel log message contains ">4 GB phys DMA" in case of
-   OHCI-1394 controllers which support accesses above this limit.
+   The mentioned kernel log message contains the string "physUB" if the
+   controller implements a writable Physical Upper Bound register. This is
+   required for physical DMA above 4 GB (but not utilized by Linux yet).
 
 2) Establish a working FireWire cable connection:
 
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 05a27e9442bd..2f5173500bd9 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -309,7 +309,10 @@ ii) Status
     error_if_no_space|queue_if_no_space
 	If the pool runs out of data or metadata space, the pool will
 	either queue or error the IO destined to the data device. The
-	default is to queue the IO until more space is added.
+	default is to queue the IO until more space is added or the
+	'no_space_timeout' expires. The 'no_space_timeout' dm-thin-pool
+	module parameter can be used to change this timeout -- it
+	defaults to 60 seconds but may be disabled using a value of 0.
 
 iii) Messages
 
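Note: a minimal sketch of how such a dm-thin-pool module parameter is
typically wired up (the variable name no_space_timeout_secs is illustrative,
not quoted from dm-thin.c):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static unsigned no_space_timeout_secs = 60;	/* 0 disables the timeout */
    module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
    MODULE_PARM_DESC(no_space_timeout,
		     "Out of data space queue IO timeout in seconds");

With mode 0644 the value can also be changed at runtime through
/sys/module/dm_thin_pool/parameters/no_space_timeout.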
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index 5992dceec7af..02a25d99ca61 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -43,7 +43,7 @@ Example
 		clock-output-names =
 			"tpu0", "mmcif1", "sdhi3", "sdhi2",
 			"sdhi1", "sdhi0", "mmcif0";
-		renesas,clock-indices = <
+		clock-indices = <
 			R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3
 			R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0
 			R8A7790_CLK_MMCIF0
diff --git a/Documentation/devicetree/bindings/net/mdio-gpio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt
index c79bab025369..8dbcf8295c6c 100644
--- a/Documentation/devicetree/bindings/net/mdio-gpio.txt
+++ b/Documentation/devicetree/bindings/net/mdio-gpio.txt
@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-	mdio-gpio0 = <&mdio0>;
+	mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index e9f5daccbd02..4e30ebaa9e5b 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension. Download the file from:
-  https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file. This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 8b9cd8eb3f91..264bcde0c51c 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1245,8 +1245,9 @@ second). The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts serviced since boot time, for each
 of the possible system interrupts. The first column is the total of all
-interrupts serviced; each subsequent column is the total for that particular
-interrupt.
+interrupts serviced including unnumbered architecture specific interrupts;
+each subsequent column is the total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
 
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 79f8257dd790..2cc95ad46604 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
 		from the max value.
 		RW
 
+temp[1-*]_min_hyst
+		Temperature hysteresis value for min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the min value.
+		RW
+
 temp[1-*]_input Temperature input value.
 		Unit: millidegree Celsius
 		RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit	Temperature critical min value, typically lower than
 		Unit: millidegree Celsius
 		RW
 
+temp[1-*]_lcrit_hyst
+		Temperature hysteresis value for critical min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the critical min value.
+		RW
+
 temp[1-*]_offset
 		Temperature offset which is added to the temperature reading
 		by the chip.
diff --git a/Documentation/java.txt b/Documentation/java.txt
index e6a723281547..418020584ccc 100644
--- a/Documentation/java.txt
+++ b/Documentation/java.txt
@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
 		break;
 	case CP_CLASS:
 	case CP_STRING:
+	case CP_METHODTYPE:
 		seekerr = fseek(classfile, 2, SEEK_CUR);
 		break;
+	case CP_METHODHANDLE:
+		seekerr = fseek(classfile, 3, SEEK_CUR);
+		break;
	case CP_INTEGER:
	case CP_FLOAT:
	case CP_FIELDREF:
	case CP_METHODREF:
	case CP_INTERFACEMETHODREF:
	case CP_NAMEANDTYPE:
+	case CP_INVOKEDYNAMIC:
 		seekerr = fseek(classfile, 4, SEEK_CUR);
 		break;
 	case CP_LONG:
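Note: the seek distances follow from the layout of these constant pool
entries (taken from the JVM class file specification, section 4.4, not from
java.txt itself):

    /* Payload after the 1-byte tag that skip_constant() jumps over:
     *
     *   CONSTANT_MethodHandle_info  { u1 reference_kind; u2 reference_index; }  -> 3 bytes
     *   CONSTANT_MethodType_info    { u2 descriptor_index; }                    -> 2 bytes
     *   CONSTANT_InvokeDynamic_info { u2 bootstrap_method_attr_index;
     *                                 u2 name_and_type_index; }                 -> 4 bytes
     */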
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 81f940f4e884..e3ba753cb714 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
   mark                                  skb->mark
   queue                                 skb->queue_mapping
   hatype                                skb->dev->type
-  rxhash                                skb->rxhash
+  rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 6fea79efb4cb..38112d512f47 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
 
 Currently implemented fanout policies are:
 
-  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
   - PACKET_FANOUT_LB: schedule to socket by round-robin
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a9380ba54c8e..b4f53653c106 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2126,7 +2126,7 @@ into the hash PTE second double word).
 4.75 KVM_IRQFD
 
 Capability: KVM_CAP_IRQFD
-Architectures: x86
+Architectures: x86 s390
 Type: vm ioctl
 Parameters: struct kvm_irqfd (in)
 Returns: 0 on success, -1 on error
diff --git a/MAINTAINERS b/MAINTAINERS
index bb36478f3b92..bf5d82d28748 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -537,7 +537,7 @@ L:	linux-alpha@vger.kernel.org
 F:	arch/alpha/
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:	Vince Bridgers <vbridgers2013@gmail.com
+M:	Vince Bridgers <vbridgers2013@gmail.com>
 L:	netdev@vger.kernel.org
 L:	nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:	Maintained
@@ -6523,10 +6523,10 @@ T:	git git://openrisc.net/~jonas/linux
 F:	arch/openrisc/
 
 OPENVSWITCH
-M:	Jesse Gross <jesse@nicira.com>
+M:	Pravin Shelar <pshelar@nicira.com>
 L:	dev@openvswitch.org
 W:	http://openvswitch.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:	Maintained
 F:	net/openvswitch/
 
@@ -7414,6 +7414,14 @@ F:	drivers/rpmsg/
 F:	Documentation/rpmsg.txt
 F:	include/linux/rpmsg.h
 
+RESET CONTROLLER FRAMEWORK
+M:	Philipp Zabel <p.zabel@pengutronix.de>
+S:	Maintained
+F:	drivers/reset/
+F:	Documentation/devicetree/bindings/reset/
+F:	include/linux/reset.h
+F:	include/linux/reset-controller.h
+
 RFKILL
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
diff --git a/Makefile b/Makefile
index 8a8440a3578e..cdaa5b6a1c4d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
index 068031f0f263..6d0f03c98ee9 100644
--- a/arch/arm/boot/dts/armada-380.dtsi
+++ b/arch/arm/boot/dts/armada-380.dtsi
@@ -99,7 +99,7 @@
 			pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x1800 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
index e2919f02e1d4..da801964a257 100644
--- a/arch/arm/boot/dts/armada-385.dtsi
+++ b/arch/arm/boot/dts/armada-385.dtsi
@@ -110,7 +110,7 @@
 			pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x1800 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
@@ -131,7 +131,7 @@
 			pcie@4,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x2000 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
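Note: in the standard PCI device-tree binding the first cell of "reg"
encodes bus/device/function as 00000000 bbbbbbbb dddddfff 00000000, so the
cell has to match the unit address in the node name; that is exactly what
these two hunks fix. A small helper illustrating the encoding (assumed from
the binding, not taken from the kernel tree):

    /* phys.hi cell of a PCI "reg"/"assigned-addresses" entry */
    static inline unsigned int pci_dt_phys_hi(unsigned int dev, unsigned int fn)
    {
    	return (dev << 11) | (fn << 8);	/* pcie@3,0 -> 0x1800, pcie@4,0 -> 0x2000 */
    }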
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 366fc2cbcd64..c0e0eae16a27 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -641,7 +641,7 @@
 			trigger@3 {
 				reg = <3>;
 				trigger-name = "external";
-				trigger-value = <0x13>;
+				trigger-value = <0xd>;
 				trigger-external;
 			};
 		};
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 9583563dd0ef..8a558b7ac999 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -503,7 +503,7 @@
 	status = "okay";
 
 	ak8975@0c {
-		compatible = "ak,ak8975";
+		compatible = "asahi-kasei,ak8975";
 		reg = <0x0c>;
 		gpios = <&gpj0 7 0>;
 	};
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 090f9830b129..cde19c818667 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -107,6 +107,7 @@
 				regulator-name = "VDD_IOPERI_1.8V";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
 				op_mode = <1>;
 			};
 
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 80a3bf4c5986..896a2a6619e0 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -364,16 +364,4 @@
 			gpio-key,wakeup;
 		};
 	};
-
-	amba {
-		mdma1: mdma@11C10000 {
-			/*
-			 * MDMA1 can support both secure and non-secure
-			 * AXI transactions. When this is enabled in the kernel
-			 * for boards that run in secure mode, we are getting
-			 * imprecise external aborts causing the kernel to oops.
-			 */
-			status = "disabled";
-		};
-	};
 };
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index c3a9a66c5767..b69fbcb7dcb8 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -219,16 +219,6 @@
 			reg = <0x100440C0 0x20>;
 		};
 
-		mau_pd: power-domain@100440E0 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x100440E0 0x20>;
-		};
-
-		g2d_pd: power-domain@10044100 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x10044100 0x20>;
-		};
-
 		msc_pd: power-domain@10044120 {
 			compatible = "samsung,exynos4210-pd";
 			reg = <0x10044120 0x20>;
@@ -336,6 +326,13 @@
 			#dma-cells = <1>;
 			#dma-channels = <8>;
 			#dma-requests = <1>;
+			/*
+			 * MDMA1 can support both secure and non-secure
+			 * AXI transactions. When this is enabled in the kernel
+			 * for boards that run in secure mode, we are getting
+			 * imprecise external aborts causing the kernel to oops.
+			 */
+			status = "disabled";
 		};
 	};
 
@@ -385,7 +382,7 @@
 	spi_0: spi@12d20000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d20000 0x100>;
-		interrupts = <0 66 0>;
+		interrupts = <0 68 0>;
 		dmas = <&pdma0 5
 			&pdma0 4>;
 		dma-names = "tx", "rx";
@@ -401,7 +398,7 @@
 	spi_1: spi@12d30000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d30000 0x100>;
-		interrupts = <0 67 0>;
+		interrupts = <0 69 0>;
 		dmas = <&pdma1 5
 			&pdma1 4>;
 		dma-names = "tx", "rx";
@@ -417,7 +414,7 @@
 	spi_2: spi@12d40000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d40000 0x100>;
-		interrupts = <0 68 0>;
+		interrupts = <0 70 0>;
 		dmas = <&pdma0 7
 			&pdma0 6>;
 		dma-names = "tx", "rx";
@@ -730,6 +727,5 @@
 		interrupts = <0 112 0>;
 		clocks = <&clock 471>;
 		clock-names = "secss";
-		samsung,power-domain = <&g2d_pd>;
 	};
 };
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index 7c8c12969892..a3431d784870 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -244,7 +244,7 @@
 &tve {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_vga_sync_1>;
-	i2c-ddc-bus = <&i2c3>;
+	ddc-i2c-bus = <&i2c3>;
 	fsl,tve-mode = "vga";
 	fsl,hsync-pin = <4>;
 	fsl,vsync-pin = <6>;
diff --git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts
index 7f3baf51a3a9..32dd55e5f4e6 100644
--- a/arch/arm/boot/dts/ste-ccu8540.dts
+++ b/arch/arm/boot/dts/ste-ccu8540.dts
@@ -18,6 +18,7 @@
 	compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
 
 	memory@0 {
+		device_type = "memory";
 		reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
 	};
 
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index f01c0ee0c87e..490f3dced749 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -433,8 +433,12 @@ static void bL_switcher_restore_cpus(void)
 {
 	int i;
 
-	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
-		cpu_up(i);
+	for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
+		struct device *cpu_dev = get_cpu_device(i);
+		int ret = device_online(cpu_dev);
+		if (ret)
+			dev_err(cpu_dev, "switcher: unable to restore CPU\n");
+	}
 }
 
 static int bL_switcher_halve_cpus(void)
@@ -521,7 +525,7 @@ static int bL_switcher_halve_cpus(void)
 			continue;
 		}
 
-		ret = cpu_down(i);
+		ret = device_offline(get_cpu_device(i));
 		if (ret) {
 			bL_switcher_restore_cpus();
 			return ret;
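Note: device_online()/device_offline() go through the CPU's struct device
rather than raw cpu_up()/cpu_down(), which keeps dev->offline, and thus
/sys/devices/system/cpu/cpuN/online, in sync with the actual hotplug state.
A minimal sketch of the pattern (park_cpu() is an illustrative name, not
from the patch):

    #include <linux/cpu.h>
    #include <linux/device.h>

    static int park_cpu(int cpu)
    {
    	struct device *cpu_dev = get_cpu_device(cpu);

    	if (!cpu_dev)
    		return -ENODEV;
    	return device_offline(cpu_dev);	/* instead of cpu_down(cpu) */
    }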
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 4ce7b70ea901..e07a227ec0db 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -65,6 +65,7 @@ CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_S3C2410=y
 CONFIG_DEBUG_GPIO=y
 # CONFIG_HWMON is not set
diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h
index b5f7705abcb0..624e1d436c6c 100644
--- a/arch/arm/include/asm/trusted_foundations.h
+++ b/arch/arm/include/asm/trusted_foundations.h
@@ -54,7 +54,9 @@ static inline void register_trusted_foundations(
 	 */
 	pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
 	pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_SMP)
 	setup_max_cpus = 0;
+#endif
 	cpu_idle_poll_ctrl(true);
 }
 
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 12c3a5decc60..75d95799b6e6 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
 #define __put_user_check(x,p)						\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
+		const typeof(*(p)) __user *__tmp_p = (p);		\
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
-		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
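Note: evaluating the macro argument into an ordinary temporary before
binding it to a fixed register sidesteps a documented hazard of GCC
explicit-register variables: a function call evaluated while initializing
one of them can clobber registers already holding the others. A hedged
sketch of the safe ordering (get_ptr() and bind_args_sketch() are
illustrative names, not from the kernel):

    extern int *get_ptr(void);	/* may be an arbitrary expression in (p) */

    static inline void bind_args_sketch(int x)
    {
    	int *tmp_p = get_ptr();		/* call happens before r0/r2 go live */
    	register int r2v asm("r2") = x;
    	register int *p asm("r0") = tmp_p;
    	asm volatile("" :: "r" (r2v), "r" (p));	/* keep both registers live */
    }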
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1420725142ca..efb208de75ec 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -132,6 +132,10 @@
 	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
 	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
 
+	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
+	@ unpredictable
+	bic	r4, #1
+
 	@ write basic exception frame
 	stmdb	r2!, {r1, r3-r5}
 	ldmia	sp, {r1, r3-r5}
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 3c217694ebec..cb791ac6a003 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -285,7 +285,7 @@ static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
 		if (unwind_pop_register(ctrl, &vsp, reg))
 			return -URC_FAILURE;
 
-	if (insn & 0x80)
+	if (insn & 0x8)
 		if (unwind_pop_register(ctrl, &vsp, 14))
 			return -URC_FAILURE;
 
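Note: in the ARM EHABI unwind encoding (assumed from the exception handling
ABI document), this handler decodes 10100nnn "pop r4-r[4+nnn]" and
10101nnn "pop r4-r[4+nnn], r14":

    /* 1 0 1 0 L n n n
     *         ^-- insn & 0x8: L bit, also pop r14 (LR)
     * ^-- insn & 0x80: part of the opcode, set for BOTH variants
     */

Since bit 7 is set in both encodings, the old test popped r14
unconditionally and corrupted the unwinder's virtual stack pointer; bit 3
is the one that actually selects the r14 variant.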
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index a0282928e9c1..7cd6f19945ed 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -1308,19 +1308,19 @@ static struct platform_device at91_adc_device = {
 static struct at91_adc_trigger at91_adc_triggers[] = {
 	[0] = {
 		.name = "timer-counter-0",
-		.value = AT91_ADC_TRGSEL_TC0 | AT91_ADC_TRGEN,
+		.value = 0x1,
 	},
 	[1] = {
 		.name = "timer-counter-1",
-		.value = AT91_ADC_TRGSEL_TC1 | AT91_ADC_TRGEN,
+		.value = 0x3,
 	},
 	[2] = {
 		.name = "timer-counter-2",
-		.value = AT91_ADC_TRGSEL_TC2 | AT91_ADC_TRGEN,
+		.value = 0x5,
 	},
 	[3] = {
 		.name = "external",
-		.value = AT91_ADC_TRGSEL_EXTERNAL | AT91_ADC_TRGEN,
+		.value = 0xd,
 		.is_external = true,
 	},
 };
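Note: the raw values match the AT91SAM9260 ADC Mode Register trigger field
(assumed from the datasheet: TRGEN is bit 0, TRGSEL occupies bits 3:1),
which is also why the at91sam9260.dtsi hunk above changes trigger-value
from <0x13> to <0xd>:

    /* Assumed ADC_MR trigger encoding: value = (TRGSEL << 1) | TRGEN */
    #define ADC_TRIG(sel)	(((sel) << 1) | 0x1)
    /* ADC_TRIG(0) = 0x1  TIOA of TC0
     * ADC_TRIG(1) = 0x3  TIOA of TC1
     * ADC_TRIG(2) = 0x5  TIOA of TC2
     * ADC_TRIG(6) = 0xd  external trigger (ADTRG pin)
     */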
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 932129ef26c6..aa01c4222b40 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -18,6 +18,8 @@
 
 #include <mach/map.h>
 
+#include <plat/cpu.h>
+
 #include "smc.h"
 
 static int exynos_do_idle(void)
@@ -28,13 +30,24 @@ static int exynos_do_idle(void)
 
 static int exynos_cpu_boot(int cpu)
 {
+	/*
+	 * The second parameter of SMC_CMD_CPU1BOOT command means CPU id.
+	 * But, Exynos4212 has only one secondary CPU so second parameter
+	 * isn't used for informing secure firmware about CPU id.
+	 */
+	if (soc_is_exynos4212())
+		cpu = 0;
+
 	exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
 	return 0;
 }
 
 static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
 {
-	void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c + 4*cpu;
+	void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c;
+
+	if (!soc_is_exynos4212())
+		boot_reg += 4*cpu;
 
 	__raw_writel(boot_addr, boot_reg);
 	return 0;
diff --git a/arch/arm/mach-imx/devices/platform-ipu-core.c b/arch/arm/mach-imx/devices/platform-ipu-core.c
index fc4dd7cedc11..6bd7c3f37ac0 100644
--- a/arch/arm/mach-imx/devices/platform-ipu-core.c
+++ b/arch/arm/mach-imx/devices/platform-ipu-core.c
@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
 
 	pdev = platform_device_alloc("mx3-camera", 0);
 	if (!pdev)
-		goto err;
+		return ERR_PTR(-ENOMEM);
 
 	pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
 	if (!pdev->dev.dma_mask)
diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
index f3d4cf53f746..09520e19b78e 100644
--- a/arch/arm/mach-mvebu/mvebu-soc-id.c
+++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
@@ -108,7 +108,18 @@ static int __init mvebu_soc_id_init(void)
 	iounmap(pci_base);
 
 res_ioremap:
-	clk_disable_unprepare(clk);
+	/*
+	 * If the PCIe unit is actually enabled and we have PCI
+	 * support in the kernel, we intentionally do not release the
+	 * reference to the clock. We want to keep it running since
+	 * the bootloader does some PCIe link configuration that the
+	 * kernel is for now unable to do, and gating the clock would
+	 * make us loose this precious configuration.
+	 */
+	if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
+		clk_disable_unprepare(clk);
+		clk_put(clk);
+	}
 
 clk_err:
 	of_node_put(child);
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index ac82512b9c8c..b6885e42c0a0 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
 	board_nand_data.nr_parts	= nr_parts;
 	board_nand_data.devsize		= nand_type;
 
-	board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+	board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW;
 	gpmc_nand_init(&board_nand_data, gpmc_t);
 }
 #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 8f5121b89688..eb8c75ec3b1a 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -456,7 +456,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
 	.clkdm_name	= "dpll4_clkdm",
 };
 
-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
+			dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
 
 static struct clk dpll4_m5x2_ck_3630 = {
 	.name		= "dpll4_m5x2_ck",
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 01fc710c8181..2498ab025fa2 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -14,6 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
+#include <linux/clockchips.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
@@ -83,6 +84,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
 	struct idle_statedata *cx = state_ptr + index;
 	u32 mpuss_can_lose_context = 0;
+	int cpu_id = smp_processor_id();
 
 	/*
 	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -110,6 +112,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
@@ -165,6 +169,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	if (dev->cpu == 0 && mpuss_can_lose_context)
 		cpu_cluster_pm_exit();
 
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
 fail:
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 	cpu_done[dev->cpu] = false;
@@ -172,6 +178,16 @@ fail:
 	return index;
 }
 
+/*
+ * For each cpu, setup the broadcast timer because local timers
+ * stops for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+	int cpu = smp_processor_id();
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
 static struct cpuidle_driver omap4_idle_driver = {
 	.name				= "omap4_idle",
 	.owner				= THIS_MODULE,
@@ -189,8 +205,7 @@ static struct cpuidle_driver omap4_idle_driver = {
 		/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 		.exit_latency = 328 + 440,
 		.target_residency = 960,
-		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-			 CPUIDLE_FLAG_TIMER_STOP,
+		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
		.enter = omap_enter_idle_coupled,
		.name = "C2",
		.desc = "CPUx OFF, MPUSS CSWR",
@@ -199,8 +214,7 @@ static struct cpuidle_driver omap4_idle_driver = {
		/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
		.exit_latency = 460 + 518,
		.target_residency = 1100,
-		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-			 CPUIDLE_FLAG_TIMER_STOP,
+		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
		.enter = omap_enter_idle_coupled,
		.name = "C3",
		.desc = "CPUx OFF, MPUSS OSWR",
@@ -231,5 +245,8 @@ int __init omap4_idle_init(void)
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;
 
+	/* Configure the broadcast timer on each cpu */
+	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
 }
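The clockevents_notify() calls added above do by hand what the dropped CPUIDLE_FLAG_TIMER_STOP flag used to request; the apparent motivation is that the generic flag handling did not mix well with coupled idle states on this SoC. A hedged sketch of the handshake follows; it assumes a 3.15-era tree (these notifier constants were later replaced by tick_broadcast_enter()/tick_broadcast_exit()) and builds only inside the kernel.

/*
 * Hedged sketch of the tick-broadcast handshake used above; assumes a
 * 3.15-era kernel. Kernel-only code, not a standalone program.
 */
#include <linux/clockchips.h>
#include <linux/smp.h>

static void deep_idle_with_broadcast(void (*do_idle)(void))
{
	int cpu = smp_processor_id();

	/* Hand this CPU's wakeups to the broadcast device... */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
	do_idle();	/* ...the local timer may stop in here... */
	/* ...and take them back once the local timer is alive again. */
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}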
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 892317294fdc..e829664e6a6c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -895,7 +895,7 @@ static struct omap_hwmod omap54xx_mcpdm_hwmod = {
	 * current exception.
	 */
 
-	.flags		= HWMOD_EXT_OPT_MAIN_CLK,
+	.flags		= HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
	.main_clk	= "pad_clks_ck",
	.prcm = {
		.omap4 = {
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 0c93588fcb91..1ca37c72f12f 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -123,6 +123,11 @@ __v7m_setup:
	mov	pc, lr
 ENDPROC(__v7m_setup)
 
+	.align 2
+__v7m_setup_stack:
+	.space	4 * 8				@ 8 registers
+__v7m_setup_stack_top:
+
	define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
 
	.section ".rodata"
@@ -152,6 +157,3 @@ __v7m_proc_info:
	.long	nop_cache_fns		@ proc_info_list.cache
	.size	__v7m_proc_info, . - __v7m_proc_info
 
-__v7m_setup_stack:
-	.space	4 * 8				@ 8 registers
-__v7m_setup_stack_top:
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 5f5b975887fc..b5608b1f9fbd 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -70,6 +70,7 @@ static u32 errata;
 
 static struct omap_dma_global_context_registers {
	u32 dma_irqenable_l0;
+	u32 dma_irqenable_l1;
	u32 dma_ocp_sysconfig;
	u32 dma_gcr;
 } omap_dma_global_context;
@@ -1973,10 +1974,17 @@ static struct irqaction omap24xx_dma_irq;
 
 /*----------------------------------------------------------------------------*/
 
+/*
+ * Note that we are currently using only IRQENABLE_L0 and L1.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not
+ * touch those for now.
+ */
 void omap_dma_global_context_save(void)
 {
	omap_dma_global_context.dma_irqenable_l0 =
		p->dma_read(IRQENABLE_L0, 0);
+	omap_dma_global_context.dma_irqenable_l1 =
+		p->dma_read(IRQENABLE_L1, 0);
	omap_dma_global_context.dma_ocp_sysconfig =
		p->dma_read(OCP_SYSCONFIG, 0);
	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
@@ -1991,6 +1999,8 @@ void omap_dma_global_context_restore(void)
			OCP_SYSCONFIG, 0);
	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
			IRQENABLE_L0, 0);
+	p->dma_write(omap_dma_global_context.dma_irqenable_l1,
+			IRQENABLE_L1, 0);
 
	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
		p->dma_write(0x3 , IRQSTATUS_L0, 0);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 90c811f05a2e..7b1c67a0b485 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -266,7 +266,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 
 #define pmd_page(pmd)	pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
-#define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)
+#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
 
 static inline int has_transparent_hugepage(void)
 {
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index ae763d8bf55a..fb13dc5e8f8c 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls			314 /* length of syscall table */
+#define NR_syscalls			315 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 715e85f858de..7de0a2d65da4 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -327,5 +327,6 @@
 #define __NR_finit_module		1335
 #define __NR_sched_setattr		1336
 #define __NR_sched_getattr		1337
+#define __NR_renameat2			1338
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index fa8d61a312a7..ba3d03503e84 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1775,6 +1775,7 @@ sys_call_table:
	data8 sys_finit_module			// 1335
	data8 sys_sched_setattr
	data8 sys_sched_getattr
+	data8 sys_renameat2
 
	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 9d38b73989eb..33afa56ad47a 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		351
+#define NR_syscalls		352
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index b932dd470041..9cd82fbc7817 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -356,5 +356,6 @@
 #define __NR_finit_module	348
 #define __NR_sched_setattr	349
 #define __NR_sched_getattr	350
+#define __NR_renameat2		351
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index b6223dc41d82..501e10212789 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -371,4 +371,5 @@ ENTRY(sys_call_table)
	.long sys_finit_module
	.long sys_sched_setattr
	.long sys_sched_getattr		/* 350 */
+	.long sys_renameat2
 
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 4852ae97e7df..a8521de14791 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -120,7 +120,7 @@ cflags-$(CONFIG_SB1XXX_CORELIS)	+= $(call cc-option,-mno-sched-prolog) \
		   -fno-omit-frame-pointer
 
 ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa	:= $(call cc-option-yn,-mhard-float -mfp64 -mmsa)
+toolchain-msa	:= $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
 cflags-$(toolchain-msa)	+= -DTOOLCHAIN_SUPPORTS_MSA
 endif
 
@@ -151,7 +151,7 @@ cflags-$(CONFIG_CPU_NEVADA)	+= $(call cc-option,-march=rm5200,-march=r5000) \
			-Wa,--trap
 cflags-$(CONFIG_CPU_RM7000)	+= $(call cc-option,-march=rm7000,-march=r5000) \
			-Wa,--trap
-cflags-$(CONFIG_CPU_SB1)	+= $(call cc-option,-march=sb1,-march=r5000) \
+cflags-$(CONFIG_CPU_SB1)	+= $(call cc-option,-march=sb1 -mno-mdmx -mno-mips3d,-march=r5000) \
			-Wa,--trap
 cflags-$(CONFIG_CPU_R8000)	+= -march=r8000 -Wa,--trap
 cflags-$(CONFIG_CPU_R10000)	+= $(call cc-option,-march=r10000,-march=r8000) \
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 7ba0e07a9091..47d5967ce7ef 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -39,14 +39,14 @@ struct cache_desc {
 #define MIPS_CACHE_PINDEX	0x00000020	/* Physically indexed cache */
 
 struct cpuinfo_mips {
-	unsigned int		udelay_val;
-	unsigned int		asid_cache;
+	unsigned long		asid_cache;
 
	/*
	 * Capability and feature descriptor structure for MIPS CPU
	 */
	unsigned long		options;
	unsigned long		ases;
+	unsigned int		udelay_val;
	unsigned int		processor_id;
	unsigned int		fpu_id;
	unsigned int		msa_id;
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index 52450a040f44..538f6d482db8 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -84,7 +84,7 @@ static inline void write_msa_##name(unsigned int val)	\
	__asm__ __volatile__(					\
	"	.set	push\n"					\
	"	.set	msa\n"					\
-	"	cfcmsa	$" #cs ", %0\n"				\
+	"	ctcmsa	$" #cs ", %0\n"				\
	"	.set	pop\n"					\
	: : "r"(val));						\
 }
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 2692abb28e36..5805414777e0 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -381,7 +381,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		350
+#define __NR_O32_Linux_syscalls		351
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -710,7 +710,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		310
+#define __NR_64_Linux_syscalls		311
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1043,6 +1043,6 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		314
+#define __NR_N32_Linux_syscalls		315
 
 #endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index e198d9bf17bb..7b2df224f041 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -513,7 +513,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		if (regs->regs[insn.i_format.rs] ==
		    regs->regs[insn.i_format.rt]) {
			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == beql_op)
+			if (insn.i_format.opcode == beql_op)
				ret = BRANCH_LIKELY_TAKEN;
		} else
			epc += 8;
@@ -525,7 +525,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		if (regs->regs[insn.i_format.rs] !=
		    regs->regs[insn.i_format.rt]) {
			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bnel_op)
				ret = BRANCH_LIKELY_TAKEN;
		} else
			epc += 8;
@@ -537,7 +537,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		/* rt field assumed to be zero */
		if ((long)regs->regs[insn.i_format.rs] <= 0) {
			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == blezl_op)
				ret = BRANCH_LIKELY_TAKEN;
		} else
			epc += 8;
@@ -549,7 +549,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
		/* rt field assumed to be zero */
		if ((long)regs->regs[insn.i_format.rs] > 0) {
			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bgtzl_op)
				ret = BRANCH_LIKELY_TAKEN;
		} else
			epc += 8;
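All four hunks fix the same slip: insn.i_format.rt is the 5-bit target-register field, so comparing it against 6-bit opcode values such as beql_op rarely matched (and the blezl/bgtzl cases tested the wrong constant entirely), misfiring the branch-likely emulation. A standalone decode of one BEQL word shows the two fields; the instruction word itself is made up for the demo.

/* Demo of the field mix-up fixed above: the branch-likely test must
 * look at the 6-bit opcode, not the 5-bit rt field. BEQL's opcode is
 * 0x14 in the MIPS32 encoding, matching the kernel's beql_op. */
#include <stdint.h>
#include <stdio.h>

#define BEQL_OP 0x14u

int main(void)
{
	/* beql $2, $3, +16 : opcode=0x14, rs=2, rt=3, simm16=4 */
	uint32_t insn = (0x14u << 26) | (2u << 21) | (3u << 16) | 4u;
	uint32_t opcode = insn >> 26;
	uint32_t rt = (insn >> 16) & 0x1f;

	printf("opcode=0x%02x rt=%u\n", opcode, rt);
	printf("rt == BEQL_OP?     %d (the old, wrong test)\n", rt == BEQL_OP);
	printf("opcode == BEQL_OP? %d (the fixed test)\n", opcode == BEQL_OP);
	return 0;
}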
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 71f85f427034..f639ccd5060c 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -163,7 +163,7 @@ int ptrace_get_watch_regs(struct task_struct *child,
	enum pt_watch_style style;
	int i;
 
-	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
+	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
@@ -177,14 +177,14 @@ int ptrace_get_watch_regs(struct task_struct *child,
 #endif
 
	__put_user(style, &addr->style);
-	__put_user(current_cpu_data.watch_reg_use_cnt,
+	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
-		__put_user(current_cpu_data.watch_reg_masks[i],
+		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
@@ -204,12 +204,12 @@ int ptrace_set_watch_regs(struct task_struct *child,
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];
 
-	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
+	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 #ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
@@ -228,7 +228,7 @@ int ptrace_set_watch_regs(struct task_struct *child,
			return -EINVAL;
	}
	/* Install them. */
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 1fd1a0c4f104..51706d6dd5b0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -704,10 +704,12 @@ int process_fpemu_return(int sig, void __user *fault_addr)
	si.si_addr = fault_addr;
	si.si_signo = sig;
	if (sig == SIGSEGV) {
+		down_read(&current->mm->mmap_sem);
		if (find_vma(current->mm, (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
+		up_read(&current->mm->mmap_sem);
	} else {
		si.si_code = BUS_ADRERR;
	}
@@ -1537,7 +1539,7 @@ asmlinkage void cache_parity_error(void)
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if (cpu_has_mips_r2 &&
-	    ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) {
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
@@ -1577,7 +1579,7 @@ asmlinkage void do_ftlb(void)
 
	/* For the moment, report the problem and hang. */
	if (cpu_has_mips_r2 &&
-	    ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) {
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
			read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
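The two processor_id hunks are a classic `&&`-for-`&` typo: the logical AND collapses any non-zero PRId to 1, so the comparison with PRID_COMP_MIPS (0x010000 in the kernel headers) could never succeed and the MIPS R2 error decoding was silently skipped. A standalone demo, with a made-up PRId value:

/* Demo of the operator typo fixed above. */
#include <stdio.h>

#define PRID_COMP_MIPS 0x010000 /* company field value used by the kernel */

int main(void)
{
	unsigned int processor_id = 0x019300; /* hypothetical MIPS PRId */

	printf("id && 0xff0000 = 0x%x\n", processor_id && 0xff0000); /* 1 */
	printf("id &  0xff0000 = 0x%x\n", processor_id & 0xff0000);  /* 0x10000 */
	printf("old test: %d\n", (processor_id && 0xff0000) == PRID_COMP_MIPS);
	printf("new test: %d\n", (processor_id & 0xff0000) == PRID_COMP_MIPS);
	return 0;
}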
diff --git a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
index c639b9db0012..12c75db23420 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
@@ -27,8 +27,7 @@
 
 #include <cs5536/cs5536_mfgpt.h>
 
-DEFINE_SPINLOCK(mfgpt_lock);
-EXPORT_SYMBOL(mfgpt_lock);
+static DEFINE_RAW_SPINLOCK(mfgpt_lock);
 
 static u32 mfgpt_base;
 
@@ -55,7 +54,7 @@ EXPORT_SYMBOL(enable_mfgpt0_counter);
 static void init_mfgpt_timer(enum clock_event_mode mode,
			     struct clock_event_device *evt)
 {
-	spin_lock(&mfgpt_lock);
+	raw_spin_lock(&mfgpt_lock);
 
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
@@ -79,7 +78,7 @@ static void init_mfgpt_timer(enum clock_event_mode mode,
		/* Nothing to do here */
		break;
	}
-	spin_unlock(&mfgpt_lock);
+	raw_spin_unlock(&mfgpt_lock);
 }
 
 static struct clock_event_device mfgpt_clockevent = {
@@ -157,7 +156,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
	static int old_count;
	static u32 old_jifs;
 
-	spin_lock_irqsave(&mfgpt_lock, flags);
+	raw_spin_lock_irqsave(&mfgpt_lock, flags);
	/*
	 * Although our caller may have the read side of xtime_lock,
	 * this is now a seqlock, and we are cheating in this routine
@@ -191,7 +190,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
	old_count = count;
	old_jifs = jifs;
 
-	spin_unlock_irqrestore(&mfgpt_lock, flags);
+	raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
 
	return (cycle_t) (jifs * COMPARE) + count;
 }
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index f41a5c5b0865..05b1d7cf9514 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -137,8 +137,10 @@ static void octeon_flush_cache_sigtramp(unsigned long addr)
 {
	struct vm_area_struct *vma;
 
+	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
+	up_read(&current->mm->mmap_sem);
 }
 
 
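Same pattern as the process_fpemu_return() fix above: find_vma() walks a shared VMA tree, so the caller must hold mm->mmap_sem for reading. As a user-space analogy only (not the kernel API), the lookup belongs inside a read-lock/unlock pair, here modelled with a pthread read-write lock:

/* User-space analogy for the locking fix above; purely illustrative. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t maps_lock = PTHREAD_RWLOCK_INITIALIZER;
static int mapping_count = 1; /* stands in for the VMA tree */

static int lookup_mapping(void)
{
	int found;

	pthread_rwlock_rdlock(&maps_lock); /* down_read(&mm->mmap_sem) */
	found = mapping_count > 0;         /* find_vma(mm, addr) */
	pthread_rwlock_unlock(&maps_lock); /* up_read(&mm->mmap_sem) */
	return found;
}

int main(void)
{
	printf("mapping found: %d\n", lookup_mapping());
	return 0;
}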
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 58033c44690d..b611102e23b5 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -273,7 +273,7 @@ void build_clear_page(void)
		uasm_i_ori(&buf, A2, A0, off);
 
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-		uasm_i_lui(&buf, AT, 0xa000);
+		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 
	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
@@ -424,7 +424,7 @@ void build_copy_page(void)
		uasm_i_ori(&buf, A2, A0, off);
 
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-		uasm_i_lui(&buf, AT, 0xa000);
+		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 
	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
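Both hunks derive the immediate from the full KSEG1 base instead of a magic upper half. Since lui sign-extends its 16-bit immediate on 64-bit CPUs, 0xa000 in the top half already yields the 0xffffffffa0000000 address the R4600 workaround wants; uasm_rel_hi() just makes that intent explicit. A standalone sketch of the arithmetic (the helpers below mirror uasm's carry-adjusted %hi/%lo pairing, not its exact source):

/* Sketch of the rel_hi/rel_lo arithmetic behind the change above. */
#include <stdint.h>
#include <stdio.h>

static int32_t rel_lo(int64_t val)
{
	return (int16_t)val;			/* signed low half, as %lo pairs */
}

static int32_t rel_hi(int64_t val)
{
	return (int16_t)((val + 0x8000) >> 16);	/* carry-adjusted high half */
}

int main(void)
{
	int64_t ckseg1 = (int32_t)0xa0000000;	/* sign-extends on 64-bit */
	int64_t top = (int32_t)((uint32_t)0xa000 << 16);

	printf("rel_hi=0x%04x rel_lo=0x%04x\n",
	       (uint16_t)rel_hi(ckseg1), (uint16_t)rel_lo(ckseg1));
	/* lui places the immediate in bits 31..16 and sign-extends: */
	printf("lui 0xa000 -> %#018llx\n", (unsigned long long)top);
	return 0;
}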
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 1ca34887d990..6d9773096750 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -27,7 +27,7 @@ unsigned long physical_memsize = 0L;
 fw_memblock_t * __init fw_getmdesc(int eva)
 {
	char *memsize_str, *ememsize_str = NULL, *ptr;
-	unsigned long memsize, ememsize = 0;
+	unsigned long memsize = 0, ememsize = 0;
	static char cmdline[COMMAND_LINE_SIZE] __initdata;
	int tmp;
 
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index b128cb973ebe..7f6ce6d734c0 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -53,7 +53,6 @@ static struct resource rc32434_res_pci_mem1 = {
	.start = 0x50000000,
	.end = 0x5FFFFFFF,
	.flags = IORESOURCE_MEM,
-	.parent = &rc32434_res_pci_mem1,
	.sibling = NULL,
	.child = &rc32434_res_pci_mem2
 };
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 265ae5190b0a..47e0e21d2272 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -829,8 +829,9 @@
 #define __NR_sched_setattr	(__NR_Linux + 334)
 #define __NR_sched_getattr	(__NR_Linux + 335)
 #define __NR_utimes		(__NR_Linux + 336)
+#define __NR_renameat2		(__NR_Linux + 337)
 
-#define __NR_Linux_syscalls	(__NR_utimes + 1)
+#define __NR_Linux_syscalls	(__NR_renameat2 + 1)
 
 
 #define __IGNORE_select		/* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 83ead0ea127d..c5fa7a697fba 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -432,6 +432,7 @@
	ENTRY_SAME(sched_setattr)
	ENTRY_SAME(sched_getattr)	/* 335 */
	ENTRY_COMP(utimes)
+	ENTRY_SAME(renameat2)
 
	/* Nothing yet */
 
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 4c0cedf4e2c7..ce4c68a4a823 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -150,7 +150,9 @@ endif
 
 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
 
-KBUILD_CPPFLAGS	+= -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS	+= -Iarch/$(ARCH) $(asinstr)
 KBUILD_AFLAGS	+= -Iarch/$(ARCH)
 KBUILD_CFLAGS	+= -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP		= $(CC) -E $(KBUILD_CFLAGS)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 6586a40a46ce..cded7c1278ef 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -318,11 +318,16 @@ n:
	addi	reg,reg,(name - 0b)@l;
 
 #ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
 #define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis     reg,(expr)@highest;		\
	ori     reg,reg,(expr)@higher;		\
	rldicr  reg,reg,32,31;			\
-	oris    reg,reg,(expr)@h;		\
+	oris    reg,reg,(expr)@__AS_ATHIGH;	\
	ori     reg,reg,(expr)@l;
 
 #define LOAD_REG_ADDR(reg,name)			\
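The asinstr probe added in the arch/powerpc/Makefile hunk defines HAVE_AS_ATHIGH when the assembler understands the @high operator, and this macro then prefers @high over the older @h spelling when stitching a 64-bit immediate out of four 16-bit pieces. The piecewise construction itself is easy to check in a standalone program; the value below is arbitrary:

/* Demo of the four 16-bit pieces LOAD_REG_IMMEDIATE glues together
 * (@highest/@higher/@high-or-@h/@l in the macro above). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t expr = 0x1234567890abcdefULL;
	uint16_t highest = expr >> 48;
	uint16_t higher  = expr >> 32;
	uint16_t high    = expr >> 16;
	uint16_t lo      = expr;

	/* lis/ori build the top half, rldicr shifts it up, oris/ori fill in */
	uint64_t reg = ((uint64_t)highest << 16) | higher; /* lis + ori */
	reg <<= 32;                                        /* rldicr ,32,31 */
	reg |= (uint32_t)high << 16;                       /* oris */
	reg |= lo;                                         /* ori */

	printf("rebuilt: 0x%016llx\n", (unsigned long long)reg);
	return 0;
}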
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index d0e784e0ff48..521790330672 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
		(unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+	extern char kvm_tmp[];
+	return start < (unsigned long)kvm_tmp &&
+		(unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+	return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
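overlaps_kvm_tmp() is consumed by the hash_utils_64.c hunk later in this series, which clears HPTE_R_N (no-execute) on any linear-mapping step covering the kvm_tmp trampoline buffer now exported from arch/powerpc/kernel/kvm.c. As written the predicate is a containment test, true when the whole 1 MiB buffer lies strictly inside [start, end); a standalone check with made-up addresses:

/* Demo of the containment test added above; addresses are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long kvm_tmp = 0xc0001000UL;          /* hypothetical */
	unsigned long kvm_tmp_end = kvm_tmp + (1UL << 20);
	unsigned long start = 0xc0000000UL;            /* one 16M step */
	unsigned long end = start + (1UL << 24);

	int covered = start < kvm_tmp && kvm_tmp_end < end;
	printf("step [%#lx, %#lx) covers kvm_tmp? %d\n", start, end, covered);
	return 0;
}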
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3ddf70276706..ea4dc3a89c1f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -361,3 +361,4 @@ SYSCALL(finit_module)
 SYSCALL(ni_syscall) /* sys_kcmp */
 SYSCALL_SPU(sched_setattr)
 SYSCALL_SPU(sched_getattr)
+SYSCALL_SPU(renameat2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4494f029b632..9b892bbd9d84 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls		357
+#define __NR_syscalls		358
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 881bf2e2560d..2d526f7b48da 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -379,5 +379,6 @@
 #define __NR_kcmp		354
 #define __NR_sched_setattr	355
 #define __NR_sched_getattr	356
+#define __NR_renameat2		357
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 6a0175297b0d..dd8695f6cb6d 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN		0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 59d229a2a3e0..879b3aacac32 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
			       cpu);
-			cpu_up(cpu);
+			WARN_ON(cpu_up(cpu));
		}
	}
 }
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 122a580f7322..7e711bdcc6da 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -813,9 +813,6 @@ static void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
 {
-	/* Don't adjust the decrementer if some irq work is pending */
-	if (test_irq_work_pending())
-		return 0;
	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
	set_dec(evt);
 
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 94e597e6f15c..7af190a266b3 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void)
	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
 #endif
	return r;
@@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void)
 
 static void kvmppc_book3s_exit(void)
 {
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
 #endif
	kvm_exit();
@@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
 
 /* On 32bit this is our one and only kernel module */
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
 #endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 1d6c56ad5b60..8fcc36306a02 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		pte_size = psize;
		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
						  &pte_size);
-		if (pte_present(pte)) {
+		if (pte_present(pte) && !pte_numa(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b031f932c0cc..07c8b5b0f9d2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	mr	r3, r9
	bl	kvmppc_save_fp
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+	b	2f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+	/* Turn on TM. */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	ld	r5, VCPU_MSR(r9)
+	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	beq	1f	/* TM not active in guest. */
+
+	li	r3, TM_CAUSE_KVM_RESCHED
+
+	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/* All GPRs are volatile at this point. */
+	TRECLAIM(R3)
+
+	/* Temporarily store r13 and r9 so we have some regs to play with */
+	SET_SCRATCH0(r13)
+	GET_PACA(r13)
+	std	r9, PACATMSCRATCH(r13)
+	ld	r9, HSTATE_KVM_VCPU(r13)
+
+	/* Get a few more GPRs free. */
+	std	r29, VCPU_GPRS_TM(29)(r9)
+	std	r30, VCPU_GPRS_TM(30)(r9)
+	std	r31, VCPU_GPRS_TM(31)(r9)
+
+	/* Save away PPR and DSCR soon so don't run with user values. */
+	mfspr	r31, SPRN_PPR
+	HMT_MEDIUM
+	mfspr	r30, SPRN_DSCR
+	ld	r29, HSTATE_DSCR(r13)
+	mtspr	SPRN_DSCR, r29
+
+	/* Save all but r9, r13 & r29-r31 */
+	reg = 0
+	.rept	29
+	.if (reg != 9) && (reg != 13)
+	std	reg, VCPU_GPRS_TM(reg)(r9)
+	.endif
+	reg = reg + 1
+	.endr
+	/* ... now save r13 */
+	GET_SCRATCH0(r4)
+	std	r4, VCPU_GPRS_TM(13)(r9)
+	/* ... and save r9 */
+	ld	r4, PACATMSCRATCH(r13)
+	std	r4, VCPU_GPRS_TM(9)(r9)
+
+	/* Reload stack pointer and TOC. */
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r2, PACATOC(r13)
+
+	/* Set MSR RI now we have r1 and r13 back. */
+	li	r5, MSR_RI
+	mtmsrd	r5, 1
+
+	/* Save away checkpinted SPRs. */
+	std	r31, VCPU_PPR_TM(r9)
+	std	r30, VCPU_DSCR_TM(r9)
+	mflr	r5
+	mfcr	r6
+	mfctr	r7
+	mfspr	r8, SPRN_AMR
+	mfspr	r10, SPRN_TAR
+	std	r5, VCPU_LR_TM(r9)
+	stw	r6, VCPU_CR_TM(r9)
+	std	r7, VCPU_CTR_TM(r9)
+	std	r8, VCPU_AMR_TM(r9)
+	std	r10, VCPU_TAR_TM(r9)
+
+	/* Restore r12 as trap number. */
+	lwz	r12, VCPU_TRAP(r9)
+
+	/* Save FP/VSX. */
+	addi	r3, r9, VCPU_FPRS_TM
+	bl	.store_fp_state
+	addi	r3, r9, VCPU_VRS_TM
+	bl	.store_vr_state
+	mfspr	r6, SPRN_VRSAVE
+	stw	r6, VCPU_VRSAVE_TM(r9)
+1:
+	/*
+	 * We need to save these SPRs after the treclaim so that the software
+	 * error code is recorded correctly in the TEXASR. Also the user may
+	 * change these outside of a transaction, so they must always be
+	 * context switched.
+	 */
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+2:
+#endif
+
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c5c052a9729c..02f1defd8bb9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;
 
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
@@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
 free_vcpu3s:
 #endif
@@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 
	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
 #endif
	vfree(vcpu_book3s);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d766d6ee33fe..06ba83b036d3 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		if (overlaps_kernel_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;
 
+		/* Make kvm guest trampolines executable */
+		if (overlaps_kvm_tmp(vaddr, vaddr + step))
+			tprot &= ~HPTE_R_N;
+
		/*
		 * If relocatable, check if it overlaps interrupt vectors that
		 * are copied down to real 0. For relocatable kernel
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 253fefe3d1a0..5b51079f3e3b 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -549,7 +549,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
		ret = ioda_eeh_phb_reset(hose, option);
	} else {
		bus = eeh_pe_bus_get(pe);
-		if (pci_is_root_bus(bus))
+		if (pci_is_root_bus(bus) ||
+		    pci_is_root_bus(bus->parent))
			ret = ioda_eeh_root_reset(hose, option);
		else
			ret = ioda_eeh_bridge_reset(hose, bus->self, option);
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index cf3c0089bef2..23223cd63e54 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -820,6 +820,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
		else
			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
		spin_unlock(&ctrblk_lock);
+	} else {
+		if (!nbytes)
+			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
	}
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0a5aac8a9412..7acb77f7ef1a 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
		else
			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
		spin_unlock(&ctrblk_lock);
+	} else {
+		if (!nbytes)
+			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
	}
	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b3ecb8f5b6ce..9ae6664ff08c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -158,6 +158,7 @@ int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
+	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 452d3ebd9d0f..e9f8fa9337fe 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
		return NULL;
	memset(header, 0, sz);
	header->pages = sz / PAGE_SIZE;
-	hole = sz - (bpfsize + sizeof(*header));
+	hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
	/* Insert random number of illegal instructions before BPF code
	 * and make sure the first instruction starts at an even address.
	 */
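The identical clamp appears in the x86 JIT later in this series. Rounding the allocation up to whole pages can leave far more slack than the roughly 128 bytes of intended headroom, and the random start offset is drawn from that slack; bounding hole by one page keeps the randomization window fixed instead of growing with the rounding. Standalone arithmetic with illustrative sizes:

/* Demo of the hole clamp added above; sizes are illustrative. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long header = 16;	/* stand-in for sizeof(*header) */
	unsigned long bpfsize = 3953;	/* bpfsize + header + 128 = 4097 */
	unsigned long sz = ((bpfsize + header + 128) + PAGE_SIZE - 1)
				& ~(PAGE_SIZE - 1);	/* rounds to 8192 */

	unsigned long hole_old = sz - (bpfsize + header);
	unsigned long hole_new = min_ul(hole_old, PAGE_SIZE - header);

	printf("sz=%lu hole before=%lu after=%lu\n", sz, hole_old, hole_new);
	return 0;
}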
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index fde5abaac0cc..1a49ffdf9da9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define TLBTEMP_BASE		_AC(0x0000000006000000,UL)
-#define TSBMAP_BASE		_AC(0x0000000008000000,UL)
+#define TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
+#define TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
 #define MODULES_VADDR		_AC(0x0000000010000000,UL)
 #define MODULES_LEN		_AC(0x00000000e0000000,UL)
 #define MODULES_END		_AC(0x00000000f0000000,UL)
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index a364000ca1aa..7f41d40b7e6e 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
				    size_t count)
 {
	unsigned long val, err;
-	int ret = sscanf(buf, "%ld", &val);
+	int ret = sscanf(buf, "%lu", &val);
 
	if (ret != 1)
		return -EINVAL;
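val is an unsigned long, so %ld was the mismatched conversion: the destination type does not agree with the specifier, and a negative input quietly lands in the variable as a huge positive value. %lu is the conversion that matches the declared type. A standalone comparison:

/* Demo of the format-specifier fix above. */
#include <stdio.h>

int main(void)
{
	unsigned long val;

	if (sscanf("-1", "%ld", &val) == 1)
		printf("%%ld stored -1 as %lu\n", val);	/* huge value */
	if (sscanf("1", "%lu", &val) == 1)
		printf("%%lu stored 1 as %lu\n", val);
	return 0;
}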
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 2c20ad63ddbf..30eee6e8a81b 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -236,6 +236,7 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
	 */
	VISEntryHalf
 
+	membar		#Sync
	alignaddr	%o1, %g0, %g0
 
	add	%o1, (64 - 1), %o4
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index a8ff0d1a3b69..4ced3fc66130 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -281,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
	show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-							 unsigned long addr)
-{
-	static int times;
-
-	if (times++ < 10)
-		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-		       "reports 64-bit fault address [%lx]\n",
-		       current->comm, current->pid, addr);
-	show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
	enum ctx_state prev_state = exception_enter();
@@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
				goto intr_or_no_mm;
			}
		}
-		if (unlikely((address >> 32) != 0)) {
-			bogus_32bit_fault_address(regs, address);
+		if (unlikely((address >> 32) != 0))
			goto intr_or_no_mm;
-		}
	}
 
	if (regs->tstate & TSTATE_PRIV) {
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f5d506fdddad..fe19b81acc09 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);
 
-	base = TSBMAP_BASE;
+	switch (tsb_idx) {
+	case MM_TSB_BASE:
+		base = TSBMAP_8K_BASE;
+		break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	case MM_TSB_HUGE:
+		base = TSBMAP_4M_BASE;
+		break;
+#endif
+	default:
+		BUG();
+	}
+
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8de6d9cf3b95..678205195ae1 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H
 
-#define THREAD_SIZE_ORDER	1
+#define THREAD_SIZE_ORDER	2
 #define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define CURRENT_MASK (~(THREAD_SIZE - 1))
 
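The order bump doubles x86-64 kernel stacks from 8 KiB to 16 KiB with 4 KiB pages, since THREAD_SIZE is PAGE_SIZE shifted left by the order. Trivial to confirm standalone:

/* Arithmetic behind the THREAD_SIZE_ORDER change above. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	for (unsigned int order = 1; order <= 2; order++)
		printf("order %u -> THREAD_SIZE = %lu KiB\n",
		       order, (PAGE_SIZE << order) / 1024);
	return 0;
}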
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index aa333d966886..adb02aa62af5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
 };
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 33e8c028842f..138ceffc6377 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7778,7 +7778,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
	exec_control = vmcs12->pin_based_vm_exec_control;
	exec_control |= vmcs_config.pin_based_exec_ctrl;
-	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+	exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
+			  PIN_BASED_POSTED_INTR);
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
 
	vmx->nested.preemption_timer_expired = false;
@@ -7815,7 +7816,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
		if (!vmx->rdtscp_enabled)
			exec_control &= ~SECONDARY_EXEC_RDTSCP;
		/* Take the following fields only from vmcs12 */
-		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+				  SECONDARY_EXEC_APIC_REGISTER_VIRT);
		if (nested_cpu_has(vmcs12,
				CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
			exec_control |= vmcs12->secondary_vm_exec_control;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b6c0bacca9bd..20316c67b824 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
 static u32 tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
+static bool backwards_tsc_observed = false;
+
 #define KVM_NR_SHARED_MSRS 16
 
 struct kvm_shared_msrs_global {
@@ -1486,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
					&ka->master_kernel_ns,
					&ka->master_cycle_now);
 
-	ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
+	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
+				&& !backwards_tsc_observed;
 
	if (ka->use_master_clock)
		atomic_set(&kvm_guest_has_master_clock, 1);
@@ -6945,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage)
	 */
	if (backwards_tsc) {
		u64 delta_cyc = max_tsc - local_tsc;
+		backwards_tsc_observed = true;
		list_for_each_entry(kvm, &vm_list, vm_list) {
			kvm_for_each_vcpu(i, vcpu, kvm) {
				vcpu->arch.tsc_offset_adjustment += delta_cyc;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index dc017735bb91..6d5663a599a7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
 
	header->pages = sz / PAGE_SIZE;
-	hole = sz - (proglen + sizeof(*header));
+	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 
	/* insert a random number of int3 instructions before BPF code */
	*image_ptr = &header->image[prandom_u32() % hole];
diff --git a/drivers/Makefile b/drivers/Makefile
index d05d81b19b50..7183b6af5dac 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN)		+= sn/
 obj-y				+= firmware/
 obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SUPERH)		+= sh/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)	+= sh/
+obj-$(CONFIG_ARCH_SHMOBILE)	+= sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y				+= clocksource/
 endif
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ab686b310100..a34a22841002 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -47,6 +47,23 @@ config ACPI_SLEEP
47 depends on SUSPEND || HIBERNATION 47 depends on SUSPEND || HIBERNATION
48 default y 48 default y
49 49
50config ACPI_PROCFS_POWER
51 bool "Deprecated power /proc/acpi directories"
52 depends on PROC_FS
53 help
54 For backwards compatibility, this option allows
55 deprecated power /proc/acpi/ directories to exist, even when
56 they have been replaced by functions in /sys.
57 The deprecated directories (and their replacements) include:
58 /proc/acpi/battery/* (/sys/class/power_supply/*)
 59 /proc/acpi/ac_adapter/* (/sys/class/power_supply/*)
 60 This option has no effect on /proc/acpi/ directories
 61 and functions which do not yet exist in /sys.
62 This option, together with the proc directories, will be
63 deleted in the future.
64
65 Say N to delete power /proc/acpi/ directories that have moved to /sys/
66
50config ACPI_EC_DEBUGFS 67config ACPI_EC_DEBUGFS
51 tristate "EC read/write access through /sys/kernel/debug/ec" 68 tristate "EC read/write access through /sys/kernel/debug/ec"
52 default n 69 default n
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 0331f91d56e6..bce34afadcd0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,6 +47,7 @@ acpi-y += sysfs.o
47acpi-$(CONFIG_X86) += acpi_cmos_rtc.o 47acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
48acpi-$(CONFIG_DEBUG_FS) += debugfs.o 48acpi-$(CONFIG_DEBUG_FS) += debugfs.o
49acpi-$(CONFIG_ACPI_NUMA) += numa.o 49acpi-$(CONFIG_ACPI_NUMA) += numa.o
50acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
50ifdef CONFIG_ACPI_VIDEO 51ifdef CONFIG_ACPI_VIDEO
51acpi-y += video_detect.o 52acpi-y += video_detect.o
52endif 53endif
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 2c01c1da29ce..c67f6f5ad611 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh");
52MODULE_DESCRIPTION("ACPI AC Adapter Driver"); 52MODULE_DESCRIPTION("ACPI AC Adapter Driver");
53MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
54 54
55static int acpi_ac_add(struct acpi_device *device);
56static int acpi_ac_remove(struct acpi_device *device);
57static void acpi_ac_notify(struct acpi_device *device, u32 event);
58
59static const struct acpi_device_id ac_device_ids[] = {
60 {"ACPI0003", 0},
61 {"", 0},
62};
63MODULE_DEVICE_TABLE(acpi, ac_device_ids);
64
65#ifdef CONFIG_PM_SLEEP
66static int acpi_ac_resume(struct device *dev);
67#endif
68static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
69
55static int ac_sleep_before_get_state_ms; 70static int ac_sleep_before_get_state_ms;
56 71
72static struct acpi_driver acpi_ac_driver = {
73 .name = "ac",
74 .class = ACPI_AC_CLASS,
75 .ids = ac_device_ids,
76 .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
77 .ops = {
78 .add = acpi_ac_add,
79 .remove = acpi_ac_remove,
80 .notify = acpi_ac_notify,
81 },
82 .drv.pm = &acpi_ac_pm,
83};
84
57struct acpi_ac { 85struct acpi_ac {
58 struct power_supply charger; 86 struct power_supply charger;
 59 struct platform_device *pdev; 87 struct acpi_device *device;
60 unsigned long long state; 88 unsigned long long state;
61 struct notifier_block battery_nb; 89 struct notifier_block battery_nb;
62}; 90};
@@ -69,10 +97,12 @@ struct acpi_ac {
69 97
70static int acpi_ac_get_state(struct acpi_ac *ac) 98static int acpi_ac_get_state(struct acpi_ac *ac)
71{ 99{
72 acpi_status status; 100 acpi_status status = AE_OK;
73 acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); 101
102 if (!ac)
103 return -EINVAL;
74 104
75 status = acpi_evaluate_integer(handle, "_PSR", NULL, 105 status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
76 &ac->state); 106 &ac->state);
77 if (ACPI_FAILURE(status)) { 107 if (ACPI_FAILURE(status)) {
78 ACPI_EXCEPTION((AE_INFO, status, 108 ACPI_EXCEPTION((AE_INFO, status,
@@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = {
117 Driver Model 147 Driver Model
118 -------------------------------------------------------------------------- */ 148 -------------------------------------------------------------------------- */
119 149
120static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) 150static void acpi_ac_notify(struct acpi_device *device, u32 event)
121{ 151{
122 struct acpi_ac *ac = data; 152 struct acpi_ac *ac = acpi_driver_data(device);
123 struct acpi_device *adev;
124 153
125 if (!ac) 154 if (!ac)
126 return; 155 return;
@@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
143 msleep(ac_sleep_before_get_state_ms); 172 msleep(ac_sleep_before_get_state_ms);
144 173
145 acpi_ac_get_state(ac); 174 acpi_ac_get_state(ac);
146 adev = ACPI_COMPANION(&ac->pdev->dev); 175 acpi_bus_generate_netlink_event(device->pnp.device_class,
147 acpi_bus_generate_netlink_event(adev->pnp.device_class, 176 dev_name(&device->dev), event,
148 dev_name(&ac->pdev->dev), 177 (u32) ac->state);
149 event, (u32) ac->state); 178 acpi_notifier_call_chain(device, event, (u32) ac->state);
150 acpi_notifier_call_chain(adev, event, (u32) ac->state);
151 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 179 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
152 } 180 }
153 181
@@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = {
192 {}, 220 {},
193}; 221};
194 222
195static int acpi_ac_probe(struct platform_device *pdev) 223static int acpi_ac_add(struct acpi_device *device)
196{ 224{
197 int result = 0; 225 int result = 0;
198 struct acpi_ac *ac = NULL; 226 struct acpi_ac *ac = NULL;
199 struct acpi_device *adev;
200 227
201 if (!pdev)
202 return -EINVAL;
203 228
204 adev = ACPI_COMPANION(&pdev->dev); 229 if (!device)
205 if (!adev) 230 return -EINVAL;
206 return -ENODEV;
207 231
208 ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); 232 ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
209 if (!ac) 233 if (!ac)
210 return -ENOMEM; 234 return -ENOMEM;
211 235
212 strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); 236 ac->device = device;
213 strcpy(acpi_device_class(adev), ACPI_AC_CLASS); 237 strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
214 ac->pdev = pdev; 238 strcpy(acpi_device_class(device), ACPI_AC_CLASS);
215 platform_set_drvdata(pdev, ac); 239 device->driver_data = ac;
216 240
217 result = acpi_ac_get_state(ac); 241 result = acpi_ac_get_state(ac);
218 if (result) 242 if (result)
219 goto end; 243 goto end;
220 244
221 ac->charger.name = acpi_device_bid(adev); 245 ac->charger.name = acpi_device_bid(device);
222 ac->charger.type = POWER_SUPPLY_TYPE_MAINS; 246 ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
223 ac->charger.properties = ac_props; 247 ac->charger.properties = ac_props;
224 ac->charger.num_properties = ARRAY_SIZE(ac_props); 248 ac->charger.num_properties = ARRAY_SIZE(ac_props);
225 ac->charger.get_property = get_ac_property; 249 ac->charger.get_property = get_ac_property;
226 result = power_supply_register(&pdev->dev, &ac->charger); 250 result = power_supply_register(&ac->device->dev, &ac->charger);
227 if (result) 251 if (result)
228 goto end; 252 goto end;
229 253
230 result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
231 ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
232 if (result) {
233 power_supply_unregister(&ac->charger);
234 goto end;
235 }
236 printk(KERN_INFO PREFIX "%s [%s] (%s)\n", 254 printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
237 acpi_device_name(adev), acpi_device_bid(adev), 255 acpi_device_name(device), acpi_device_bid(device),
238 ac->state ? "on-line" : "off-line"); 256 ac->state ? "on-line" : "off-line");
239 257
240 ac->battery_nb.notifier_call = acpi_ac_battery_notify; 258 ac->battery_nb.notifier_call = acpi_ac_battery_notify;
@@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev)
256 if (!dev) 274 if (!dev)
257 return -EINVAL; 275 return -EINVAL;
258 276
259 ac = platform_get_drvdata(to_platform_device(dev)); 277 ac = acpi_driver_data(to_acpi_device(dev));
260 if (!ac) 278 if (!ac)
261 return -EINVAL; 279 return -EINVAL;
262 280
@@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev)
270#else 288#else
271#define acpi_ac_resume NULL 289#define acpi_ac_resume NULL
272#endif 290#endif
273static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
274 291
275static int acpi_ac_remove(struct platform_device *pdev) 292static int acpi_ac_remove(struct acpi_device *device)
276{ 293{
277 struct acpi_ac *ac; 294 struct acpi_ac *ac = NULL;
295
278 296
279 if (!pdev) 297 if (!device || !acpi_driver_data(device))
280 return -EINVAL; 298 return -EINVAL;
281 299
282 acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), 300 ac = acpi_driver_data(device);
283 ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
284 301
285 ac = platform_get_drvdata(pdev);
286 if (ac->charger.dev) 302 if (ac->charger.dev)
287 power_supply_unregister(&ac->charger); 303 power_supply_unregister(&ac->charger);
288 unregister_acpi_notifier(&ac->battery_nb); 304 unregister_acpi_notifier(&ac->battery_nb);
@@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev)
292 return 0; 308 return 0;
293} 309}
294 310
295static const struct acpi_device_id acpi_ac_match[] = {
296 { "ACPI0003", 0 },
297 { }
298};
299MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
300
301static struct platform_driver acpi_ac_driver = {
302 .probe = acpi_ac_probe,
303 .remove = acpi_ac_remove,
304 .driver = {
305 .name = "acpi-ac",
306 .owner = THIS_MODULE,
307 .pm = &acpi_ac_pm_ops,
308 .acpi_match_table = ACPI_PTR(acpi_ac_match),
309 },
310};
311
312static int __init acpi_ac_init(void) 311static int __init acpi_ac_init(void)
313{ 312{
314 int result; 313 int result;
@@ -316,7 +315,7 @@ static int __init acpi_ac_init(void)
316 if (acpi_disabled) 315 if (acpi_disabled)
317 return -ENODEV; 316 return -ENODEV;
318 317
319 result = platform_driver_register(&acpi_ac_driver); 318 result = acpi_bus_register_driver(&acpi_ac_driver);
320 if (result < 0) 319 if (result < 0)
321 return -ENODEV; 320 return -ENODEV;
322 321
@@ -325,7 +324,7 @@ static int __init acpi_ac_init(void)
325 324
326static void __exit acpi_ac_exit(void) 325static void __exit acpi_ac_exit(void)
327{ 326{
328 platform_driver_unregister(&acpi_ac_driver); 327 acpi_bus_unregister_driver(&acpi_ac_driver);
329} 328}
330module_init(acpi_ac_init); 329module_init(acpi_ac_init);
331module_exit(acpi_ac_exit); 330module_exit(acpi_ac_exit);
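
The ac.c change reverts the driver from a platform_device back to a classic acpi_driver: the acpi_ac instance now rides in device->driver_data, set in acpi_ac_add() and read back with acpi_driver_data() in the notify/remove/resume paths. A stand-alone model of that round trip, with local stand-ins for the kernel types and accessors:

#include <stdio.h>
#include <stdlib.h>

struct acpi_device { void *driver_data; };
struct acpi_ac { unsigned long long state; };

static void *acpi_driver_data_model(struct acpi_device *dev)
{
	return dev->driver_data;
}

static int ac_add(struct acpi_device *dev)         /* cf. acpi_ac_add() */
{
	struct acpi_ac *ac = calloc(1, sizeof(*ac));

	if (!ac)
		return -1;
	dev->driver_data = ac;      /* device->driver_data = ac; */
	return 0;
}

static void ac_notify(struct acpi_device *dev)     /* cf. acpi_ac_notify() */
{
	struct acpi_ac *ac = acpi_driver_data_model(dev);

	ac->state ^= 1;             /* pretend _PSR flipped */
	printf("state=%llu\n", ac->state);
}

int main(void)
{
	struct acpi_device dev = { 0 };

	if (ac_add(&dev) == 0)
		ac_notify(&dev);
	free(dev.driver_data);
	return 0;
}
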
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index dbfe49e5fd63..1d4950388fa1 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform");
29static const struct acpi_device_id acpi_platform_device_ids[] = { 29static const struct acpi_device_id acpi_platform_device_ids[] = {
30 30
31 { "PNP0D40" }, 31 { "PNP0D40" },
32 { "ACPI0003" },
33 { "VPC2004" }, 32 { "VPC2004" },
34 { "BCM4752" }, 33 { "BCM4752" },
35 34
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b06f5f55ada9..52c81c49cc7d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -405,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device,
405 goto err; 405 goto err;
406 406
407 pr->dev = dev; 407 pr->dev = dev;
408 dev->offline = pr->flags.need_hotplug_init;
409 408
410 /* Trigger the processor driver's .probe() if present. */ 409 /* Trigger the processor driver's .probe() if present. */
411 if (device_attach(dev) >= 0) 410 if (device_attach(dev) >= 0)
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 49bbc71fad54..a08a448068dd 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
141 * address. Although ACPICA adheres to the ACPI specification which 141 * address. Although ACPICA adheres to the ACPI specification which
142 * requires the use of the corresponding 64-bit address if it is non-zero, 142 * requires the use of the corresponding 64-bit address if it is non-zero,
143 * some machines have been found to have a corrupted non-zero 64-bit 143 * some machines have been found to have a corrupted non-zero 64-bit
144 * address. Default is FALSE, do not favor the 32-bit addresses. 144 * address. Default is TRUE, favor the 32-bit addresses.
145 */ 145 */
146ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE); 146ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
147 147
148/* 148/*
149 * Optionally truncate I/O addresses to 16 bits. Provides compatibility 149 * Optionally truncate I/O addresses to 16 bits. Provides compatibility
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index a4702eee91a8..9fb85f38de90 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
461 u32 table_count; 461 u32 table_count;
462 struct acpi_table_header *table; 462 struct acpi_table_header *table;
463 acpi_physical_address address; 463 acpi_physical_address address;
464 acpi_physical_address rsdt_address;
464 u32 length; 465 u32 length;
465 u8 *table_entry; 466 u8 *table_entry;
466 acpi_status status; 467 acpi_status status;
@@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
488 * as per the ACPI specification. 489 * as per the ACPI specification.
489 */ 490 */
490 address = (acpi_physical_address) rsdp->xsdt_physical_address; 491 address = (acpi_physical_address) rsdp->xsdt_physical_address;
492 rsdt_address =
493 (acpi_physical_address) rsdp->rsdt_physical_address;
491 table_entry_size = ACPI_XSDT_ENTRY_SIZE; 494 table_entry_size = ACPI_XSDT_ENTRY_SIZE;
492 } else { 495 } else {
493 /* Root table is an RSDT (32-bit physical addresses) */ 496 /* Root table is an RSDT (32-bit physical addresses) */
494 497
495 address = (acpi_physical_address) rsdp->rsdt_physical_address; 498 address = (acpi_physical_address) rsdp->rsdt_physical_address;
499 rsdt_address = address;
496 table_entry_size = ACPI_RSDT_ENTRY_SIZE; 500 table_entry_size = ACPI_RSDT_ENTRY_SIZE;
497 } 501 }
498 502
@@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
515 519
516 /* Fall back to the RSDT */ 520 /* Fall back to the RSDT */
517 521
518 address = 522 address = rsdt_address;
519 (acpi_physical_address) rsdp->rsdt_physical_address;
520 table_entry_size = ACPI_RSDT_ENTRY_SIZE; 523 table_entry_size = ACPI_RSDT_ENTRY_SIZE;
521 } 524 }
522 } 525 }
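
The tbutils.c change caches rsdt_physical_address while the RSDP is still being examined, so the XSDT-failure fallback no longer reaches back through the rsdp pointer, whose temporary mapping may be gone by that point. A stand-alone model of the copy-before-release pattern, with malloc()/free() standing in for the mapping calls:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rsdp {
	uint64_t xsdt_physical_address;
	uint32_t rsdt_physical_address;
};

int main(void)
{
	struct rsdp *map = malloc(sizeof(*map)); /* acpi_os_map_memory() */

	if (!map)
		return 1;
	map->xsdt_physical_address = 0xdeadb000;
	map->rsdt_physical_address = 0x000fb000;

	uint64_t address = map->xsdt_physical_address;
	uint64_t rsdt_address = map->rsdt_physical_address; /* cached up front */

	free(map);                   /* the mapping goes away here */
	map = NULL;

	/* The XSDT turned out to be unusable: fall back to the RSDT
	 * without ever touching the stale pointer again. */
	address = rsdt_address;
	printf("root table at %#llx\n", (unsigned long long)address);
	return 0;
}
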
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 9a2c63b20050..6e7b2a12860d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,6 +36,12 @@
36#include <linux/suspend.h> 36#include <linux/suspend.h>
37#include <asm/unaligned.h> 37#include <asm/unaligned.h>
38 38
39#ifdef CONFIG_ACPI_PROCFS_POWER
40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
42#include <asm/uaccess.h>
43#endif
44
39#include <linux/acpi.h> 45#include <linux/acpi.h>
40#include <linux/power_supply.h> 46#include <linux/power_supply.h>
41 47
@@ -64,6 +70,19 @@ static unsigned int cache_time = 1000;
64module_param(cache_time, uint, 0644); 70module_param(cache_time, uint, 0644);
65MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 71MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
66 72
73#ifdef CONFIG_ACPI_PROCFS_POWER
74extern struct proc_dir_entry *acpi_lock_battery_dir(void);
75extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
76
77enum acpi_battery_files {
78 info_tag = 0,
79 state_tag,
80 alarm_tag,
81 ACPI_BATTERY_NUMFILES,
82};
83
84#endif
85
67static const struct acpi_device_id battery_device_ids[] = { 86static const struct acpi_device_id battery_device_ids[] = {
68 {"PNP0C0A", 0}, 87 {"PNP0C0A", 0},
69 {"", 0}, 88 {"", 0},
@@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = {
299 POWER_SUPPLY_PROP_SERIAL_NUMBER, 318 POWER_SUPPLY_PROP_SERIAL_NUMBER,
300}; 319};
301 320
321#ifdef CONFIG_ACPI_PROCFS_POWER
322inline char *acpi_battery_units(struct acpi_battery *battery)
323{
324 return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
325 "mA" : "mW";
326}
327#endif
328
302/* -------------------------------------------------------------------------- 329/* --------------------------------------------------------------------------
303 Battery Management 330 Battery Management
304 -------------------------------------------------------------------------- */ 331 -------------------------------------------------------------------------- */
@@ -717,6 +744,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
717} 744}
718 745
719/* -------------------------------------------------------------------------- 746/* --------------------------------------------------------------------------
747 FS Interface (/proc)
748 -------------------------------------------------------------------------- */
749
750#ifdef CONFIG_ACPI_PROCFS_POWER
751static struct proc_dir_entry *acpi_battery_dir;
752
753static int acpi_battery_print_info(struct seq_file *seq, int result)
754{
755 struct acpi_battery *battery = seq->private;
756
757 if (result)
758 goto end;
759
760 seq_printf(seq, "present: %s\n",
761 acpi_battery_present(battery) ? "yes" : "no");
762 if (!acpi_battery_present(battery))
763 goto end;
764 if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
765 seq_printf(seq, "design capacity: unknown\n");
766 else
767 seq_printf(seq, "design capacity: %d %sh\n",
768 battery->design_capacity,
769 acpi_battery_units(battery));
770
771 if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
772 seq_printf(seq, "last full capacity: unknown\n");
773 else
774 seq_printf(seq, "last full capacity: %d %sh\n",
775 battery->full_charge_capacity,
776 acpi_battery_units(battery));
777
778 seq_printf(seq, "battery technology: %srechargeable\n",
779 (!battery->technology)?"non-":"");
780
781 if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
782 seq_printf(seq, "design voltage: unknown\n");
783 else
784 seq_printf(seq, "design voltage: %d mV\n",
785 battery->design_voltage);
786 seq_printf(seq, "design capacity warning: %d %sh\n",
787 battery->design_capacity_warning,
788 acpi_battery_units(battery));
789 seq_printf(seq, "design capacity low: %d %sh\n",
790 battery->design_capacity_low,
791 acpi_battery_units(battery));
792 seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
793 seq_printf(seq, "capacity granularity 1: %d %sh\n",
794 battery->capacity_granularity_1,
795 acpi_battery_units(battery));
796 seq_printf(seq, "capacity granularity 2: %d %sh\n",
797 battery->capacity_granularity_2,
798 acpi_battery_units(battery));
799 seq_printf(seq, "model number: %s\n", battery->model_number);
800 seq_printf(seq, "serial number: %s\n", battery->serial_number);
801 seq_printf(seq, "battery type: %s\n", battery->type);
802 seq_printf(seq, "OEM info: %s\n", battery->oem_info);
803 end:
804 if (result)
805 seq_printf(seq, "ERROR: Unable to read battery info\n");
806 return result;
807}
808
809static int acpi_battery_print_state(struct seq_file *seq, int result)
810{
811 struct acpi_battery *battery = seq->private;
812
813 if (result)
814 goto end;
815
816 seq_printf(seq, "present: %s\n",
817 acpi_battery_present(battery) ? "yes" : "no");
818 if (!acpi_battery_present(battery))
819 goto end;
820
821 seq_printf(seq, "capacity state: %s\n",
822 (battery->state & 0x04) ? "critical" : "ok");
823 if ((battery->state & 0x01) && (battery->state & 0x02))
824 seq_printf(seq,
825 "charging state: charging/discharging\n");
826 else if (battery->state & 0x01)
827 seq_printf(seq, "charging state: discharging\n");
828 else if (battery->state & 0x02)
829 seq_printf(seq, "charging state: charging\n");
830 else
831 seq_printf(seq, "charging state: charged\n");
832
833 if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
834 seq_printf(seq, "present rate: unknown\n");
835 else
836 seq_printf(seq, "present rate: %d %s\n",
837 battery->rate_now, acpi_battery_units(battery));
838
839 if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
840 seq_printf(seq, "remaining capacity: unknown\n");
841 else
842 seq_printf(seq, "remaining capacity: %d %sh\n",
843 battery->capacity_now, acpi_battery_units(battery));
844 if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
845 seq_printf(seq, "present voltage: unknown\n");
846 else
847 seq_printf(seq, "present voltage: %d mV\n",
848 battery->voltage_now);
849 end:
850 if (result)
851 seq_printf(seq, "ERROR: Unable to read battery state\n");
852
853 return result;
854}
855
856static int acpi_battery_print_alarm(struct seq_file *seq, int result)
857{
858 struct acpi_battery *battery = seq->private;
859
860 if (result)
861 goto end;
862
863 if (!acpi_battery_present(battery)) {
864 seq_printf(seq, "present: no\n");
865 goto end;
866 }
867 seq_printf(seq, "alarm: ");
868 if (!battery->alarm)
869 seq_printf(seq, "unsupported\n");
870 else
871 seq_printf(seq, "%u %sh\n", battery->alarm,
872 acpi_battery_units(battery));
873 end:
874 if (result)
875 seq_printf(seq, "ERROR: Unable to read battery alarm\n");
876 return result;
877}
878
879static ssize_t acpi_battery_write_alarm(struct file *file,
880 const char __user * buffer,
881 size_t count, loff_t * ppos)
882{
883 int result = 0;
884 char alarm_string[12] = { '\0' };
885 struct seq_file *m = file->private_data;
886 struct acpi_battery *battery = m->private;
887
888 if (!battery || (count > sizeof(alarm_string) - 1))
889 return -EINVAL;
890 if (!acpi_battery_present(battery)) {
891 result = -ENODEV;
892 goto end;
893 }
894 if (copy_from_user(alarm_string, buffer, count)) {
895 result = -EFAULT;
896 goto end;
897 }
898 alarm_string[count] = '\0';
899 battery->alarm = simple_strtol(alarm_string, NULL, 0);
900 result = acpi_battery_set_alarm(battery);
901 end:
902 if (!result)
903 return count;
904 return result;
905}
906
907typedef int(*print_func)(struct seq_file *seq, int result);
908
909static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
910 acpi_battery_print_info,
911 acpi_battery_print_state,
912 acpi_battery_print_alarm,
913};
914
915static int acpi_battery_read(int fid, struct seq_file *seq)
916{
917 struct acpi_battery *battery = seq->private;
918 int result = acpi_battery_update(battery);
919 return acpi_print_funcs[fid](seq, result);
920}
921
922#define DECLARE_FILE_FUNCTIONS(_name) \
923static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
924{ \
925 return acpi_battery_read(_name##_tag, seq); \
926} \
927static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
928{ \
929 return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
930}
931
932DECLARE_FILE_FUNCTIONS(info);
933DECLARE_FILE_FUNCTIONS(state);
934DECLARE_FILE_FUNCTIONS(alarm);
935
936#undef DECLARE_FILE_FUNCTIONS
937
938#define FILE_DESCRIPTION_RO(_name) \
939 { \
940 .name = __stringify(_name), \
941 .mode = S_IRUGO, \
942 .ops = { \
943 .open = acpi_battery_##_name##_open_fs, \
944 .read = seq_read, \
945 .llseek = seq_lseek, \
946 .release = single_release, \
947 .owner = THIS_MODULE, \
948 }, \
949 }
950
951#define FILE_DESCRIPTION_RW(_name) \
952 { \
953 .name = __stringify(_name), \
954 .mode = S_IFREG | S_IRUGO | S_IWUSR, \
955 .ops = { \
956 .open = acpi_battery_##_name##_open_fs, \
957 .read = seq_read, \
958 .llseek = seq_lseek, \
959 .write = acpi_battery_write_##_name, \
960 .release = single_release, \
961 .owner = THIS_MODULE, \
962 }, \
963 }
964
965static const struct battery_file {
966 struct file_operations ops;
967 umode_t mode;
968 const char *name;
969} acpi_battery_file[] = {
970 FILE_DESCRIPTION_RO(info),
971 FILE_DESCRIPTION_RO(state),
972 FILE_DESCRIPTION_RW(alarm),
973};
974
975#undef FILE_DESCRIPTION_RO
976#undef FILE_DESCRIPTION_RW
977
978static int acpi_battery_add_fs(struct acpi_device *device)
979{
980 struct proc_dir_entry *entry = NULL;
981 int i;
982
983 printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
984 " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
985 if (!acpi_device_dir(device)) {
986 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
987 acpi_battery_dir);
988 if (!acpi_device_dir(device))
989 return -ENODEV;
990 }
991
992 for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
993 entry = proc_create_data(acpi_battery_file[i].name,
994 acpi_battery_file[i].mode,
995 acpi_device_dir(device),
996 &acpi_battery_file[i].ops,
997 acpi_driver_data(device));
998 if (!entry)
999 return -ENODEV;
1000 }
1001 return 0;
1002}
1003
1004static void acpi_battery_remove_fs(struct acpi_device *device)
1005{
1006 int i;
1007 if (!acpi_device_dir(device))
1008 return;
1009 for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
1010 remove_proc_entry(acpi_battery_file[i].name,
1011 acpi_device_dir(device));
1012
1013 remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
1014 acpi_device_dir(device) = NULL;
1015}
1016
1017#endif
1018
1019/* --------------------------------------------------------------------------
720 Driver Interface 1020 Driver Interface
721 -------------------------------------------------------------------------- */ 1021 -------------------------------------------------------------------------- */
722 1022
@@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device)
790 result = acpi_battery_update(battery); 1090 result = acpi_battery_update(battery);
791 if (result) 1091 if (result)
792 goto fail; 1092 goto fail;
1093#ifdef CONFIG_ACPI_PROCFS_POWER
1094 result = acpi_battery_add_fs(device);
1095#endif
1096 if (result) {
1097#ifdef CONFIG_ACPI_PROCFS_POWER
1098 acpi_battery_remove_fs(device);
1099#endif
1100 goto fail;
1101 }
793 1102
794 printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", 1103 printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
795 ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), 1104 ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device)
816 return -EINVAL; 1125 return -EINVAL;
817 battery = acpi_driver_data(device); 1126 battery = acpi_driver_data(device);
818 unregister_pm_notifier(&battery->pm_nb); 1127 unregister_pm_notifier(&battery->pm_nb);
1128#ifdef CONFIG_ACPI_PROCFS_POWER
1129 acpi_battery_remove_fs(device);
1130#endif
819 sysfs_remove_battery(battery); 1131 sysfs_remove_battery(battery);
820 mutex_destroy(&battery->lock); 1132 mutex_destroy(&battery->lock);
821 mutex_destroy(&battery->sysfs_lock); 1133 mutex_destroy(&battery->sysfs_lock);
@@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
866 1178
867 if (dmi_check_system(bat_dmi_table)) 1179 if (dmi_check_system(bat_dmi_table))
868 battery_bix_broken_package = 1; 1180 battery_bix_broken_package = 1;
869 acpi_bus_register_driver(&acpi_battery_driver); 1181
1182#ifdef CONFIG_ACPI_PROCFS_POWER
1183 acpi_battery_dir = acpi_lock_battery_dir();
1184 if (!acpi_battery_dir)
1185 return;
1186#endif
1187 if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
1188#ifdef CONFIG_ACPI_PROCFS_POWER
1189 acpi_unlock_battery_dir(acpi_battery_dir);
1190#endif
1191 return;
1192 }
1193 return;
870} 1194}
871 1195
872static int __init acpi_battery_init(void) 1196static int __init acpi_battery_init(void)
@@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void)
878static void __exit acpi_battery_exit(void) 1202static void __exit acpi_battery_exit(void)
879{ 1203{
880 acpi_bus_unregister_driver(&acpi_battery_driver); 1204 acpi_bus_unregister_driver(&acpi_battery_driver);
1205#ifdef CONFIG_ACPI_PROCFS_POWER
1206 acpi_unlock_battery_dir(acpi_battery_dir);
1207#endif
881} 1208}
882 1209
883module_init(acpi_battery_init); 1210module_init(acpi_battery_init);
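
For reference, the first DECLARE_FILE_FUNCTIONS() invocation in the battery.c hunk above expands to the following pair of functions (kernel context; the state and alarm invocations differ only in the substituted name):

static int acpi_battery_read_info(struct seq_file *seq, void *offset)
{
	return acpi_battery_read(info_tag, seq);
}

static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
}
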
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index afec4526c48a..3d8413d02a97 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
314 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), 314 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
315 }, 315 },
316 }, 316 },
317 {
318 .callback = dmi_disable_osi_win8,
319 .ident = "Dell Inspiron 7737",
320 .matches = {
321 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
322 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
323 },
324 },
317 325
318 /* 326 /*
319 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 327 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
@@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
374 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"), 382 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
375 }, 383 },
376 }, 384 },
 385 /*
 386 * Without this, this EEEpc exports a non-working WMI interface; with
 387 * it, it exports the working "good old" eeepc_laptop interface, fixing
 388 * both brightness control and rfkill.
 389 */
390 {
391 .callback = dmi_enable_osi_linux,
392 .ident = "Asus EEE PC 1015PX",
393 .matches = {
394 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
395 DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
396 },
397 },
377 {} 398 {}
378}; 399};
379 400
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644
index 000000000000..6c9ee68e46fb
--- /dev/null
+++ b/drivers/acpi/cm_sbs.c
@@ -0,0 +1,105 @@
1/*
2 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or (at
7 * your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/acpi.h>
25#include <linux/types.h>
26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
28#include <acpi/acpi_bus.h>
29#include <acpi/acpi_drivers.h>
30
31#define PREFIX "ACPI: "
32
33ACPI_MODULE_NAME("cm_sbs");
34#define ACPI_AC_CLASS "ac_adapter"
35#define ACPI_BATTERY_CLASS "battery"
36#define _COMPONENT ACPI_SBS_COMPONENT
37static struct proc_dir_entry *acpi_ac_dir;
38static struct proc_dir_entry *acpi_battery_dir;
39
40static DEFINE_MUTEX(cm_sbs_mutex);
41
42static int lock_ac_dir_cnt;
43static int lock_battery_dir_cnt;
44
45struct proc_dir_entry *acpi_lock_ac_dir(void)
46{
47 mutex_lock(&cm_sbs_mutex);
48 if (!acpi_ac_dir)
49 acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
50 if (acpi_ac_dir) {
51 lock_ac_dir_cnt++;
52 } else {
53 printk(KERN_ERR PREFIX
54 "Cannot create %s\n", ACPI_AC_CLASS);
55 }
56 mutex_unlock(&cm_sbs_mutex);
57 return acpi_ac_dir;
58}
59EXPORT_SYMBOL(acpi_lock_ac_dir);
60
61void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
62{
63 mutex_lock(&cm_sbs_mutex);
64 if (acpi_ac_dir_param)
65 lock_ac_dir_cnt--;
66 if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
67 remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
68 acpi_ac_dir = NULL;
69 }
70 mutex_unlock(&cm_sbs_mutex);
71}
72EXPORT_SYMBOL(acpi_unlock_ac_dir);
73
74struct proc_dir_entry *acpi_lock_battery_dir(void)
75{
76 mutex_lock(&cm_sbs_mutex);
77 if (!acpi_battery_dir) {
78 acpi_battery_dir =
79 proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
80 }
81 if (acpi_battery_dir) {
82 lock_battery_dir_cnt++;
83 } else {
84 printk(KERN_ERR PREFIX
85 "Cannot create %s\n", ACPI_BATTERY_CLASS);
86 }
87 mutex_unlock(&cm_sbs_mutex);
88 return acpi_battery_dir;
89}
90EXPORT_SYMBOL(acpi_lock_battery_dir);
91
92void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
93{
94 mutex_lock(&cm_sbs_mutex);
95 if (acpi_battery_dir_param)
96 lock_battery_dir_cnt--;
97 if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
98 && acpi_battery_dir) {
99 remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
100 acpi_battery_dir = NULL;
101 }
102 mutex_unlock(&cm_sbs_mutex);
103 return;
104}
105EXPORT_SYMBOL(acpi_unlock_battery_dir);
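
cm_sbs.c provides counted create/remove for the shared /proc/acpi class directories: the directory is created on the first lock and removed when the last locker unlocks. A stand-alone model of the counting (the real code additionally serializes everything with cm_sbs_mutex):

#include <stdio.h>

static int lock_cnt;
static int dir_exists;

static int lock_dir(void)              /* cf. acpi_lock_battery_dir() */
{
	if (!dir_exists)
		dir_exists = 1;        /* proc_mkdir(...) */
	if (dir_exists)
		lock_cnt++;
	return dir_exists;
}

static void unlock_dir(void)           /* cf. acpi_unlock_battery_dir() */
{
	if (--lock_cnt == 0 && dir_exists) {
		dir_exists = 0;        /* remove_proc_entry(...) */
		printf("directory removed\n");
	}
}

int main(void)
{
	lock_dir();                    /* first user creates the dir */
	lock_dir();                    /* second user just takes a ref */
	unlock_dir();                  /* dir stays: one ref remains */
	unlock_dir();                  /* last unlock tears it down */
	return 0;
}
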
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index c1e31a41f949..25bbc55dca89 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
1278 1278
1279static void __exit acpi_thermal_exit(void) 1279static void __exit acpi_thermal_exit(void)
1280{ 1280{
1281 destroy_workqueue(acpi_thermal_pm_queue);
1282 acpi_bus_unregister_driver(&acpi_thermal_driver); 1281 acpi_bus_unregister_driver(&acpi_thermal_driver);
1282 destroy_workqueue(acpi_thermal_pm_queue);
1283 1283
1284 return; 1284 return;
1285} 1285}
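
The thermal.c fix is pure teardown ordering: unregister the driver first, so nothing can still submit work to acpi_thermal_pm_queue when destroy_workqueue() runs. A stand-alone sketch of why the original order is unsafe (all names here are generic models, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct workqueue { int alive; };

static struct workqueue *wq;

static void queue_work_model(struct workqueue *q)
{
	if (!q || !q->alive)
		abort();               /* queueing onto a destroyed queue */
	printf("work queued\n");
}

static void thermal_notify(void)       /* can fire until unregistration */
{
	queue_work_model(wq);
}

int main(void)
{
	wq = calloc(1, sizeof(*wq));
	if (!wq)
		return 1;
	wq->alive = 1;

	thermal_notify();              /* fine while everything is alive */

	/* Patched order: first ensure thermal_notify() can no longer run
	 * (acpi_bus_unregister_driver), only then kill the queue
	 * (destroy_workqueue). Destroying first would let a late notify
	 * hit the dead queue - the abort() above. */
	wq->alive = 0;
	free(wq);
	return 0;
}
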
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8b6990e417ec..f8bc5a755dda 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
457 }, 457 },
458 { 458 {
459 .callback = video_set_use_native_backlight, 459 .callback = video_set_use_native_backlight,
460 .ident = "ThinkPad T430s", 460 .ident = "ThinkPad T430 and T430s",
461 .matches = { 461 .matches = {
462 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 462 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
463 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), 463 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
464 }, 464 },
465 }, 465 },
466 { 466 {
@@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
472 }, 472 },
473 }, 473 },
474 { 474 {
475 .callback = video_set_use_native_backlight, 475 .callback = video_set_use_native_backlight,
476 .ident = "ThinkPad X1 Carbon", 476 .ident = "ThinkPad X1 Carbon",
477 .matches = { 477 .matches = {
478 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 478 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
500 .ident = "Dell Inspiron 7520", 500 .ident = "Dell Inspiron 7520",
501 .matches = { 501 .matches = {
502 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 502 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
503 DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), 503 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
504 }, 504 },
505 }, 505 },
506 { 506 {
@@ -513,6 +513,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
513 }, 513 },
514 { 514 {
515 .callback = video_set_use_native_backlight, 515 .callback = video_set_use_native_backlight,
516 .ident = "Acer Aspire 5742G",
517 .matches = {
518 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
519 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
520 },
521 },
522 {
523 .callback = video_set_use_native_backlight,
516 .ident = "Acer Aspire V5-431", 524 .ident = "Acer Aspire V5-431",
517 .matches = { 525 .matches = {
518 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 526 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c2706047337f..0033fafc470b 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -815,7 +815,7 @@ config PATA_AT32
815 815
816config PATA_AT91 816config PATA_AT91
817 tristate "PATA support for AT91SAM9260" 817 tristate "PATA support for AT91SAM9260"
818 depends on ARM && ARCH_AT91 818 depends on ARM && SOC_AT91SAM9
819 help 819 help
820 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. 820 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
821 821
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 71e15b73513d..60707814a84b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev)
1115 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); 1115 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
1116} 1116}
1117 1117
1118static bool ahci_broken_devslp(struct pci_dev *pdev)
1119{
1120 /* device with broken DEVSLP but still showing SDS capability */
1121 static const struct pci_device_id ids[] = {
1122 { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
1123 {}
1124 };
1125
1126 return pci_match_id(ids, pdev);
1127}
1128
1118#ifdef CONFIG_ATA_ACPI 1129#ifdef CONFIG_ATA_ACPI
1119static void ahci_gtf_filter_workaround(struct ata_host *host) 1130static void ahci_gtf_filter_workaround(struct ata_host *host)
1120{ 1131{
@@ -1364,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1364 1375
1365 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; 1376 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
1366 1377
1378 /* must set flag prior to save config in order to take effect */
1379 if (ahci_broken_devslp(pdev))
1380 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
1381
1367 /* save initial config */ 1382 /* save initial config */
1368 ahci_pci_save_initial_config(pdev, hpriv); 1383 ahci_pci_save_initial_config(pdev, hpriv);
1369 1384
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index b5eb886da226..af63c75c2001 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -236,6 +236,7 @@ enum {
236 port start (wait until 236 port start (wait until
237 error-handling stage) */ 237 error-handling stage) */
238 AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ 238 AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
239 AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
239 240
240 /* ap->flags bits */ 241 /* ap->flags bits */
241 242
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 497c7abe1c7d..8befeb69eeb1 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -29,9 +29,25 @@
29#include "ahci.h" 29#include "ahci.h"
30 30
31enum { 31enum {
32 PORT_PHY_CTL = 0x178, /* Port0 PHY Control */ 32 /* Timer 1-ms Register */
33 PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */ 33 IMX_TIMER1MS = 0x00e0,
34 HOST_TIMER1MS = 0xe0, /* Timer 1-ms */ 34 /* Port0 PHY Control Register */
35 IMX_P0PHYCR = 0x0178,
36 IMX_P0PHYCR_TEST_PDDQ = 1 << 20,
37 IMX_P0PHYCR_CR_READ = 1 << 19,
38 IMX_P0PHYCR_CR_WRITE = 1 << 18,
39 IMX_P0PHYCR_CR_CAP_DATA = 1 << 17,
40 IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16,
41 /* Port0 PHY Status Register */
42 IMX_P0PHYSR = 0x017c,
43 IMX_P0PHYSR_CR_ACK = 1 << 18,
44 IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0,
45 /* Lane0 Output Status Register */
46 IMX_LANE0_OUT_STAT = 0x2003,
47 IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1,
48 /* Clock Reset Register */
49 IMX_CLOCK_RESET = 0x7f3f,
50 IMX_CLOCK_RESET_RESET = 1 << 0,
35}; 51};
36 52
37enum ahci_imx_type { 53enum ahci_imx_type {
@@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support
54 70
55static void ahci_imx_host_stop(struct ata_host *host); 71static void ahci_imx_host_stop(struct ata_host *host);
56 72
73static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
74{
75 int timeout = 10;
76 u32 crval;
77 u32 srval;
78
79 /* Assert or deassert the bit */
80 crval = readl(mmio + IMX_P0PHYCR);
81 if (assert)
82 crval |= bit;
83 else
84 crval &= ~bit;
85 writel(crval, mmio + IMX_P0PHYCR);
86
87 /* Wait for the cr_ack signal */
88 do {
89 srval = readl(mmio + IMX_P0PHYSR);
90 if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
91 break;
92 usleep_range(100, 200);
93 } while (--timeout);
94
95 return timeout ? 0 : -ETIMEDOUT;
96}
97
98static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
99{
100 u32 crval = addr;
101 int ret;
102
103 /* Supply the address on cr_data_in */
104 writel(crval, mmio + IMX_P0PHYCR);
105
106 /* Assert the cr_cap_addr signal */
107 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
108 if (ret)
109 return ret;
110
111 /* Deassert cr_cap_addr */
112 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
113 if (ret)
114 return ret;
115
116 return 0;
117}
118
119static int imx_phy_reg_write(u16 val, void __iomem *mmio)
120{
121 u32 crval = val;
122 int ret;
123
124 /* Supply the data on cr_data_in */
125 writel(crval, mmio + IMX_P0PHYCR);
126
127 /* Assert the cr_cap_data signal */
128 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
129 if (ret)
130 return ret;
131
132 /* Deassert cr_cap_data */
133 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
134 if (ret)
135 return ret;
136
137 if (val & IMX_CLOCK_RESET_RESET) {
138 /*
139 * In case we're resetting the phy, it's unable to acknowledge,
140 * so we return immediately here.
141 */
142 crval |= IMX_P0PHYCR_CR_WRITE;
143 writel(crval, mmio + IMX_P0PHYCR);
144 goto out;
145 }
146
147 /* Assert the cr_write signal */
148 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
149 if (ret)
150 return ret;
151
152 /* Deassert cr_write */
153 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
154 if (ret)
155 return ret;
156
157out:
158 return 0;
159}
160
161static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
162{
163 int ret;
164
165 /* Assert the cr_read signal */
166 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
167 if (ret)
168 return ret;
169
170 /* Capture the data from cr_data_out[] */
171 *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
172
173 /* Deassert cr_read */
174 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
175 if (ret)
176 return ret;
177
178 return 0;
179}
180
181static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
182{
183 void __iomem *mmio = hpriv->mmio;
184 int timeout = 10;
185 u16 val;
186 int ret;
187
188 /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
189 ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
190 if (ret)
191 return ret;
192 ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
193 if (ret)
194 return ret;
195
196 /* Wait for PHY RX_PLL to be stable */
197 do {
198 usleep_range(100, 200);
199 ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
200 if (ret)
201 return ret;
202 ret = imx_phy_reg_read(&val, mmio);
203 if (ret)
204 return ret;
205 if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
206 break;
207 } while (--timeout);
208
209 return timeout ? 0 : -ETIMEDOUT;
210}
211
57static int imx_sata_enable(struct ahci_host_priv *hpriv) 212static int imx_sata_enable(struct ahci_host_priv *hpriv)
58{ 213{
59 struct imx_ahci_priv *imxpriv = hpriv->plat_data; 214 struct imx_ahci_priv *imxpriv = hpriv->plat_data;
215 struct device *dev = &imxpriv->ahci_pdev->dev;
60 int ret; 216 int ret;
61 217
62 if (imxpriv->no_device) 218 if (imxpriv->no_device)
@@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
101 regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, 257 regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
102 IMX6Q_GPR13_SATA_MPLL_CLK_EN, 258 IMX6Q_GPR13_SATA_MPLL_CLK_EN,
103 IMX6Q_GPR13_SATA_MPLL_CLK_EN); 259 IMX6Q_GPR13_SATA_MPLL_CLK_EN);
260
261 usleep_range(100, 200);
262
263 ret = imx_sata_phy_reset(hpriv);
264 if (ret) {
265 dev_err(dev, "failed to reset phy: %d\n", ret);
266 goto disable_regulator;
267 }
104 } 268 }
105 269
106 usleep_range(1000, 2000); 270 usleep_range(1000, 2000);
@@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap)
156 * without full reset once the pddq mode is enabled making it 320 * without full reset once the pddq mode is enabled making it
157 * impossible to use as part of libata LPM. 321 * impossible to use as part of libata LPM.
158 */ 322 */
159 reg_val = readl(mmio + PORT_PHY_CTL); 323 reg_val = readl(mmio + IMX_P0PHYCR);
160 writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL); 324 writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
161 imx_sata_disable(hpriv); 325 imx_sata_disable(hpriv);
162 imxpriv->no_device = true; 326 imxpriv->no_device = true;
163} 327}
@@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
217 if (!imxpriv) 381 if (!imxpriv)
218 return -ENOMEM; 382 return -ENOMEM;
219 383
384 imxpriv->ahci_pdev = pdev;
220 imxpriv->no_device = false; 385 imxpriv->no_device = false;
221 imxpriv->first_time = true; 386 imxpriv->first_time = true;
222 imxpriv->type = (enum ahci_imx_type)of_id->data; 387 imxpriv->type = (enum ahci_imx_type)of_id->data;
@@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
248 413
249 /* 414 /*
250 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, 415 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
251 * and IP vendor specific register HOST_TIMER1MS. 416 * and IP vendor specific register IMX_TIMER1MS.
 252 * Configure CAP_SSS (support staggered spin up). 417 * Configure CAP_SSS (support staggered spin up).
 253 * Implement port0. 418 * Implement port0.
254 * Get the ahb clock rate, and configure the TIMER1MS register. 419 * Get the ahb clock rate, and configure the TIMER1MS register.
@@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
265 } 430 }
266 431
267 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; 432 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
268 writel(reg_val, hpriv->mmio + HOST_TIMER1MS); 433 writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
269 434
270 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0); 435 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
271 if (ret) 436 if (ret)
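
The new PHY helpers in ahci_imx.c drive a capture/acknowledge handshake on the port-0 PHY control register: a value is placed on cr_data_in, then cr_cap_addr, cr_cap_data, cr_write or cr_read is pulsed, with imx_phy_crbit_assert() waiting for cr_ack each time. A usage sketch in the same kernel context (imx_phy_reg_set() is a hypothetical helper, not part of the patch):

/* Hypothetical helper, kernel context (not stand-alone): write one PHY
 * register by pairing the two primitives exactly as imx_sata_phy_reset()
 * does above. */
static int imx_phy_reg_set(u16 addr, u16 val, void __iomem *mmio)
{
	int ret;

	ret = imx_phy_reg_addressing(addr, mmio);  /* latch the address */
	if (ret)
		return ret;
	return imx_phy_reg_write(val, mmio);       /* latch and commit data */
}
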
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 6bd4f660b4e1..b9861453fc81 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev,
452 cap &= ~HOST_CAP_SNTF; 452 cap &= ~HOST_CAP_SNTF;
453 } 453 }
454 454
455 if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
456 dev_info(dev,
457 "controller can't do DEVSLP, turning off\n");
458 cap2 &= ~HOST_CAP2_SDS;
459 cap2 &= ~HOST_CAP2_SADM;
460 }
461
455 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { 462 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
456 dev_info(dev, "controller can do FBS, turning on CAP_FBS\n"); 463 dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
457 cap |= HOST_CAP_FBS; 464 cap |= HOST_CAP_FBS;
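
Taken together, the ahci.c, ahci.h and libahci.c hunks implement the DevSlp quirk: ahci_broken_devslp() matches the Valleyview PCI ID, the new AHCI_HFLAG_NO_DEVSLP must be set before the initial config is saved, and ahci_save_initial_config() then hides the SDS and SADM capability bits. A stand-alone model of the masking step (the HOST_CAP2_* bit positions are assumed; only AHCI_HFLAG_NO_DEVSLP is taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define HOST_CAP2_SDS        (1u << 3)   /* assumed bit positions */
#define HOST_CAP2_SADM       (1u << 4)
#define AHCI_HFLAG_NO_DEVSLP (1u << 17)  /* from the patch */

static uint32_t save_initial_cap2(uint32_t cap2, uint32_t hflags)
{
	if ((cap2 & HOST_CAP2_SDS) && (hflags & AHCI_HFLAG_NO_DEVSLP)) {
		cap2 &= ~HOST_CAP2_SDS;  /* hide device sleep... */
		cap2 &= ~HOST_CAP2_SADM; /* ...and aggressive DevSlp mgmt */
	}
	return cap2;
}

int main(void)
{
	/* Valleyview advertises SDS, but the quirk flag strips it. */
	printf("%#x\n", save_initial_cap2(HOST_CAP2_SDS | HOST_CAP2_SADM,
					  AHCI_HFLAG_NO_DEVSLP));
	return 0;
}
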
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 943cc8b83e59..ea83828bfea9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6314,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq,
6314static void ata_port_detach(struct ata_port *ap) 6314static void ata_port_detach(struct ata_port *ap)
6315{ 6315{
6316 unsigned long flags; 6316 unsigned long flags;
6317 struct ata_link *link;
6318 struct ata_device *dev;
6317 6319
6318 if (!ap->ops->error_handler) 6320 if (!ap->ops->error_handler)
6319 goto skip_eh; 6321 goto skip_eh;
@@ -6333,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap)
6333 cancel_delayed_work_sync(&ap->hotplug_task); 6335 cancel_delayed_work_sync(&ap->hotplug_task);
6334 6336
6335 skip_eh: 6337 skip_eh:
6338 /* clean up zpodd on port removal */
6339 ata_for_each_link(link, ap, HOST_FIRST) {
6340 ata_for_each_dev(dev, link, ALL) {
6341 if (zpodd_dev_enabled(dev))
6342 zpodd_exit(dev);
6343 }
6344 }
6336 if (ap->pmp_link) { 6345 if (ap->pmp_link) {
6337 int i; 6346 int i;
6338 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6347 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6d8a87f252de..cb9b1f8326c3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
144 if (unlikely(virtqueue_is_broken(vq))) 144 if (unlikely(virtqueue_is_broken(vq)))
145 break; 145 break;
146 } while (!virtqueue_enable_cb(vq)); 146 } while (!virtqueue_enable_cb(vq));
147 spin_unlock_irqrestore(&vblk->vq_lock, flags);
148 147
149 /* In case queue is stopped waiting for more buffers. */ 148 /* In case queue is stopped waiting for more buffers. */
150 if (req_done) 149 if (req_done)
151 blk_mq_start_stopped_hw_queues(vblk->disk->queue); 150 blk_mq_start_stopped_hw_queues(vblk->disk->queue);
151 spin_unlock_irqrestore(&vblk->vq_lock, flags);
152} 152}
153 153
154static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) 154static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
202 err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); 202 err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
203 if (err) { 203 if (err) {
204 virtqueue_kick(vblk->vq); 204 virtqueue_kick(vblk->vq);
205 spin_unlock_irqrestore(&vblk->vq_lock, flags);
206 blk_mq_stop_hw_queue(hctx); 205 blk_mq_stop_hw_queue(hctx);
206 spin_unlock_irqrestore(&vblk->vq_lock, flags);
207 /* Out of mem doesn't actually happen, since we fall back 207 /* Out of mem doesn't actually happen, since we fall back
208 * to direct descriptors */ 208 * to direct descriptors */
209 if (err == -ENOMEM || err == -ENOSPC) 209 if (err == -ENOMEM || err == -ENOSPC)
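
The virtio_blk hunks move the queue stop/start calls under vq_lock. A deterministic replay of the lost-wakeup interleaving the old unlock placement allowed, as a stand-alone model (one flag stands in for the blk-mq hardware-queue state):

#include <stdbool.h>
#include <stdio.h>

static bool queue_stopped;

static void cpu0_completion(void)  /* virtblk_done() after its early unlock */
{
	/* ...used buffers reaped, vq_lock already dropped... */
	if (queue_stopped) {
		queue_stopped = false; /* blk_mq_start_stopped_hw_queues() */
		printf("queue restarted\n");
	}
}

static void cpu1_submit_tail(void) /* virtio_queue_rq() after its unlock */
{
	queue_stopped = true;          /* blk_mq_stop_hw_queue() */
}

int main(void)
{
	/* CPU1 saw -ENOSPC and dropped vq_lock, but has not stopped the
	 * queue yet when CPU0's completion sweeps through: */
	cpu0_completion();             /* no-op: queue not stopped yet */
	cpu1_submit_tail();            /* the stop lands after the only wakeup */
	printf(queue_stopped ? "hung: stopped, no restart pending\n" : "ok\n");
	return 0;
}
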
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6b75713d953a..102c50d38902 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -995,8 +995,11 @@ retry:
995 ibytes = min_t(size_t, ibytes, have_bytes - reserved); 995 ibytes = min_t(size_t, ibytes, have_bytes - reserved);
996 if (ibytes < min) 996 if (ibytes < min)
997 ibytes = 0; 997 ibytes = 0;
998 entropy_count = max_t(int, 0, 998 if (have_bytes >= ibytes + reserved)
999 entropy_count - (ibytes << (ENTROPY_SHIFT + 3))); 999 entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
1000 else
1001 entropy_count = reserved << (ENTROPY_SHIFT + 3);
1002
1000 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1003 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
1001 goto retry; 1004 goto retry;
1002 1005
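
The random.c hunk replaces the blanket max_t(..., 0, ...) clamp with explicit accounting: subtract only when the observed bytes cover both the extraction and the reserve, and otherwise pin the count at the reserved level instead of letting it underflow past it. A worked example with an assumed ENTROPY_SHIFT of 3:

#include <stdio.h>

#define ENTROPY_SHIFT 3            /* assumed fractional shift */

int main(void)
{
	int have_bytes = 40, reserved = 16, ibytes = 24;
	int entropy_count = have_bytes << (ENTROPY_SHIFT + 3);

	if (have_bytes >= ibytes + reserved)
		entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
	else
		/* underflow guard: never account below the reserve */
		entropy_count = reserved << (ENTROPY_SHIFT + 3);

	/* 40 available, 24 extracted, 16 reserved: exactly the reserve
	 * is left standing. */
	printf("%d bytes left\n", entropy_count >> (ENTROPY_SHIFT + 3));
	return 0;
}
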
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index b3ea223585bd..61dcc8011ec7 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent)
328 /* Cache TPM ACPI handle and version string */ 328 /* Cache TPM ACPI handle and version string */
329 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 329 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
330 ppi_callback, NULL, NULL, &tpm_ppi_handle); 330 ppi_callback, NULL, NULL, &tpm_ppi_handle);
331 if (tpm_ppi_handle == NULL) 331 return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
332 return -ENODEV;
333
334 return sysfs_create_group(parent, &ppi_attr_grp);
335} 332}
336 333
337void tpm_remove_ppi(struct kobject *parent) 334void tpm_remove_ppi(struct kobject *parent)
338{ 335{
339 sysfs_remove_group(parent, &ppi_attr_grp); 336 if (tpm_ppi_handle)
337 sysfs_remove_group(parent, &ppi_attr_grp);
340} 338}
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index c7607feb18dd..54a06526f64f 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -27,7 +27,7 @@ LIST_HEAD(ccu_list); /* The list of set up CCUs */
27 27
28static bool clk_requires_trigger(struct kona_clk *bcm_clk) 28static bool clk_requires_trigger(struct kona_clk *bcm_clk)
29{ 29{
30 struct peri_clk_data *peri = bcm_clk->peri; 30 struct peri_clk_data *peri = bcm_clk->u.peri;
31 struct bcm_clk_sel *sel; 31 struct bcm_clk_sel *sel;
32 struct bcm_clk_div *div; 32 struct bcm_clk_div *div;
33 33
@@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
63 u32 limit; 63 u32 limit;
64 64
65 BUG_ON(bcm_clk->type != bcm_clk_peri); 65 BUG_ON(bcm_clk->type != bcm_clk_peri);
66 peri = bcm_clk->peri; 66 peri = bcm_clk->u.peri;
67 name = bcm_clk->name; 67 name = bcm_clk->name;
68 range = bcm_clk->ccu->range; 68 range = bcm_clk->ccu->range;
69 69
@@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
81 81
82 div = &peri->div; 82 div = &peri->div;
83 if (divider_exists(div)) { 83 if (divider_exists(div)) {
84 if (div->offset > limit) { 84 if (div->u.s.offset > limit) {
85 pr_err("%s: bad divider offset for %s (%u > %u)\n", 85 pr_err("%s: bad divider offset for %s (%u > %u)\n",
86 __func__, name, div->offset, limit); 86 __func__, name, div->u.s.offset, limit);
87 return false; 87 return false;
88 } 88 }
89 } 89 }
90 90
91 div = &peri->pre_div; 91 div = &peri->pre_div;
92 if (divider_exists(div)) { 92 if (divider_exists(div)) {
93 if (div->offset > limit) { 93 if (div->u.s.offset > limit) {
94 pr_err("%s: bad pre-divider offset for %s " 94 pr_err("%s: bad pre-divider offset for %s "
95 "(%u > %u)\n", 95 "(%u > %u)\n",
96 __func__, name, div->offset, limit); 96 __func__, name, div->u.s.offset, limit);
97 return false; 97 return false;
98 } 98 }
99 } 99 }
@@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
249{ 249{
250 if (divider_is_fixed(div)) { 250 if (divider_is_fixed(div)) {
251 /* Any fixed divider value but 0 is OK */ 251 /* Any fixed divider value but 0 is OK */
252 if (div->fixed == 0) { 252 if (div->u.fixed == 0) {
253 pr_err("%s: bad %s fixed value 0 for %s\n", __func__, 253 pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
254 field_name, clock_name); 254 field_name, clock_name);
255 return false; 255 return false;
256 } 256 }
257 return true; 257 return true;
258 } 258 }
259 if (!bitfield_valid(div->shift, div->width, field_name, clock_name)) 259 if (!bitfield_valid(div->u.s.shift, div->u.s.width,
260 field_name, clock_name))
260 return false; 261 return false;
261 262
262 if (divider_has_fraction(div)) 263 if (divider_has_fraction(div))
263 if (div->frac_width > div->width) { 264 if (div->u.s.frac_width > div->u.s.width) {
264 pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", 265 pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
265 __func__, field_name, clock_name, 266 __func__, field_name, clock_name,
266 div->frac_width, div->width); 267 div->u.s.frac_width, div->u.s.width);
267 return false; 268 return false;
268 } 269 }
269 270
@@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
278 */ 279 */
279static bool kona_dividers_valid(struct kona_clk *bcm_clk) 280static bool kona_dividers_valid(struct kona_clk *bcm_clk)
280{ 281{
281 struct peri_clk_data *peri = bcm_clk->peri; 282 struct peri_clk_data *peri = bcm_clk->u.peri;
282 struct bcm_clk_div *div; 283 struct bcm_clk_div *div;
283 struct bcm_clk_div *pre_div; 284 struct bcm_clk_div *pre_div;
284 u32 limit; 285 u32 limit;
@@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk)
295 296
296 limit = BITS_PER_BYTE * sizeof(u32); 297 limit = BITS_PER_BYTE * sizeof(u32);
297 298
298 return div->frac_width + pre_div->frac_width <= limit; 299 return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
299} 300}
300 301
301 302
@@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk)
328 if (!peri_clk_data_offsets_valid(bcm_clk)) 329 if (!peri_clk_data_offsets_valid(bcm_clk))
329 return false; 330 return false;
330 331
331 peri = bcm_clk->peri; 332 peri = bcm_clk->u.peri;
332 name = bcm_clk->name; 333 name = bcm_clk->name;
333 gate = &peri->gate; 334 gate = &peri->gate;
334 if (gate_exists(gate) && !gate_valid(gate, "gate", name)) 335 if (gate_exists(gate) && !gate_valid(gate, "gate", name))
@@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk)
588{ 589{
589 switch (bcm_clk->type) { 590 switch (bcm_clk->type) {
590 case bcm_clk_peri: 591 case bcm_clk_peri:
591 peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); 592 peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
592 break; 593 break;
593 default: 594 default:
594 break; 595 break;
595 } 596 }
596 bcm_clk->data = NULL; 597 bcm_clk->u.data = NULL;
597 bcm_clk->type = bcm_clk_none; 598 bcm_clk->type = bcm_clk_none;
598} 599}
599 600
@@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
644 break; 645 break;
645 } 646 }
646 bcm_clk->type = type; 647 bcm_clk->type = type;
647 bcm_clk->data = data; 648 bcm_clk->u.data = data;
648 649
649 /* Make sure everything makes sense before we set it up */ 650 /* Make sure everything makes sense before we set it up */
650 if (!kona_clk_valid(bcm_clk)) { 651 if (!kona_clk_valid(bcm_clk)) {
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index e3d339e08309..db11a87449f2 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor)
 /* Convert a divider into the scaled divisor value it represents. */
 static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
 {
-	return (u64)reg_div + ((u64)1 << div->frac_width);
+	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
 }

 /*
@@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
 	BUG_ON(billionths >= BILLION);

 	combined = (u64)div_value * BILLION + billionths;
-	combined <<= div->frac_width;
+	combined <<= div->u.s.frac_width;

 	return do_div_round_closest(combined, BILLION);
 }
@@ -87,7 +87,7 @@ static inline u64
 scaled_div_min(struct bcm_clk_div *div)
 {
 	if (divider_is_fixed(div))
-		return (u64)div->fixed;
+		return (u64)div->u.fixed;

 	return scaled_div_value(div, 0);
 }
@@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div)
 	u32 reg_div;

 	if (divider_is_fixed(div))
-		return (u64)div->fixed;
+		return (u64)div->u.fixed;

-	reg_div = ((u32)1 << div->width) - 1;
+	reg_div = ((u32)1 << div->u.s.width) - 1;

 	return scaled_div_value(div, reg_div);
 }
@@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div)
 	BUG_ON(scaled_div < scaled_div_min(div));
 	BUG_ON(scaled_div > scaled_div_max(div));

-	return (u32)(scaled_div - ((u64)1 << div->frac_width));
+	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
 }

 /* Return a rate scaled for use when dividing by a scaled divisor. */
@@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate)
 	if (divider_is_fixed(div))
 		return (u64)rate;

-	return (u64)rate << div->frac_width;
+	return (u64)rate << div->u.s.frac_width;
 }

 /* CCU access */
@@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
 	u32 reg_div;

 	if (divider_is_fixed(div))
-		return (u64)div->fixed;
+		return (u64)div->u.fixed;

 	flags = ccu_lock(ccu);
-	reg_val = __ccu_read(ccu, div->offset);
+	reg_val = __ccu_read(ccu, div->u.s.offset);
 	ccu_unlock(ccu, flags);

 	/* Extract the full divider field from the register value */
-	reg_div = bitfield_extract(reg_val, div->shift, div->width);
+	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

 	/* Return the scaled divisor value it represents */
 	return scaled_div_value(div, reg_div);
@@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 	 * state was defined in the device tree, we just find out
 	 * what its current value is rather than updating it.
 	 */
-	if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
-		reg_val = __ccu_read(ccu, div->offset);
-		reg_div = bitfield_extract(reg_val, div->shift, div->width);
-		div->scaled_div = scaled_div_value(div, reg_div);
+	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
+		reg_val = __ccu_read(ccu, div->u.s.offset);
+		reg_div = bitfield_extract(reg_val, div->u.s.shift,
+					div->u.s.width);
+		div->u.s.scaled_div = scaled_div_value(div, reg_div);

 		return 0;
 	}

 	/* Convert the scaled divisor to the value we need to record */
-	reg_div = divider(div, div->scaled_div);
+	reg_div = divider(div, div->u.s.scaled_div);

 	/* Clock needs to be enabled before changing the rate */
 	enabled = __is_clk_gate_enabled(ccu, gate);
@@ -452,9 +453,10 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 	}

 	/* Replace the divider value and record the result */
-	reg_val = __ccu_read(ccu, div->offset);
-	reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
-	__ccu_write(ccu, div->offset, reg_val);
+	reg_val = __ccu_read(ccu, div->u.s.offset);
+	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
+				reg_div);
+	__ccu_write(ccu, div->u.s.offset, reg_val);

 	/* If the trigger fails we still want to disable the gate */
 	if (!__clk_trigger(ccu, trig))
@@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,

 	BUG_ON(divider_is_fixed(div));

-	previous = div->scaled_div;
+	previous = div->u.s.scaled_div;
 	if (previous == scaled_div)
 		return 0;	/* No change */

-	div->scaled_div = scaled_div;
+	div->u.s.scaled_div = scaled_div;

 	flags = ccu_lock(ccu);
 	__ccu_write_enable(ccu);
@@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 	ccu_unlock(ccu, flags);

 	if (ret)
-		div->scaled_div = previous;	/* Revert the change */
+		div->u.s.scaled_div = previous;	/* Revert the change */

 	return ret;

@@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 static int kona_peri_clk_enable(struct clk_hw *hw)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

 	return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
 }
@@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw)
 static void kona_peri_clk_disable(struct clk_hw *hw)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

 	(void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
 }
@@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw)
 static int kona_peri_clk_is_enabled(struct clk_hw *hw)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

 	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
 }
@@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
 		unsigned long parent_rate)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct peri_clk_data *data = bcm_clk->peri;
+	struct peri_clk_data *data = bcm_clk->u.peri;

 	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
 			parent_rate);
@@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 		unsigned long *parent_rate)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct bcm_clk_div *div = &bcm_clk->peri->div;
+	struct bcm_clk_div *div = &bcm_clk->u.peri->div;

 	if (!divider_exists(div))
 		return __clk_get_rate(hw->clk);

 	/* Quietly avoid a zero rate */
-	return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
+	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
 				rate ? rate : 1, *parent_rate, NULL);
 }

 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct peri_clk_data *data = bcm_clk->peri;
+	struct peri_clk_data *data = bcm_clk->u.peri;
 	struct bcm_clk_sel *sel = &data->sel;
 	struct bcm_clk_trig *trig;
 	int ret;
@@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct peri_clk_data *data = bcm_clk->peri;
+	struct peri_clk_data *data = bcm_clk->u.peri;
 	u8 index;

 	index = selector_read_index(bcm_clk->ccu, &data->sel);
@@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 		unsigned long parent_rate)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct peri_clk_data *data = bcm_clk->peri;
+	struct peri_clk_data *data = bcm_clk->u.peri;
 	struct bcm_clk_div *div = &data->div;
 	u64 scaled_div = 0;
 	int ret;
@@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = {
 static bool __peri_clk_init(struct kona_clk *bcm_clk)
 {
 	struct ccu_data *ccu = bcm_clk->ccu;
-	struct peri_clk_data *peri = bcm_clk->peri;
+	struct peri_clk_data *peri = bcm_clk->u.peri;
 	const char *name = bcm_clk->name;
 	struct bcm_clk_trig *trig;

diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index 5e139adc3dc5..dee690951bb6 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -57,7 +57,7 @@
 #define divider_exists(div)	FLAG_TEST(div, DIV, EXISTS)
 #define divider_is_fixed(div)	FLAG_TEST(div, DIV, FIXED)
 #define divider_has_fraction(div)	(!divider_is_fixed(div) && \
-					(div)->frac_width > 0)
+					(div)->u.s.frac_width > 0)

 #define selector_exists(sel)	((sel)->width != 0)
 #define trigger_exists(trig)	FLAG_TEST(trig, TRIG, EXISTS)
@@ -244,9 +244,9 @@ struct bcm_clk_div {
 			u32 frac_width;	/* field fraction width */

 			u64 scaled_div;	/* scaled divider value */
-		};
+		} s;
 		u32 fixed;	/* non-zero fixed divider value */
-	};
+	} u;
 	u32 flags;	/* BCM_CLK_DIV_FLAGS_* below */
 };

@@ -263,28 +263,28 @@ struct bcm_clk_div {
 /* A fixed (non-zero) divider */
 #define FIXED_DIVIDER(_value)						\
 	{								\
-		.fixed = (_value),					\
+		.u.fixed = (_value),					\
 		.flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED),		\
 	}

 /* A divider with an integral divisor */
 #define DIVIDER(_offset, _shift, _width)				\
 	{								\
-		.offset = (_offset),					\
-		.shift = (_shift),					\
-		.width = (_width),					\
-		.scaled_div = BAD_SCALED_DIV_VALUE,			\
+		.u.s.offset = (_offset),				\
+		.u.s.shift = (_shift),					\
+		.u.s.width = (_width),					\
+		.u.s.scaled_div = BAD_SCALED_DIV_VALUE,			\
 		.flags = FLAG(DIV, EXISTS),				\
 	}

 /* A divider whose divisor has an integer and fractional part */
 #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width)		\
 	{								\
-		.offset = (_offset),					\
-		.shift = (_shift),					\
-		.width = (_width),					\
-		.frac_width = (_frac_width),				\
-		.scaled_div = BAD_SCALED_DIV_VALUE,			\
+		.u.s.offset = (_offset),				\
+		.u.s.shift = (_shift),					\
+		.u.s.width = (_width),					\
+		.u.s.frac_width = (_frac_width),			\
+		.u.s.scaled_div = BAD_SCALED_DIV_VALUE,			\
 		.flags = FLAG(DIV, EXISTS),				\
 	}

@@ -380,7 +380,7 @@ struct kona_clk {
 	union {
 		void *data;
 		struct peri_clk_data *peri;
-	};
+	} u;
 };
 #define to_kona_clk(_hw) \
 	container_of(_hw, struct kona_clk, hw)
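The clk-kona hunks above all follow from one structural change: the previously anonymous union (and its inner struct) in struct bcm_clk_div and struct kona_clk gain the names u and s, so every access spelled div->frac_width becomes div->u.s.frac_width. The likely motivation is that older gcc releases (reportedly before 4.6) reject designated initializers that reach into anonymous members, which the FIXED_DIVIDER()/DIVIDER() macros depend on. A minimal sketch with hypothetical types, not the driver's own:

#include <stdint.h>

struct div_named {
	union {
		struct {
			uint32_t shift;
			uint32_t width;
		} s;			/* named inner struct */
		uint32_t fixed;
	} u;				/* named union */
};

/* With the names in place, a static initializer can spell out the
 * full member path, which old compilers accept: */
static struct div_named example_div = { .u.fixed = 4 };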
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index ec22112e569f..3fbee4540228 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
 	return true;
 }

+static int _round_up_table(const struct clk_div_table *table, int div)
+{
+	const struct clk_div_table *clkt;
+	int up = INT_MAX;
+
+	for (clkt = table; clkt->div; clkt++) {
+		if (clkt->div == div)
+			return clkt->div;
+		else if (clkt->div < div)
+			continue;
+
+		if ((clkt->div - div) < (up - div))
+			up = clkt->div;
+	}
+
+	return up;
+}
+
+static int _div_round_up(struct clk_divider *divider,
+		unsigned long parent_rate, unsigned long rate)
+{
+	int div = DIV_ROUND_UP(parent_rate, rate);
+
+	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+		div = __roundup_pow_of_two(div);
+	if (divider->table)
+		div = _round_up_table(divider->table, div);
+
+	return div;
+}
+
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 		unsigned long *best_parent_rate)
 {
@@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,

 	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
 		parent_rate = *best_parent_rate;
-		bestdiv = DIV_ROUND_UP(parent_rate, rate);
+		bestdiv = _div_round_up(divider, parent_rate, rate);
 		bestdiv = bestdiv == 0 ? 1 : bestdiv;
 		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
 		return bestdiv;
@@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 	u32 val;

 	div = DIV_ROUND_UP(parent_rate, rate);
+
+	if (!_is_valid_div(divider, div))
+		return -EINVAL;
+
 	value = _get_val(divider, div);

 	if (value > div_mask(divider))
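A standalone sketch of the rounding behaviour _round_up_table() implements: return an exact table match if one exists, otherwise the smallest entry larger than the ideal divisor, so the resulting rate never overshoots the request. The types and sample table below are illustrative, not kernel code:

#include <limits.h>
#include <stdio.h>

struct div_table_entry { int div; };	/* zero-terminated, like clk_div_table */

static int round_up_table(const struct div_table_entry *table, int div)
{
	const struct div_table_entry *t;
	int up = INT_MAX;

	for (t = table; t->div; t++) {
		if (t->div == div)
			return t->div;		/* exact match wins */
		if (t->div < div)
			continue;		/* too small for the target rate */
		if ((t->div - div) < (up - div))
			up = t->div;		/* closest entry from above */
	}
	return up;
}

int main(void)
{
	static const struct div_table_entry table[] = {
		{ 1 }, { 2 }, { 4 }, { 8 }, { 0 }
	};

	/* an ideal divisor of 3 rounds up to 4, never down to 2 */
	printf("%d\n", round_up_table(table, 3));	/* prints 4 */
	return 0;
}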
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index dff0373f53c1..7cf2c093cc54 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(__clk_register);

-static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the primary interface for populating the clock tree with new
+ * clock nodes. It returns a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
 	int i, ret;
+	struct clk *clk;
+
+	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+	if (!clk) {
+		pr_err("%s: could not allocate clk\n", __func__);
+		ret = -ENOMEM;
+		goto fail_out;
+	}

 	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
 	if (!clk->name) {
@@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)

 	ret = __clk_init(dev, clk);
 	if (!ret)
-		return 0;
+		return clk;

 fail_parent_names_copy:
 	while (--i >= 0)
@@ -2035,36 +2054,6 @@ fail_parent_names_copy:
 fail_parent_names:
 	kfree(clk->name);
 fail_name:
-	return ret;
-}
-
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
-{
-	int ret;
-	struct clk *clk;
-
-	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
-	if (!clk) {
-		pr_err("%s: could not allocate clk\n", __func__);
-		ret = -ENOMEM;
-		goto fail_out;
-	}
-
-	ret = _clk_register(dev, hw, clk);
-	if (!ret)
-		return clk;
-
 	kfree(clk);
 fail_out:
 	return ERR_PTR(ret);
@@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk)

 	if (!hlist_empty(&clk->children)) {
 		struct clk *child;
+		struct hlist_node *t;

 		/* Reparent all children to the orphan list. */
-		hlist_for_each_entry(child, &clk->children, child_node)
+		hlist_for_each_entry_safe(child, t, &clk->children, child_node)
 			clk_set_parent(child, NULL);
 	}

@@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);

 static void devm_clk_release(struct device *dev, void *res)
 {
-	clk_unregister(res);
+	clk_unregister(*(struct clk **)res);
 }

 /**
@@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res)
 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
 {
 	struct clk *clk;
-	int ret;
+	struct clk **clkp;

-	clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
-	if (!clk)
+	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+	if (!clkp)
 		return ERR_PTR(-ENOMEM);

-	ret = _clk_register(dev, hw, clk);
-	if (!ret) {
-		devres_add(dev, clk);
+	clk = clk_register(dev, hw);
+	if (!IS_ERR(clk)) {
+		*clkp = clk;
+		devres_add(dev, clkp);
 	} else {
-		devres_free(clk);
-		clk = ERR_PTR(ret);
+		devres_free(clkp);
 	}

 	return clk;
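The devm_clk_register() rework changes what the devres payload holds: previously the payload was the struct clk itself, but now that clk_register() allocates its own struct clk, the payload is just a pointer slot, and the release callback must dereference it. A sketch of the pointer-payload idiom with hypothetical names:

struct device;
struct example_clk;

void example_unregister(struct example_clk *clk);

static void example_devres_release(struct device *dev, void *res)
{
	/* res is the devres payload; it holds a pointer, not the object */
	example_unregister(*(struct example_clk **)res);
}

Passing res straight to the unregister function, as the old devm_clk_release() did, would treat the pointer slot itself as the clock object.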
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index 2e5810c88d11..1f6324e29a80 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
 static void __init cpg_mstp_clocks_init(struct device_node *np)
 {
 	struct mstp_clock_group *group;
+	const char *idxname;
 	struct clk **clks;
 	unsigned int i;

@@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
 	for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
 		clks[i] = ERR_PTR(-ENOENT);

+	if (of_find_property(np, "clock-indices", &i))
+		idxname = "clock-indices";
+	else
+		idxname = "renesas,clock-indices";
+
 	for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
 		const char *parent_name;
 		const char *name;
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
 			continue;

 		parent_name = of_clk_get_parent_name(np, i);
-		ret = of_property_read_u32_index(np, "renesas,clock-indices", i,
-						 &clkidx);
+		ret = of_property_read_u32_index(np, idxname, i, &clkidx);
 		if (parent_name == NULL || ret < 0)
 			break;

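For reference, a condensed sketch of the fallback lookup the hunk introduces: the generic "clock-indices" property is preferred, and the legacy vendor-prefixed name is kept for existing device trees. The helper form below is illustrative; the driver open-codes it inline:

#include <linux/of.h>

static const char *pick_index_property(const struct device_node *np)
{
	int len;

	/* of_find_property() is used only as a presence test here */
	if (of_find_property(np, "clock-indices", &len))
		return "clock-indices";
	return "renesas,clock-indices";	/* legacy binding */
}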
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 88dafb5e9627..de6da957a09d 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -20,6 +20,7 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_address.h>

 #include "clk.h"

@@ -43,6 +44,8 @@

 #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)

+void __iomem *clk_mgr_base_addr;
+
 static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
 					 unsigned long parent_rate)
 {
@@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
 	const char *clk_name = node->name;
 	const char *parent_name[SOCFPGA_MAX_PARENTS];
 	struct clk_init_data init;
+	struct device_node *clkmgr_np;
 	int rc;
 	int i = 0;

@@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
 	if (WARN_ON(!pll_clk))
 		return NULL;

+	clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
+	clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
+	BUG_ON(!clk_mgr_base_addr);
 	pll_clk->hw.reg = clk_mgr_base_addr + reg;

 	of_property_read_string(node, "clock-output-names", &clk_name);
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 35a960a993f9..43db947e5f0e 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -17,28 +17,11 @@
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
 #include <linux/of.h>
-#include <linux/of_address.h>

 #include "clk.h"

-void __iomem *clk_mgr_base_addr;
-
-static const struct of_device_id socfpga_child_clocks[] __initconst = {
-	{ .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
-	{ .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
-	{ .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
-	{},
-};
-
-static void __init socfpga_clkmgr_init(struct device_node *node)
-{
-	clk_mgr_base_addr = of_iomap(node, 0);
-	of_clk_init(socfpga_child_clocks);
-}
-CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
+CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init);
+CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init);
+CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init);

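The socfpga restructure drops the driver-private of_device_id table in favour of registering each init function directly with the common clock framework: CLK_OF_DECLARE() ties a compatible string to an init routine that the generic of_clk_init() pass invokes. A hypothetical usage sketch:

#include <linux/clk-provider.h>
#include <linux/of.h>

static void __init example_clk_of_init(struct device_node *np)
{
	/* map registers and register the clock for this node */
}
CLK_OF_DECLARE(example_clk, "vendor,example-clk", example_clk_of_init);

The cost of losing the "altr,clk-mgr" wrapper is that the register base must now be discovered elsewhere, which is why the PLL init in clk-pll.c above maps the clk-mgr node itself.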
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index bca0a0badbfa..a886702f7c8b 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
 	gate->lock = odf_lock;

 	div = kzalloc(sizeof(*div), GFP_KERNEL);
-	if (!div)
+	if (!div) {
+		kfree(gate);
 		return ERR_PTR(-ENOMEM);
+	}

 	div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
 	div->reg = reg + pll_data->odf[odf].offset;
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 0d20241e0770..6aad8abc69a2 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -58,9 +58,9 @@
 #define PLLDU_LFCON_SET_DIVN 600

 #define PLLE_BASE_DIVCML_SHIFT 24
-#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVCML_MASK 0xf
 #define PLLE_BASE_DIVP_SHIFT 16
-#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVP_WIDTH 6
 #define PLLE_BASE_DIVN_SHIFT 8
 #define PLLE_BASE_DIVN_WIDTH 8
 #define PLLE_BASE_DIVM_SHIFT 0
@@ -183,6 +183,14 @@
 #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
 		      mask(p->params->div_nmp->divp_width))

+#define divm_shift(p) (p)->params->div_nmp->divm_shift
+#define divn_shift(p) (p)->params->div_nmp->divn_shift
+#define divp_shift(p) (p)->params->div_nmp->divp_shift
+
+#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
+#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
+#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
+
 #define divm_max(p) (divm_mask(p))
 #define divn_max(p) (divn_mask(p))
 #define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
 	} else {
 		val = pll_readl_base(pll);

-		val &= ~((divm_mask(pll) << div_nmp->divm_shift) |
-			 (divn_mask(pll) << div_nmp->divn_shift) |
-			 (divp_mask(pll) << div_nmp->divp_shift));
+		val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
+			 divp_mask_shifted(pll));

-		val |= ((cfg->m << div_nmp->divm_shift) |
-			(cfg->n << div_nmp->divn_shift) |
-			(cfg->p << div_nmp->divp_shift));
+		val |= (cfg->m << divm_shift(pll)) |
+		       (cfg->n << divn_shift(pll)) |
+		       (cfg->p << divp_shift(pll));

 		pll_writel_base(val, pll);
 	}
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
 	if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
 		/* configure dividers */
 		val = pll_readl_base(pll);
-		val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-		val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-		val |= sel.m << pll->params->div_nmp->divm_shift;
-		val |= sel.n << pll->params->div_nmp->divn_shift;
-		val |= sel.p << pll->params->div_nmp->divp_shift;
+		val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+			 divm_mask_shifted(pll));
+		val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+		val |= sel.m << divm_shift(pll);
+		val |= sel.n << divn_shift(pll);
+		val |= sel.p << divp_shift(pll);
 		val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
 		pll_writel_base(val, pll);
 	}
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
 	pll_writel_misc(val, pll);

 	val = readl(pll->clk_base + PLLE_SS_CTRL);
+	val &= ~PLLE_SS_COEFFICIENTS_MASK;
 	val |= PLLE_SS_DISABLE;
 	writel(val, pll->clk_base + PLLE_SS_CTRL);

-	val |= pll_readl_base(pll);
+	val = pll_readl_base(pll);
 	val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
 	pll_writel_base(val, pll);

@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
 	pll_writel(val, PLLE_SS_CTRL, pll);

 	val = pll_readl_base(pll);
-	val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-	val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-	val |= sel.m << pll->params->div_nmp->divm_shift;
-	val |= sel.n << pll->params->div_nmp->divn_shift;
+	val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+		 divm_mask_shifted(pll));
+	val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+	val |= sel.m << divm_shift(pll);
+	val |= sel.n << divn_shift(pll);
 	val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
 	pll_writel_base(val, pll);
 	udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
 	return clk;
 }

+static struct div_nmp pll_e_nmp = {
+	.divn_shift = PLLE_BASE_DIVN_SHIFT,
+	.divn_width = PLLE_BASE_DIVN_WIDTH,
+	.divm_shift = PLLE_BASE_DIVM_SHIFT,
+	.divm_width = PLLE_BASE_DIVM_WIDTH,
+	.divp_shift = PLLE_BASE_DIVP_SHIFT,
+	.divp_width = PLLE_BASE_DIVP_WIDTH,
+};
+
 struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
 		void __iomem *clk_base, void __iomem *pmc,
 		unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,

 	pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
 	pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+
+	if (!pll_params->div_nmp)
+		pll_params->div_nmp = &pll_e_nmp;
+
 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
 	if (IS_ERR(pll))
 		return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
 		int m;

 		m = _pll_fixed_mdiv(pll_params, parent_rate);
-		val = m << PLL_BASE_DIVM_SHIFT;
-		val |= (pll_params->vco_min / parent_rate)
-				<< PLL_BASE_DIVN_SHIFT;
+		val = m << divm_shift(pll);
+		val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
 		pll_writel_base(val, pll);
 	}

@@ -1718,7 +1740,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
 				 "pll_re_vco");
 	} else {
 		val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
-		pll_writel(val, pll_params->aux_reg, pll);
+		pll_writel(val_aux, pll_params->aux_reg, pll);
 	}

 	clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
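Two distinct bugs are being fixed in the Tegra hunks: PLLE_BASE_DIVCML_WIDTH (a width, value 4) was used where a bit mask belonged, and the divider masks were cleared without shifting them to their field positions. A small self-contained illustration of the second bug, with made-up field parameters:

#include <assert.h>
#include <stdint.h>

#define DIVM_MASK	0xffu
#define DIVM_SHIFT	8

static uint32_t update_divm(uint32_t val, uint32_t m)
{
	val &= ~(DIVM_MASK << DIVM_SHIFT);	/* clear the actual field */
	/* "val &= ~DIVM_MASK;" would clear bits 0..7 and leave the
	 * stale divider in bits 8..15 to be ORed over below */
	val |= m << DIVM_SHIFT;
	return val;
}

int main(void)
{
	/* the old field value 0x55 is fully replaced by 0x0a */
	assert(update_divm(0x5500, 0x0a) == 0x0a00);
	return 0;
}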
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 00fdd1170284..a8d7ea14f183 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 			|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
 		__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
 		__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-		clk_disable_unprepare(tcd->clk);
+		clk_disable(tcd->clk);
 	}

 	switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 	 * of oneshot, we get lower overhead and improved accuracy.
 	 */
 	case CLOCK_EVT_MODE_PERIODIC:
-		clk_prepare_enable(tcd->clk);
+		clk_enable(tcd->clk);

 		/* slow clock, count up to RC, then irq and restart */
 		__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 		break;

 	case CLOCK_EVT_MODE_ONESHOT:
-		clk_prepare_enable(tcd->clk);
+		clk_enable(tcd->clk);

 		/* slow clock, count up to RC, then irq and stop */
 		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 	ret = clk_prepare_enable(t2_clk);
 	if (ret)
 		return ret;
-	clk_disable_unprepare(t2_clk);
+	clk_disable(t2_clk);

 	clkevt.regs = tc->regs;
 	clkevt.clk = t2_clk;
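The tcb_clksrc change relies on the clk API's prepare/enable split: clk_prepare() may sleep and belongs in process context, while clk_enable()/clk_disable() must be usable from atomic context such as a clockevent set_mode handler. A sketch of the intended split, assuming a clk that has already been obtained:

#include <linux/clk.h>

/* Process context, e.g. driver setup: do the sleeping half once. */
static int example_timer_setup(struct clk *clk)
{
	return clk_prepare(clk);
}

/* Atomic context, e.g. a set_mode callback: only the fast half. */
static void example_timer_set_running(struct clk *clk, int on)
{
	if (on)
		clk_enable(clk);
	else
		clk_disable(clk);
}

That is why setup_clkevents() keeps clk_prepare_enable() but tc_mode() now drops only to clk_disable(): the clock stays prepared for its whole lifetime.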
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index b52e1c078b99..7f5374dbefd9 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)

 	action->dev_id = ce;
 	BUG_ON(setup_irq(ce->irq, action));
-	irq_set_affinity(action->irq, cpumask_of(cpu));
+	irq_force_affinity(action->irq, cpumask_of(cpu));

 	clockevents_register_device(ce);
 	return 0;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 1bf6bbac3e03..09b9129c7bd3 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 		return -ENOENT;
 	}

-	cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
+	cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
 	if (IS_ERR(cpu_reg)) {
 		/*
 		 * If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 			PTR_ERR(cpu_reg));
 	}

-	cpu_clk = devm_clk_get(cpu_dev, NULL);
+	cpu_clk = clk_get(cpu_dev, NULL);
 	if (IS_ERR(cpu_clk)) {
 		ret = PTR_ERR(cpu_clk);
 		pr_err("failed to get cpu0 clock: %d\n", ret);
-		goto out_put_node;
+		goto out_put_reg;
 	}

 	ret = of_init_opp_table(cpu_dev);
 	if (ret) {
 		pr_err("failed to init OPP table: %d\n", ret);
-		goto out_put_node;
+		goto out_put_clk;
 	}

 	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 	if (ret) {
 		pr_err("failed to init cpufreq table: %d\n", ret);
-		goto out_put_node;
+		goto out_put_clk;
 	}

 	of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)

 out_free_table:
 	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_clk:
+	if (!IS_ERR(cpu_clk))
+		clk_put(cpu_clk);
+out_put_reg:
+	if (!IS_ERR(cpu_reg))
+		regulator_put(cpu_reg);
 out_put_node:
 	of_node_put(np);
 	return ret;
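The probe fix trades devm_* acquisition for explicit clk_get()/regulator_get_optional() with matching puts, presumably because the resources are obtained against cpu_dev rather than the probing platform device, so managed cleanup would not be tied to the right lifetime. The unwind then follows the usual reverse-order goto ladder; a condensed sketch with a hypothetical init step:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_init_tables(struct device *dev);

static int example_probe(struct device *cpu_dev)
{
	struct regulator *reg;
	struct clk *clk;
	int ret;

	reg = regulator_get_optional(cpu_dev, "cpu0");	/* may be an error */

	clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto out_put_reg;
	}

	ret = example_init_tables(cpu_dev);	/* hypothetical */
	if (ret)
		goto out_put_clk;
	return 0;

out_put_clk:
	clk_put(clk);
out_put_reg:
	if (!IS_ERR(reg))	/* the regulator is optional */
		regulator_put(reg);
	return ret;
}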
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ba43991ba98a..e1c6433b16e0 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;

 	case CPUFREQ_GOV_LIMITS:
+		mutex_lock(&dbs_data->mutex);
+		if (!cpu_cdbs->cur_policy) {
+			mutex_unlock(&dbs_data->mutex);
+			break;
+		}
 		mutex_lock(&cpu_cdbs->timer_mutex);
 		if (policy->max < cpu_cdbs->cur_policy->cur)
 			__cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					policy->min, CPUFREQ_RELATION_L);
 		dbs_check_cpu(dbs_data, cpu);
 		mutex_unlock(&cpu_cdbs->timer_mutex);
+		mutex_unlock(&dbs_data->mutex);
 		break;
 	}
 	return 0;
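The governor hunk closes a race in which CPUFREQ_GOV_LIMITS can run while a concurrent GOV_STOP has already cleared cur_policy, leaving the handler to dereference a NULL pointer. The shape of the guard, with hypothetical types:

#include <linux/mutex.h>

struct cpufreq_policy;

struct example_dbs {
	struct mutex mutex;
	struct cpufreq_policy *cur_policy;
};

static int example_gov_limits(struct example_dbs *dbs)
{
	mutex_lock(&dbs->mutex);
	if (!dbs->cur_policy) {
		/* raced with GOV_STOP: nothing to clamp */
		mutex_unlock(&dbs->mutex);
		return 0;
	}
	/* ... apply min/max limits to dbs->cur_policy under the lock ... */
	mutex_unlock(&dbs->mutex);
	return 0;
}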
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 099967302bf2..eab8ccfe6beb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,6 +37,7 @@
 #define BYT_RATIOS		0x66a
 #define BYT_VIDS		0x66b
 #define BYT_TURBO_RATIOS	0x66c
+#define BYT_TURBO_VIDS		0x66d


 #define FRAC_BITS 6
@@ -70,8 +71,9 @@ struct pstate_data {
 };

 struct vid_data {
-	int32_t min;
-	int32_t max;
+	int min;
+	int max;
+	int turbo;
 	int32_t ratio;
 };

@@ -359,14 +361,14 @@ static int byt_get_min_pstate(void)
 {
 	u64 value;
 	rdmsrl(BYT_RATIOS, value);
-	return (value >> 8) & 0xFF;
+	return (value >> 8) & 0x3F;
 }

 static int byt_get_max_pstate(void)
 {
 	u64 value;
 	rdmsrl(BYT_RATIOS, value);
-	return (value >> 16) & 0xFF;
+	return (value >> 16) & 0x3F;
 }

 static int byt_get_turbo_pstate(void)
@@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
 	vid = fp_toint(vid_fp);

+	if (pstate > cpudata->pstate.max_pstate)
+		vid = cpudata->vid.turbo;
+
 	val |= vid;

 	wrmsrl(MSR_IA32_PERF_CTL, val);
@@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata)
 {
 	u64 value;

+
 	rdmsrl(BYT_VIDS, value);
-	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
-	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+	cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
+	cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
 	cpudata->vid.ratio = div_fp(
 		cpudata->vid.max - cpudata->vid.min,
 		int_tofp(cpudata->pstate.max_pstate -
 			cpudata->pstate.min_pstate));
+
+	rdmsrl(BYT_TURBO_VIDS, value);
+	cpudata->vid.turbo = value & 0x7f;
 }


@@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)

 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
-
-	/*
-	 * goto max pstate so we don't slow up boot if we are built-in if we are
-	 * a module we will take care of it during normal operation
-	 */
-	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }

 static inline void intel_pstate_calc_busy(struct cpudata *cpu,
@@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu = all_cpu_data[cpunum];

 	intel_pstate_get_cpu_pstates(cpu);
-	if (!cpu->pstate.current_pstate) {
-		all_cpu_data[cpunum] = NULL;
-		kfree(cpu);
-		return -ENODATA;
-	}

 	cpu->cpu = cpunum;

@@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu->timer.expires = jiffies + HZ/100;
 	intel_pstate_busy_pid_reset(cpu);
 	intel_pstate_sample(cpu);
-	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);

 	add_timer_on(&cpu->timer, cpunum);

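The Baytrail MSR fixes narrow the ratio and VID field masks to the 6 bits the fields actually occupy, add the separate turbo-VID MSR, and stop forcing the maximum P-state at init. Masking wider than the field can fold neighbouring bits into the extracted value; an illustration with a made-up MSR image:

#include <assert.h>
#include <stdint.h>

static int field_at_8(uint64_t msr, uint64_t mask)
{
	return (msr >> 8) & mask;
}

int main(void)
{
	/* bits 8..13 hold 0x20; bits 14..15 belong to another field */
	uint64_t msr = (0x3ULL << 14) | (0x20ULL << 8);

	assert(field_at_8(msr, 0x3F) == 0x20);	/* correct 6-bit mask */
	assert(field_at_8(msr, 0xFF) == 0xE0);	/* 8-bit mask picks up noise */
	return 0;
}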
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 9f25f5296029..0eabd81e1a90 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -16,9 +16,13 @@
 	char *tmp;						\
 								\
 	tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC);	\
-	sprintf(tmp, format, param);				\
-	strcat(str, tmp);					\
-	kfree(tmp);						\
+	if (likely(tmp)) {					\
+		sprintf(tmp, format, param);			\
+		strcat(str, tmp);				\
+		kfree(tmp);					\
+	} else {						\
+		strcat(str, "kmalloc failure in SPRINTFCAT");	\
+	}							\
 }

 static void report_jump_idx(u32 status, char *outstr)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
 			       DMA_BIDIRECTIONAL);
 	}
+	cnt = unmap->map_cnt;
 	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }

@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 	memset(unmap, 0, sizeof(*unmap));
 	kref_init(&unmap->kref);
 	unmap->dev = dev;
+	unmap->map_cnt = nr;

 	return unmap;
 }
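The dmaengine change records the allocation-time entry count in the unmap structure because mempool_free() must return the object to the same pool it was allocated from; deriving the count at release time from per-direction usage counters could select a different pool. A sketch with hypothetical names:

struct example_pool;
struct example_unmap {
	int map_cnt;		/* set once at allocation, from 'nr' */
	/* ... addresses, lengths ... */
};

struct example_pool *example_pool_for(int cnt);
void example_mempool_free(struct example_unmap *unmap,
			  struct example_pool *pool);

static void example_unmap_release(struct example_unmap *unmap)
{
	int cnt = unmap->map_cnt;	/* read before the object is freed */

	/* ... dma_unmap_page() each entry ... */
	example_mempool_free(unmap, example_pool_for(cnt));
}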
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cfdbb92aae1d..7a740769c2fa 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	/* Disable BLOCK interrupts as well */
 	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

-	err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
-			       IRQF_SHARED, "dw_dmac", dw);
-	if (err)
-		return err;
-
 	/* Create a pool of consistent memory blocks for hardware descriptors */
 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
 					 sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)

 	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

+	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+			  "dw_dmac", dw);
+	if (err)
+		return err;
+
 	INIT_LIST_HEAD(&dw->dma.channels);
 	for (i = 0; i < nr_channels; i++) {
 		struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
 	dw_dma_off(dw);
 	dma_async_device_unregister(&dw->dma);

+	free_irq(chip->irq, dw);
 	tasklet_kill(&dw->tasklet);

 	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..394cbc5c93e3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,

 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-	u32 activation;
-
 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = readl_relaxed(XOR_ACTIVATION(chan));
-	activation |= 0x1;
-	writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+	/* writel ensures all descriptors are flushed before activation */
+	writel(BIT(0), XOR_ACTIVATION(chan));
 }

 static char mv_chan_is_busy(struct mv_xor_chan *chan)
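The mv_xor change swaps writel_relaxed() for writel() when kicking the channel: on ARM, writel() includes a barrier that orders prior normal-memory writes (the DMA descriptors) before the MMIO store, whereas the _relaxed variant gives no such guarantee. It also drops the read-modify-write, since only bit 0 is of interest. A sketch of the idiom with a made-up register offset:

#include <linux/io.h>

#define EXAMPLE_ACTIVATION	0x20	/* hypothetical doorbell register */

static void example_chan_activate(void __iomem *base)
{
	/* descriptors were written to coherent memory just before this;
	 * writel() orders those writes ahead of the doorbell store */
	writel(0x1, base + EXAMPLE_ACTIVATION);
}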
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ab26d46bbe15..5ebdfbc1051e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
 	struct sa11x0_dma_desc	*txd_load;
 	unsigned		sg_done;
 	struct sa11x0_dma_desc	*txd_done;
-#ifdef CONFIG_PM_SLEEP
 	u32			dbs[2];
 	u32			dbt[2];
 	u32			dcsr;
-#endif
 };

 struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
 	return 0;
 }

-#ifdef CONFIG_PM_SLEEP
 static int sa11x0_dma_suspend(struct device *dev)
 {
 	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)

 	return 0;
 }
-#endif

 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
 	.suspend_noirq = sa11x0_dma_suspend,
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c98764aeeec6..f477308b6e9c 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)

 #define LOCAL_BUS 0xffc0

-/* arbitrarily chosen maximum range for physical DMA: 128 TB */
-#define FW_MAX_PHYSICAL_RANGE		(128ULL << 40)
+/* OHCI-1394's default upper bound for physical DMA: 4 GB */
+#define FW_MAX_PHYSICAL_RANGE		(1ULL << 32)

 void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
 void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8db663219560..586f2f7f6993 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
 		  version >> 16, version & 0xff, ohci->card.index,
 		  ohci->n_ir, ohci->n_it, ohci->quirks,
 		  reg_read(ohci, OHCI1394_PhyUpperBound) ?
-			", >4 GB phys DMA" : "");
+			", physUB" : "");

 	return 0;

diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 3ee852c9925b..071c2c969eec 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -756,6 +756,7 @@ static const struct {
 	 */
 	{ ACPI_SIG_IBFT },
 	{ "iBFT" },
+	{ "BIFT" },	/* Broadcom iSCSI Offload */
 };

 static void __init acpi_find_ibft_region(void)
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index e73c6755a5eb..70304220a479 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = {

 	.ngpio = 50,
 	.have_blink = true,
+	.regs = ichx_regs,
+	.reglen = ichx_reglen,
 };

 /* Intel 3100 */
@@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = {
 	.uses_gpe0 = true,

 	.ngpio = 50,
+	.regs = ichx_regs,
+	.reglen = ichx_reglen,
 };

 /* ICH7 and ICH8-based */
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 99a68310e7c0..3d53fd6880d1 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -894,9 +894,11 @@ static int mcp23s08_probe(struct spi_device *spi)
 			dev_err(&spi->dev, "invalid spi-present-mask\n");
 			return -ENODEV;
 		}
-
-		for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++)
+		for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
+			if ((spi_present_mask & (1 << addr)))
+				chips++;
 			pullups[addr] = 0;
+		}
 	} else {
 		type = spi_get_device_id(spi)->driver_data;
 		pdata = dev_get_platdata(&spi->dev);
@@ -919,12 +921,12 @@ static int mcp23s08_probe(struct spi_device *spi)
 			pullups[addr] = pdata->chip[addr].pullups;
 		}

-		if (!chips)
-			return -ENODEV;
-
 		base = pdata->base;
 	}

+	if (!chips)
+		return -ENODEV;
+
 	data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
 			GFP_KERNEL);
 	if (!data)
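This fix matters because the device-tree branch never incremented chips, so the allocation below was sized for zero devices and the (now common) !chips check would reject valid DT configurations. The loop simply counts the bits set in the present mask; hweight32() would be an equivalent way to do it. A sketch:

/* Count chips announced in a present mask (illustrative helper). */
static int count_chips(unsigned int mask, unsigned int max)
{
	unsigned int addr;
	int chips = 0;

	for (addr = 0; addr < max; addr++)
		if (mask & (1u << addr))
			chips++;
	return chips;
}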
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..eedb023af27d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);

 	mutex_lock(&dev->struct_mutex);
-	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 108e1ec2fa4b..388c028e223c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
242#define WATCH_LISTS 0 242#define WATCH_LISTS 0
243#define WATCH_GTT 0 243#define WATCH_GTT 0
244 244
245#define I915_GEM_PHYS_CURSOR_0 1
246#define I915_GEM_PHYS_CURSOR_1 2
247#define I915_GEM_PHYS_OVERLAY_REGS 3
248#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
249
250struct drm_i915_gem_phys_object {
251 int id;
252 struct page **page_list;
253 drm_dma_handle_t *handle;
254 struct drm_i915_gem_object *cur_obj;
255};
256
257struct opregion_header; 245struct opregion_header;
258struct opregion_acpi; 246struct opregion_acpi;
259struct opregion_swsci; 247struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
1187 /** Bit 6 swizzling required for Y tiling */ 1175 /** Bit 6 swizzling required for Y tiling */
1188 uint32_t bit_6_swizzle_y; 1176 uint32_t bit_6_swizzle_y;
1189 1177
1190 /* storage for physical objects */
1191 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
1192
1193 /* accounting, useful for userland debugging */ 1178 /* accounting, useful for userland debugging */
1194 spinlock_t object_stat_lock; 1179 spinlock_t object_stat_lock;
1195 size_t object_memory; 1180 size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
1769 struct drm_file *pin_filp; 1754 struct drm_file *pin_filp;
1770 1755
1771 /** for phy allocated objects */ 1756 /** for phy allocated objects */
1772 struct drm_i915_gem_phys_object *phys_obj; 1757 drm_dma_handle_t *phys_handle;
1773}; 1758};
1774 1759
1775#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1760#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
2204#define PIN_MAPPABLE 0x1 2189#define PIN_MAPPABLE 0x1
2205#define PIN_NONBLOCK 0x2 2190#define PIN_NONBLOCK 0x2
2206#define PIN_GLOBAL 0x4 2191#define PIN_GLOBAL 0x4
2192#define PIN_OFFSET_BIAS 0x8
2193#define PIN_OFFSET_MASK (~4095)
2207int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 2194int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2208 struct i915_address_space *vm, 2195 struct i915_address_space *vm,
2209 uint32_t alignment, 2196 uint32_t alignment,
2210 unsigned flags); 2197 uint64_t flags);
2211int __must_check i915_vma_unbind(struct i915_vma *vma); 2198int __must_check i915_vma_unbind(struct i915_vma *vma);
2212int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2199int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2213void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2200void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2334 u32 alignment, 2321 u32 alignment,
2335 struct intel_ring_buffer *pipelined); 2322 struct intel_ring_buffer *pipelined);
2336void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); 2323void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
2337int i915_gem_attach_phys_object(struct drm_device *dev, 2324int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
2338 struct drm_i915_gem_object *obj,
2339 int id,
2340 int align); 2325 int align);
2341void i915_gem_detach_phys_object(struct drm_device *dev,
2342 struct drm_i915_gem_object *obj);
2343void i915_gem_free_all_phys_object(struct drm_device *dev);
2344int i915_gem_open(struct drm_device *dev, struct drm_file *file); 2326int i915_gem_open(struct drm_device *dev, struct drm_file *file);
2345void i915_gem_release(struct drm_device *dev, struct drm_file *file); 2327void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2346 2328
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
2465 int min_size, 2447 int min_size,
2466 unsigned alignment, 2448 unsigned alignment,
2467 unsigned cache_level, 2449 unsigned cache_level,
2450 unsigned long start,
2451 unsigned long end,
2468 unsigned flags); 2452 unsigned flags);
2469int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 2453int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2470int i915_gem_evict_everything(struct drm_device *dev); 2454int i915_gem_evict_everything(struct drm_device *dev);
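
The i915_drv.h changes above widen the pin flags to 64 bits so a page-aligned minimum offset can ride in the same word as the boolean PIN_* bits: PIN_OFFSET_BIAS marks that a bias is present and PIN_OFFSET_MASK (~4095) strips the low flag bits back out. A minimal sketch of that packing, assuming the bias value is always page aligned (the 256 KiB constant mirrors BATCH_OFFSET_BIAS from the execbuffer hunk further below):

#include <stdint.h>
#include <stdio.h>

#define PIN_MAPPABLE	0x1
#define PIN_OFFSET_BIAS	0x8
#define PIN_OFFSET_MASK	(~4095ULL)	/* low 12 bits reserved for flags */

int main(void)
{
	/* Pack a 256 KiB minimum offset next to the flag bits; its low
	 * 12 bits are zero, so the flag bits survive the OR. */
	uint64_t flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | (256 * 1024);

	if (flags & PIN_OFFSET_BIAS)
		printf("bias = 0x%llx\n",
		       (unsigned long long)(flags & PIN_OFFSET_MASK));
	return 0;
}

The matching i915_gem_evict_something() prototype grows start/end parameters so eviction can honour the same restricted range.
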
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2871ce75f438..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
43static __must_check int 43static __must_check int
44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45 bool readonly); 45 bool readonly);
46static int i915_gem_phys_pwrite(struct drm_device *dev,
47 struct drm_i915_gem_object *obj,
48 struct drm_i915_gem_pwrite *args,
49 struct drm_file *file);
50 46
51static void i915_gem_write_fence(struct drm_device *dev, int reg, 47static void i915_gem_write_fence(struct drm_device *dev, int reg,
52 struct drm_i915_gem_object *obj); 48 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
209 return 0; 205 return 0;
210} 206}
211 207
208static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
209{
210 drm_dma_handle_t *phys = obj->phys_handle;
211
212 if (!phys)
213 return;
214
215 if (obj->madv == I915_MADV_WILLNEED) {
216 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
217 char *vaddr = phys->vaddr;
218 int i;
219
220 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
221 struct page *page = shmem_read_mapping_page(mapping, i);
222 if (!IS_ERR(page)) {
223 char *dst = kmap_atomic(page);
224 memcpy(dst, vaddr, PAGE_SIZE);
225 drm_clflush_virt_range(dst, PAGE_SIZE);
226 kunmap_atomic(dst);
227
228 set_page_dirty(page);
229 mark_page_accessed(page);
230 page_cache_release(page);
231 }
232 vaddr += PAGE_SIZE;
233 }
234 i915_gem_chipset_flush(obj->base.dev);
235 }
236
237#ifdef CONFIG_X86
238 set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
239#endif
240 drm_pci_free(obj->base.dev, phys);
241 obj->phys_handle = NULL;
242}
243
244int
245i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
246 int align)
247{
248 drm_dma_handle_t *phys;
249 struct address_space *mapping;
250 char *vaddr;
251 int i;
252
253 if (obj->phys_handle) {
254 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
255 return -EBUSY;
256
257 return 0;
258 }
259
260 if (obj->madv != I915_MADV_WILLNEED)
261 return -EFAULT;
262
263 if (obj->base.filp == NULL)
264 return -EINVAL;
265
266 /* create a new object */
267 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
268 if (!phys)
269 return -ENOMEM;
270
271 vaddr = phys->vaddr;
272#ifdef CONFIG_X86
273 set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
274#endif
275 mapping = file_inode(obj->base.filp)->i_mapping;
276 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
277 struct page *page;
278 char *src;
279
280 page = shmem_read_mapping_page(mapping, i);
281 if (IS_ERR(page)) {
282#ifdef CONFIG_X86
283 set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
284#endif
285 drm_pci_free(obj->base.dev, phys);
286 return PTR_ERR(page);
287 }
288
289 src = kmap_atomic(page);
290 memcpy(vaddr, src, PAGE_SIZE);
291 kunmap_atomic(src);
292
293 mark_page_accessed(page);
294 page_cache_release(page);
295
296 vaddr += PAGE_SIZE;
297 }
298
299 obj->phys_handle = phys;
300 return 0;
301}
302
303static int
304i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
305 struct drm_i915_gem_pwrite *args,
306 struct drm_file *file_priv)
307{
308 struct drm_device *dev = obj->base.dev;
309 void *vaddr = obj->phys_handle->vaddr + args->offset;
310 char __user *user_data = to_user_ptr(args->data_ptr);
311
312 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
313 unsigned long unwritten;
314
315 /* The physical object once assigned is fixed for the lifetime
316 * of the obj, so we can safely drop the lock and continue
317 * to access vaddr.
318 */
319 mutex_unlock(&dev->struct_mutex);
320 unwritten = copy_from_user(vaddr, user_data, args->size);
321 mutex_lock(&dev->struct_mutex);
322 if (unwritten)
323 return -EFAULT;
324 }
325
326 i915_gem_chipset_flush(dev);
327 return 0;
328}
329
212void *i915_gem_object_alloc(struct drm_device *dev) 330void *i915_gem_object_alloc(struct drm_device *dev)
213{ 331{
214 struct drm_i915_private *dev_priv = dev->dev_private; 332 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
921 * pread/pwrite currently are reading and writing from the CPU 1039 * pread/pwrite currently are reading and writing from the CPU
922 * perspective, requiring manual detiling by the client. 1040 * perspective, requiring manual detiling by the client.
923 */ 1041 */
924 if (obj->phys_obj) { 1042 if (obj->phys_handle) {
925 ret = i915_gem_phys_pwrite(dev, obj, args, file); 1043 ret = i915_gem_phys_pwrite(obj, args, file);
926 goto out; 1044 goto out;
927 } 1045 }
928 1046
@@ -3208,12 +3326,14 @@ static struct i915_vma *
3208i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 3326i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3209 struct i915_address_space *vm, 3327 struct i915_address_space *vm,
3210 unsigned alignment, 3328 unsigned alignment,
3211 unsigned flags) 3329 uint64_t flags)
3212{ 3330{
3213 struct drm_device *dev = obj->base.dev; 3331 struct drm_device *dev = obj->base.dev;
3214 struct drm_i915_private *dev_priv = dev->dev_private; 3332 struct drm_i915_private *dev_priv = dev->dev_private;
3215 u32 size, fence_size, fence_alignment, unfenced_alignment; 3333 u32 size, fence_size, fence_alignment, unfenced_alignment;
3216 size_t gtt_max = 3334 unsigned long start =
3335 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3336 unsigned long end =
3217 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; 3337 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3218 struct i915_vma *vma; 3338 struct i915_vma *vma;
3219 int ret; 3339 int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3242 /* If the object is bigger than the entire aperture, reject it early 3362 /* If the object is bigger than the entire aperture, reject it early
3243 * before evicting everything in a vain attempt to find space. 3363 * before evicting everything in a vain attempt to find space.
3244 */ 3364 */
3245 if (obj->base.size > gtt_max) { 3365 if (obj->base.size > end) {
3246 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", 3366 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
3247 obj->base.size, 3367 obj->base.size,
3248 flags & PIN_MAPPABLE ? "mappable" : "total", 3368 flags & PIN_MAPPABLE ? "mappable" : "total",
3249 gtt_max); 3369 end);
3250 return ERR_PTR(-E2BIG); 3370 return ERR_PTR(-E2BIG);
3251 } 3371 }
3252 3372
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3263search_free: 3383search_free:
3264 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3384 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3265 size, alignment, 3385 size, alignment,
3266 obj->cache_level, 0, gtt_max, 3386 obj->cache_level,
3387 start, end,
3267 DRM_MM_SEARCH_DEFAULT, 3388 DRM_MM_SEARCH_DEFAULT,
3268 DRM_MM_CREATE_DEFAULT); 3389 DRM_MM_CREATE_DEFAULT);
3269 if (ret) { 3390 if (ret) {
3270 ret = i915_gem_evict_something(dev, vm, size, alignment, 3391 ret = i915_gem_evict_something(dev, vm, size, alignment,
3271 obj->cache_level, flags); 3392 obj->cache_level,
3393 start, end,
3394 flags);
3272 if (ret == 0) 3395 if (ret == 0)
3273 goto search_free; 3396 goto search_free;
3274 3397
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3828 return ret; 3951 return ret;
3829} 3952}
3830 3953
3954static bool
3955i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
3956{
3957 struct drm_i915_gem_object *obj = vma->obj;
3958
3959 if (alignment &&
3960 vma->node.start & (alignment - 1))
3961 return true;
3962
3963 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3964 return true;
3965
3966 if (flags & PIN_OFFSET_BIAS &&
3967 vma->node.start < (flags & PIN_OFFSET_MASK))
3968 return true;
3969
3970 return false;
3971}
3972
3831int 3973int
3832i915_gem_object_pin(struct drm_i915_gem_object *obj, 3974i915_gem_object_pin(struct drm_i915_gem_object *obj,
3833 struct i915_address_space *vm, 3975 struct i915_address_space *vm,
3834 uint32_t alignment, 3976 uint32_t alignment,
3835 unsigned flags) 3977 uint64_t flags)
3836{ 3978{
3837 struct i915_vma *vma; 3979 struct i915_vma *vma;
3838 int ret; 3980 int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3845 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 3987 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3846 return -EBUSY; 3988 return -EBUSY;
3847 3989
3848 if ((alignment && 3990 if (i915_vma_misplaced(vma, alignment, flags)) {
3849 vma->node.start & (alignment - 1)) ||
3850 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
3851 WARN(vma->pin_count, 3991 WARN(vma->pin_count,
3852 "bo is already pinned with incorrect alignment:" 3992 "bo is already pinned with incorrect alignment:"
3853 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," 3993 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3854 " obj->map_and_fenceable=%d\n", 3994 " obj->map_and_fenceable=%d\n",
3855 i915_gem_obj_offset(obj, vm), alignment, 3995 i915_gem_obj_offset(obj, vm), alignment,
3856 flags & PIN_MAPPABLE, 3996 !!(flags & PIN_MAPPABLE),
3857 obj->map_and_fenceable); 3997 obj->map_and_fenceable);
3858 ret = i915_vma_unbind(vma); 3998 ret = i915_vma_unbind(vma);
3859 if (ret) 3999 if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4163 4303
4164 trace_i915_gem_object_destroy(obj); 4304 trace_i915_gem_object_destroy(obj);
4165 4305
4166 if (obj->phys_obj)
4167 i915_gem_detach_phys_object(dev, obj);
4168
4169 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 4306 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4170 int ret; 4307 int ret;
4171 4308
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4183 } 4320 }
4184 } 4321 }
4185 4322
4323 i915_gem_object_detach_phys(obj);
4324
4186 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up 4325 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4187 * before progressing. */ 4326 * before progressing. */
4188 if (obj->stolen) 4327 if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
4646 register_shrinker(&dev_priv->mm.inactive_shrinker); 4785 register_shrinker(&dev_priv->mm.inactive_shrinker);
4647} 4786}
4648 4787
4649/*
4650 * Create a physically contiguous memory object for this object
4651 * e.g. for cursor + overlay regs
4652 */
4653static int i915_gem_init_phys_object(struct drm_device *dev,
4654 int id, int size, int align)
4655{
4656 struct drm_i915_private *dev_priv = dev->dev_private;
4657 struct drm_i915_gem_phys_object *phys_obj;
4658 int ret;
4659
4660 if (dev_priv->mm.phys_objs[id - 1] || !size)
4661 return 0;
4662
4663 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4664 if (!phys_obj)
4665 return -ENOMEM;
4666
4667 phys_obj->id = id;
4668
4669 phys_obj->handle = drm_pci_alloc(dev, size, align);
4670 if (!phys_obj->handle) {
4671 ret = -ENOMEM;
4672 goto kfree_obj;
4673 }
4674#ifdef CONFIG_X86
4675 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4676#endif
4677
4678 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4679
4680 return 0;
4681kfree_obj:
4682 kfree(phys_obj);
4683 return ret;
4684}
4685
4686static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4687{
4688 struct drm_i915_private *dev_priv = dev->dev_private;
4689 struct drm_i915_gem_phys_object *phys_obj;
4690
4691 if (!dev_priv->mm.phys_objs[id - 1])
4692 return;
4693
4694 phys_obj = dev_priv->mm.phys_objs[id - 1];
4695 if (phys_obj->cur_obj) {
4696 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4697 }
4698
4699#ifdef CONFIG_X86
4700 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4701#endif
4702 drm_pci_free(dev, phys_obj->handle);
4703 kfree(phys_obj);
4704 dev_priv->mm.phys_objs[id - 1] = NULL;
4705}
4706
4707void i915_gem_free_all_phys_object(struct drm_device *dev)
4708{
4709 int i;
4710
4711 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4712 i915_gem_free_phys_object(dev, i);
4713}
4714
4715void i915_gem_detach_phys_object(struct drm_device *dev,
4716 struct drm_i915_gem_object *obj)
4717{
4718 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4719 char *vaddr;
4720 int i;
4721 int page_count;
4722
4723 if (!obj->phys_obj)
4724 return;
4725 vaddr = obj->phys_obj->handle->vaddr;
4726
4727 page_count = obj->base.size / PAGE_SIZE;
4728 for (i = 0; i < page_count; i++) {
4729 struct page *page = shmem_read_mapping_page(mapping, i);
4730 if (!IS_ERR(page)) {
4731 char *dst = kmap_atomic(page);
4732 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4733 kunmap_atomic(dst);
4734
4735 drm_clflush_pages(&page, 1);
4736
4737 set_page_dirty(page);
4738 mark_page_accessed(page);
4739 page_cache_release(page);
4740 }
4741 }
4742 i915_gem_chipset_flush(dev);
4743
4744 obj->phys_obj->cur_obj = NULL;
4745 obj->phys_obj = NULL;
4746}
4747
4748int
4749i915_gem_attach_phys_object(struct drm_device *dev,
4750 struct drm_i915_gem_object *obj,
4751 int id,
4752 int align)
4753{
4754 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4755 struct drm_i915_private *dev_priv = dev->dev_private;
4756 int ret = 0;
4757 int page_count;
4758 int i;
4759
4760 if (id > I915_MAX_PHYS_OBJECT)
4761 return -EINVAL;
4762
4763 if (obj->phys_obj) {
4764 if (obj->phys_obj->id == id)
4765 return 0;
4766 i915_gem_detach_phys_object(dev, obj);
4767 }
4768
4769 /* create a new object */
4770 if (!dev_priv->mm.phys_objs[id - 1]) {
4771 ret = i915_gem_init_phys_object(dev, id,
4772 obj->base.size, align);
4773 if (ret) {
4774 DRM_ERROR("failed to init phys object %d size: %zu\n",
4775 id, obj->base.size);
4776 return ret;
4777 }
4778 }
4779
4780 /* bind to the object */
4781 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4782 obj->phys_obj->cur_obj = obj;
4783
4784 page_count = obj->base.size / PAGE_SIZE;
4785
4786 for (i = 0; i < page_count; i++) {
4787 struct page *page;
4788 char *dst, *src;
4789
4790 page = shmem_read_mapping_page(mapping, i);
4791 if (IS_ERR(page))
4792 return PTR_ERR(page);
4793
4794 src = kmap_atomic(page);
4795 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4796 memcpy(dst, src, PAGE_SIZE);
4797 kunmap_atomic(src);
4798
4799 mark_page_accessed(page);
4800 page_cache_release(page);
4801 }
4802
4803 return 0;
4804}
4805
4806static int
4807i915_gem_phys_pwrite(struct drm_device *dev,
4808 struct drm_i915_gem_object *obj,
4809 struct drm_i915_gem_pwrite *args,
4810 struct drm_file *file_priv)
4811{
4812 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4813 char __user *user_data = to_user_ptr(args->data_ptr);
4814
4815 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4816 unsigned long unwritten;
4817
4818 /* The physical object once assigned is fixed for the lifetime
4819 * of the obj, so we can safely drop the lock and continue
4820 * to access vaddr.
4821 */
4822 mutex_unlock(&dev->struct_mutex);
4823 unwritten = copy_from_user(vaddr, user_data, args->size);
4824 mutex_lock(&dev->struct_mutex);
4825 if (unwritten)
4826 return -EFAULT;
4827 }
4828
4829 i915_gem_chipset_flush(dev);
4830 return 0;
4831}
4832
4833void i915_gem_release(struct drm_device *dev, struct drm_file *file) 4788void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4834{ 4789{
4835 struct drm_i915_file_private *file_priv = file->driver_priv; 4790 struct drm_i915_file_private *file_priv = file->driver_priv;
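
The new i915_gem_phys_pwrite() above uses a two-phase copy: an atomic, non-faulting attempt under struct_mutex first, then, if that cannot complete, a faulting copy_from_user() with the mutex dropped. Dropping the lock is safe only because the phys handle is fixed for the object's lifetime. A compile-only sketch of the pattern with user-space stand-ins (try_copy_nofault and copy_faulting are assumed helpers, not kernel APIs; both return the number of bytes left uncopied):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

extern size_t try_copy_nofault(void *dst, const void *src, size_t n);
extern size_t copy_faulting(void *dst, const void *src, size_t n);

int phys_pwrite_sketch(pthread_mutex_t *lock, void *vaddr,
		       const void *user_data, size_t size)
{
	if (try_copy_nofault(vaddr, user_data, size)) {
		/* The backing store cannot move, so vaddr stays valid
		 * while the lock is dropped and user pages fault in. */
		pthread_mutex_unlock(lock);
		size_t unwritten = copy_faulting(vaddr, user_data, size);
		pthread_mutex_lock(lock);
		if (unwritten)
			return -EFAULT;
	}
	return 0;
}
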
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
68int 68int
69i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, 69i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
70 int min_size, unsigned alignment, unsigned cache_level, 70 int min_size, unsigned alignment, unsigned cache_level,
71 unsigned long start, unsigned long end,
71 unsigned flags) 72 unsigned flags)
72{ 73{
73 struct drm_i915_private *dev_priv = dev->dev_private;
74 struct list_head eviction_list, unwind_list; 74 struct list_head eviction_list, unwind_list;
75 struct i915_vma *vma; 75 struct i915_vma *vma;
76 int ret = 0; 76 int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
102 */ 102 */
103 103
104 INIT_LIST_HEAD(&unwind_list); 104 INIT_LIST_HEAD(&unwind_list);
105 if (flags & PIN_MAPPABLE) { 105 if (start != 0 || end != vm->total) {
106 BUG_ON(!i915_is_ggtt(vm));
107 drm_mm_init_scan_with_range(&vm->mm, min_size, 106 drm_mm_init_scan_with_range(&vm->mm, min_size,
108 alignment, cache_level, 0, 107 alignment, cache_level,
109 dev_priv->gtt.mappable_end); 108 start, end);
110 } else 109 } else
111 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); 110 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
112 111
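
The eviction hunk above replaces the PIN_MAPPABLE special case with an explicit [start, end) window: any caller-supplied sub-range triggers the ranged scan, and the full scan is kept only for the whole address space. A minimal sketch of that dispatch (scan_full and scan_range are stand-ins for drm_mm_init_scan() and drm_mm_init_scan_with_range()):

#include <stdint.h>

extern void scan_full(void *mm);
extern void scan_range(void *mm, uint64_t start, uint64_t end);

void init_eviction_scan(void *mm, uint64_t vm_total,
			uint64_t start, uint64_t end)
{
	if (start != 0 || end != vm_total)
		scan_range(mm, start, end);	/* restricted eviction */
	else
		scan_full(mm);			/* whole address space */
}
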
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2c9d9cbaf653..20fef6c50267 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
35 35
36#define __EXEC_OBJECT_HAS_PIN (1<<31) 36#define __EXEC_OBJECT_HAS_PIN (1<<31)
37#define __EXEC_OBJECT_HAS_FENCE (1<<30) 37#define __EXEC_OBJECT_HAS_FENCE (1<<30)
38#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
39
40#define BATCH_OFFSET_BIAS (256*1024)
38 41
39struct eb_vmas { 42struct eb_vmas {
40 struct list_head vmas; 43 struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
545 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 548 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
546 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 549 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
547 bool need_fence; 550 bool need_fence;
548 unsigned flags; 551 uint64_t flags;
549 int ret; 552 int ret;
550 553
551 flags = 0; 554 flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
559 562
560 if (entry->flags & EXEC_OBJECT_NEEDS_GTT) 563 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
561 flags |= PIN_GLOBAL; 564 flags |= PIN_GLOBAL;
565 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
566 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
562 567
563 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); 568 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
564 if (ret) 569 if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
592 return 0; 597 return 0;
593} 598}
594 599
600static bool
601eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
602{
603 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
604 struct drm_i915_gem_object *obj = vma->obj;
605 bool need_fence, need_mappable;
606
607 need_fence =
608 has_fenced_gpu_access &&
609 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
610 obj->tiling_mode != I915_TILING_NONE;
611 need_mappable = need_fence || need_reloc_mappable(vma);
612
613 WARN_ON((need_mappable || need_fence) &&
614 !i915_is_ggtt(vma->vm));
615
616 if (entry->alignment &&
617 vma->node.start & (entry->alignment - 1))
618 return true;
619
620 if (need_mappable && !obj->map_and_fenceable)
621 return true;
622
623 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
624 vma->node.start < BATCH_OFFSET_BIAS)
625 return true;
626
627 return false;
628}
629
595static int 630static int
596i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 631i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
597 struct list_head *vmas, 632 struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
653 688
654 /* Unbind any ill-fitting objects or pin. */ 689 /* Unbind any ill-fitting objects or pin. */
655 list_for_each_entry(vma, vmas, exec_list) { 690 list_for_each_entry(vma, vmas, exec_list) {
656 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
657 bool need_fence, need_mappable;
658
659 obj = vma->obj;
660
661 if (!drm_mm_node_allocated(&vma->node)) 691 if (!drm_mm_node_allocated(&vma->node))
662 continue; 692 continue;
663 693
664 need_fence = 694 if (eb_vma_misplaced(vma, has_fenced_gpu_access))
665 has_fenced_gpu_access &&
666 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
667 obj->tiling_mode != I915_TILING_NONE;
668 need_mappable = need_fence || need_reloc_mappable(vma);
669
670 WARN_ON((need_mappable || need_fence) &&
671 !i915_is_ggtt(vma->vm));
672
673 if ((entry->alignment &&
674 vma->node.start & (entry->alignment - 1)) ||
675 (need_mappable && !obj->map_and_fenceable))
676 ret = i915_vma_unbind(vma); 695 ret = i915_vma_unbind(vma);
677 else 696 else
678 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); 697 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
773 * relocations were valid. 792 * relocations were valid.
774 */ 793 */
775 for (j = 0; j < exec[i].relocation_count; j++) { 794 for (j = 0; j < exec[i].relocation_count; j++) {
776 if (copy_to_user(&user_relocs[j].presumed_offset, 795 if (__copy_to_user(&user_relocs[j].presumed_offset,
777 &invalid_offset, 796 &invalid_offset,
778 sizeof(invalid_offset))) { 797 sizeof(invalid_offset))) {
779 ret = -EFAULT; 798 ret = -EFAULT;
780 mutex_lock(&dev->struct_mutex); 799 mutex_lock(&dev->struct_mutex);
781 goto err; 800 goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
999 return 0; 1018 return 0;
1000} 1019}
1001 1020
1021static struct drm_i915_gem_object *
1022eb_get_batch(struct eb_vmas *eb)
1023{
1024 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1025
1026 /*
1027 * SNA is doing fancy tricks with compressing batch buffers, which leads
1028 * to negative relocation deltas. Usually that works out ok since the
1029 * relocate address is still positive, except when the batch is placed
1030 * very low in the GTT. Ensure this doesn't happen.
1031 *
1032 * Note that actual hangs have only been observed on gen7, but for
1033 * paranoia do it everywhere.
1034 */
1035 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1036
1037 return vma->obj;
1038}
1039
1002static int 1040static int
1003i915_gem_do_execbuffer(struct drm_device *dev, void *data, 1041i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1004 struct drm_file *file, 1042 struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1153 goto err; 1191 goto err;
1154 1192
1155 /* take note of the batch buffer before we might reorder the lists */ 1193 /* take note of the batch buffer before we might reorder the lists */
1156 batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj; 1194 batch_obj = eb_get_batch(eb);
1157 1195
1158 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1196 /* Move the objects en-masse into the GTT, evicting if necessary. */
1159 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1197 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1355 1393
1356 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); 1394 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1357 if (!ret) { 1395 if (!ret) {
1396 struct drm_i915_gem_exec_object __user *user_exec_list =
1397 to_user_ptr(args->buffers_ptr);
1398
1358 /* Copy the new buffer offsets back to the user's exec list. */ 1399 /* Copy the new buffer offsets back to the user's exec list. */
1359 for (i = 0; i < args->buffer_count; i++) 1400 for (i = 0; i < args->buffer_count; i++) {
1360 exec_list[i].offset = exec2_list[i].offset; 1401 ret = __copy_to_user(&user_exec_list[i].offset,
1361 /* ... and back out to userspace */ 1402 &exec2_list[i].offset,
1362 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1403 sizeof(user_exec_list[i].offset));
1363 exec_list, 1404 if (ret) {
1364 sizeof(*exec_list) * args->buffer_count); 1405 ret = -EFAULT;
1365 if (ret) { 1406 DRM_DEBUG("failed to copy %d exec entries "
1366 ret = -EFAULT; 1407 "back to user (%d)\n",
1367 DRM_DEBUG("failed to copy %d exec entries " 1408 args->buffer_count, ret);
1368 "back to user (%d)\n", 1409 break;
1369 args->buffer_count, ret); 1410 }
1370 } 1411 }
1371 } 1412 }
1372 1413
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1412 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1453 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1413 if (!ret) { 1454 if (!ret) {
1414 /* Copy the new buffer offsets back to the user's exec list. */ 1455 /* Copy the new buffer offsets back to the user's exec list. */
1415 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1456 struct drm_i915_gem_exec_object2 *user_exec_list =
1416 exec2_list, 1457 to_user_ptr(args->buffers_ptr);
1417 sizeof(*exec2_list) * args->buffer_count); 1458 int i;
1418 if (ret) { 1459
1419 ret = -EFAULT; 1460 for (i = 0; i < args->buffer_count; i++) {
1420 DRM_DEBUG("failed to copy %d exec entries " 1461 ret = __copy_to_user(&user_exec_list[i].offset,
1421 "back to user (%d)\n", 1462 &exec2_list[i].offset,
1422 args->buffer_count, ret); 1463 sizeof(user_exec_list[i].offset));
1464 if (ret) {
1465 ret = -EFAULT;
1466 DRM_DEBUG("failed to copy %d exec entries "
1467 "back to user\n",
1468 args->buffer_count);
1469 break;
1470 }
1423 } 1471 }
1424 } 1472 }
1425 1473
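
Both execbuffer copy-back paths above switch from one bulk copy_to_user() of the whole exec list to a per-entry __copy_to_user() of just the offset field, presumably so the kernel never rewrites fields it did not change. A minimal sketch of the per-field copy-back, with memcpy standing in for __copy_to_user() and a cut-down stand-in struct (not the real drm_i915_gem_exec_object2 layout):

#include <stdint.h>
#include <string.h>

struct exec_entry {
	uint64_t handle;
	uint64_t offset;	/* the only field the kernel updates */
	uint64_t flags;		/* userspace-owned, must not be clobbered */
};

void copy_offsets_back(struct exec_entry *user_list,
		       const struct exec_entry *kernel_list,
		       unsigned count)
{
	unsigned i;

	for (i = 0; i < count; i++)
		memcpy(&user_list[i].offset, &kernel_list[i].offset,
		       sizeof(user_list[i].offset));
}

The same file also introduces BATCH_OFFSET_BIAS via eb_get_batch(), pinning batch buffers at or above 256 KiB so compressed batches with negative relocation deltas cannot wrap below the start of the GTT.
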
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 154b0f8bb88d..5deb22864c52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1089,7 +1089,9 @@ alloc:
1089 if (ret == -ENOSPC && !retried) { 1089 if (ret == -ENOSPC && !retried) {
1090 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, 1090 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
1091 GEN6_PD_SIZE, GEN6_PD_ALIGN, 1091 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1092 I915_CACHE_NONE, 0); 1092 I915_CACHE_NONE,
1093 0, dev_priv->gtt.base.total,
1094 0);
1093 if (ret) 1095 if (ret)
1094 return ret; 1096 return ret;
1095 1097
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 48aa516a1ac0..5b60e25baa32 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7825 addr = i915_gem_obj_ggtt_offset(obj); 7825 addr = i915_gem_obj_ggtt_offset(obj);
7826 } else { 7826 } else {
7827 int align = IS_I830(dev) ? 16 * 1024 : 256; 7827 int align = IS_I830(dev) ? 16 * 1024 : 256;
7828 ret = i915_gem_attach_phys_object(dev, obj, 7828 ret = i915_gem_object_attach_phys(obj, align);
7829 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7830 align);
7831 if (ret) { 7829 if (ret) {
7832 DRM_DEBUG_KMS("failed to attach phys object\n"); 7830 DRM_DEBUG_KMS("failed to attach phys object\n");
7833 goto fail_locked; 7831 goto fail_locked;
7834 } 7832 }
7835 addr = obj->phys_obj->handle->busaddr; 7833 addr = obj->phys_handle->busaddr;
7836 } 7834 }
7837 7835
7838 if (IS_GEN2(dev)) 7836 if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7840 7838
7841 finish: 7839 finish:
7842 if (intel_crtc->cursor_bo) { 7840 if (intel_crtc->cursor_bo) {
7843 if (INTEL_INFO(dev)->cursor_needs_physical) { 7841 if (!INTEL_INFO(dev)->cursor_needs_physical)
7844 if (intel_crtc->cursor_bo != obj)
7845 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7846 } else
7847 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); 7842 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
7848 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 7843 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7849 } 7844 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc9104dca..129db0c7d835 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
193 struct overlay_registers __iomem *regs; 193 struct overlay_registers __iomem *regs;
194 194
195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
197 else 197 else
198 regs = io_mapping_map_wc(dev_priv->gtt.mappable, 198 regs = io_mapping_map_wc(dev_priv->gtt.mappable,
199 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 199 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
1340 overlay->reg_bo = reg_bo; 1340 overlay->reg_bo = reg_bo;
1341 1341
1342 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1342 if (OVERLAY_NEEDS_PHYSICAL(dev)) {
1343 ret = i915_gem_attach_phys_object(dev, reg_bo, 1343 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1344 I915_GEM_PHYS_OVERLAY_REGS,
1345 PAGE_SIZE);
1346 if (ret) { 1344 if (ret) {
1347 DRM_ERROR("failed to attach phys overlay regs\n"); 1345 DRM_ERROR("failed to attach phys overlay regs\n");
1348 goto out_free_bo; 1346 goto out_free_bo;
1349 } 1347 }
1350 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1348 overlay->flip_addr = reg_bo->phys_handle->busaddr;
1351 } else { 1349 } else {
1352 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); 1350 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
1353 if (ret) { 1351 if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1428 /* Cast to make sparse happy, but it's wc memory anyway, so 1426 /* Cast to make sparse happy, but it's wc memory anyway, so
1429 * equivalent to the wc io mapping on X86. */ 1427 * equivalent to the wc io mapping on X86. */
1430 regs = (struct overlay_registers __iomem *) 1428 regs = (struct overlay_registers __iomem *)
1431 overlay->reg_bo->phys_obj->handle->vaddr; 1429 overlay->reg_bo->phys_handle->vaddr;
1432 else 1430 else
1433 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 1431 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1434 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 1432 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1462 error->dovsta = I915_READ(DOVSTA); 1460 error->dovsta = I915_READ(DOVSTA);
1463 error->isr = I915_READ(ISR); 1461 error->isr = I915_READ(ISR);
1464 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1462 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1465 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; 1463 error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
1466 else 1464 else
1467 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); 1465 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1468 1466
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 7762665ad8fd..876de9ac3793 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
1009 } 1009 }
1010 1010
1011 if (outp == 8) 1011 if (outp == 8)
1012 return false; 1012 return conf;
1013 1013
1014 data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); 1014 data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
1015 if (data == 0x0000) 1015 if (data == 0x0000)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 43fec17ea540..bbf117be572f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
40 case 0x00: return 2; 40 case 0x00: return 2;
41 case 0x19: return 1; 41 case 0x19: return 1;
42 case 0x1c: return 0; 42 case 0x1c: return 0;
43 case 0x1e: return 2;
43 default: 44 default:
44 break; 45 break;
45 } 46 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 68528619834a..8149e7cf4303 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1642,6 +1642,7 @@ struct radeon_vce {
1642 unsigned fb_version; 1642 unsigned fb_version;
1643 atomic_t handles[RADEON_MAX_VCE_HANDLES]; 1643 atomic_t handles[RADEON_MAX_VCE_HANDLES];
1644 struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; 1644 struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
1645 unsigned img_size[RADEON_MAX_VCE_HANDLES];
1645 struct delayed_work idle_work; 1646 struct delayed_work idle_work;
1646}; 1647};
1647 1648
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
1655 uint32_t handle, struct radeon_fence **fence); 1656 uint32_t handle, struct radeon_fence **fence);
1656void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); 1657void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
1657void radeon_vce_note_usage(struct radeon_device *rdev); 1658void radeon_vce_note_usage(struct radeon_device *rdev);
1658int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); 1659int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
1659int radeon_vce_cs_parse(struct radeon_cs_parser *p); 1660int radeon_vce_cs_parse(struct radeon_cs_parser *p);
1660bool radeon_vce_semaphore_emit(struct radeon_device *rdev, 1661bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
1661 struct radeon_ring *ring, 1662 struct radeon_ring *ring,
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
2640#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) 2641#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
2641#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) 2642#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
2642#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) 2643#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
2643#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI)) 2644#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
2645 (rdev->family == CHIP_MULLINS))
2644 2646
2645#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ 2647#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
2646 (rdev->ddev->pdev->device == 0x6850) || \ 2648 (rdev->ddev->pdev->device == 0x6850) || \
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b3633d9a5317..9ab30976287d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
196 } 196 }
197 } 197 }
198 198
199 if (!found) {
200 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
201 dhandle = ACPI_HANDLE(&pdev->dev);
202 if (!dhandle)
203 continue;
204
205 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
206 if (!ACPI_FAILURE(status)) {
207 found = true;
208 break;
209 }
210 }
211 }
212
199 if (!found) 213 if (!found)
200 return false; 214 return false;
201 215
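
The radeon_bios.c fallback above repeats the ATRM probe over PCI_CLASS_DISPLAY_OTHER devices, presumably for firmware that exposes the discrete GPU under that class rather than as a VGA controller. The loop body is identical to the existing VGA scan, so one could fold both into a helper parameterized by class; a hedged kernel-only fragment (radeon_atrm_probe_class is hypothetical, the calls it makes are the ones used in the hunk):

#include <linux/pci.h>
#include <linux/acpi.h>

static bool radeon_atrm_probe_class(unsigned int class,
				    acpi_handle *atrm_handle)
{
	struct pci_dev *pdev = NULL;
	acpi_handle dhandle;

	while ((pdev = pci_get_class(class << 8, pdev)) != NULL) {
		dhandle = ACPI_HANDLE(&pdev->dev);
		if (!dhandle)
			continue;
		if (!ACPI_FAILURE(acpi_get_handle(dhandle, "ATRM",
						  atrm_handle)))
			return true;
	}
	return false;
}
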
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 2b6e0ebcc13a..41ecf8a60611 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
152 uint32_t domain = r->write_domain ? 152 uint32_t domain = r->write_domain ?
153 r->write_domain : r->read_domains; 153 r->write_domain : r->read_domains;
154 154
155 if (domain & RADEON_GEM_DOMAIN_CPU) {
156 DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
157 "for command submission\n");
158 return -EINVAL;
159 }
160
155 p->relocs[i].domain = domain; 161 p->relocs[i].domain = domain;
156 if (domain == RADEON_GEM_DOMAIN_VRAM) 162 if (domain == RADEON_GEM_DOMAIN_VRAM)
157 domain |= RADEON_GEM_DOMAIN_GTT; 163 domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
342 return -EINVAL; 348 return -EINVAL;
343 349
344 /* we only support VM on some SI+ rings */ 350 /* we only support VM on some SI+ rings */
345 if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && 351 if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
346 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { 352 if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
347 DRM_ERROR("Ring %d requires VM!\n", p->ring); 353 DRM_ERROR("Ring %d requires VM!\n", p->ring);
348 return -EINVAL; 354 return -EINVAL;
355 }
356 } else {
357 if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
358 DRM_ERROR("VM not supported on ring %d!\n",
359 p->ring);
360 return -EINVAL;
361 }
349 } 362 }
350 } 363 }
351 364
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0e770bbf7e29..14671406212f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1533 1533
1534 radeon_restore_bios_scratch_regs(rdev); 1534 radeon_restore_bios_scratch_regs(rdev);
1535 1535
1536 if (fbcon) {
1537 radeon_fbdev_set_suspend(rdev, 0);
1538 console_unlock();
1539 }
1540
1541 /* init dig PHYs, disp eng pll */ 1536 /* init dig PHYs, disp eng pll */
1542 if (rdev->is_atom_bios) { 1537 if (rdev->is_atom_bios) {
1543 radeon_atom_encoder_init(rdev); 1538 radeon_atom_encoder_init(rdev);
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1562 } 1557 }
1563 1558
1564 drm_kms_helper_poll_enable(dev); 1559 drm_kms_helper_poll_enable(dev);
1560
1561 if (fbcon) {
1562 radeon_fbdev_set_suspend(rdev, 0);
1563 console_unlock();
1564 }
1565
1565 return 0; 1566 return 0;
1566} 1567}
1567 1568
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 408b6ac53f0b..356b733caafe 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
862 unsigned *fb_div, unsigned *ref_div) 862 unsigned *fb_div, unsigned *ref_div)
863{ 863{
864 /* limit reference * post divider to a maximum */ 864 /* limit reference * post divider to a maximum */
865 ref_div_max = min(128 / post_div, ref_div_max); 865 ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
866 866
867 /* get matching reference and feedback divider */ 867 /* get matching reference and feedback divider */
868 *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); 868 *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
999 999
1000 /* avoid high jitter with small fractional dividers */ 1000 /* avoid high jitter with small fractional dividers */
1001 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { 1001 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
1002 fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60); 1002 fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
1003 if (fb_div < fb_div_min) { 1003 if (fb_div < fb_div_min) {
1004 unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); 1004 unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
1005 fb_div *= tmp; 1005 fb_div *= tmp;
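
The avivo PLL hunk above tightens the reference times post divider limit from 128 to 100 and adds a 1u floor. The floor matters because integer division can now yield zero: a worked check, assuming a large post divider (min_u/max_u mimic the kernel's min/max macros):

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

int main(void)
{
	unsigned ref_div_max = 128, post_div = 128;

	/* 100 / 128 == 0, so without the max(..., 1u) floor the cap
	 * would collapse to zero and the later min() would drive the
	 * reference divider itself to zero. */
	unsigned capped = max_u(min_u(100 / post_div, ref_div_max), 1u);

	printf("capped = %u\n", capped);	/* prints 1 */
	return 0;
}
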
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0cc47f12d995..eaaedba04675 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
577 return r; 577 return r;
578 } 578 }
579 579
580 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 580 if (rdev->accel_working) {
581 if (r) { 581 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
582 radeon_vm_fini(rdev, &fpriv->vm); 582 if (r) {
583 kfree(fpriv); 583 radeon_vm_fini(rdev, &fpriv->vm);
584 return r; 584 kfree(fpriv);
585 } 585 return r;
586 }
586 587
587 /* map the ib pool buffer read only into 588 /* map the ib pool buffer read only into
588 * virtual address space */ 589 * virtual address space */
589 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, 590 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
590 rdev->ring_tmp_bo.bo); 591 rdev->ring_tmp_bo.bo);
591 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, 592 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
592 RADEON_VM_PAGE_READABLE | 593 RADEON_VM_PAGE_READABLE |
593 RADEON_VM_PAGE_SNOOPED); 594 RADEON_VM_PAGE_SNOOPED);
594 595
595 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 596 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
596 if (r) { 597 if (r) {
597 radeon_vm_fini(rdev, &fpriv->vm); 598 radeon_vm_fini(rdev, &fpriv->vm);
598 kfree(fpriv); 599 kfree(fpriv);
599 return r; 600 return r;
601 }
600 } 602 }
601
602 file_priv->driver_priv = fpriv; 603 file_priv->driver_priv = fpriv;
603 } 604 }
604 605
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
626 struct radeon_bo_va *bo_va; 627 struct radeon_bo_va *bo_va;
627 int r; 628 int r;
628 629
629 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 630 if (rdev->accel_working) {
630 if (!r) { 631 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
631 bo_va = radeon_vm_bo_find(&fpriv->vm, 632 if (!r) {
632 rdev->ring_tmp_bo.bo); 633 bo_va = radeon_vm_bo_find(&fpriv->vm,
633 if (bo_va) 634 rdev->ring_tmp_bo.bo);
634 radeon_vm_bo_rmv(rdev, bo_va); 635 if (bo_va)
635 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 636 radeon_vm_bo_rmv(rdev, bo_va);
637 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
638 }
636 } 639 }
637 640
638 radeon_vm_fini(rdev, &fpriv->vm); 641 radeon_vm_fini(rdev, &fpriv->vm);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 19bec0dbfa38..4faa4d6f9bb4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
458 * into account. We don't want to disallow buffer moves 458 * into account. We don't want to disallow buffer moves
459 * completely. 459 * completely.
460 */ 460 */
461 if (current_domain != RADEON_GEM_DOMAIN_CPU && 461 if ((lobj->alt_domain & current_domain) != 0 &&
462 (domain & current_domain) == 0 && /* will be moved */ 462 (domain & current_domain) == 0 && /* will be moved */
463 bytes_moved > bytes_moved_threshold) { 463 bytes_moved > bytes_moved_threshold) {
464 /* don't move it */ 464 /* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
699 rbo = container_of(bo, struct radeon_bo, tbo); 699 rbo = container_of(bo, struct radeon_bo, tbo);
700 radeon_bo_check_tiling(rbo, 0, 0); 700 radeon_bo_check_tiling(rbo, 0, 0);
701 rdev = rbo->rdev; 701 rdev = rbo->rdev;
702 if (bo->mem.mem_type == TTM_PL_VRAM) { 702 if (bo->mem.mem_type != TTM_PL_VRAM)
703 size = bo->mem.num_pages << PAGE_SHIFT; 703 return 0;
704 offset = bo->mem.start << PAGE_SHIFT; 704
705 if ((offset + size) > rdev->mc.visible_vram_size) { 705 size = bo->mem.num_pages << PAGE_SHIFT;
706 /* hurrah the memory is not visible ! */ 706 offset = bo->mem.start << PAGE_SHIFT;
707 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 707 if ((offset + size) <= rdev->mc.visible_vram_size)
708 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 708 return 0;
709 r = ttm_bo_validate(bo, &rbo->placement, false, false); 709
710 if (unlikely(r != 0)) 710 /* hurrah the memory is not visible ! */
711 return r; 711 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
712 offset = bo->mem.start << PAGE_SHIFT; 712 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
713 /* this should not happen */ 713 r = ttm_bo_validate(bo, &rbo->placement, false, false);
714 if ((offset + size) > rdev->mc.visible_vram_size) 714 if (unlikely(r == -ENOMEM)) {
715 return -EINVAL; 715 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
716 } 716 return ttm_bo_validate(bo, &rbo->placement, false, false);
717 } else if (unlikely(r != 0)) {
718 return r;
717 } 719 }
720
721 offset = bo->mem.start << PAGE_SHIFT;
722 /* this should never happen */
723 if ((offset + size) > rdev->mc.visible_vram_size)
724 return -EINVAL;
725
718 return 0; 726 return 0;
719} 727}
720 728
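
The restructured radeon_bo_fault_reserve_notify() above flattens the nesting with early returns and, new in behaviour, retries an -ENOMEM VRAM validation in GTT instead of failing the fault. A compile-only sketch of that fallback (validate_sketch stands in for radeon_ttm_placement_from_domain() plus ttm_bo_validate()):

#include <errno.h>

enum placement { PLACE_VRAM, PLACE_GTT };

extern int validate_sketch(void *bo, enum placement p);	/* 0 or -errno */

int fault_move_sketch(void *bo)
{
	int r = validate_sketch(bo, PLACE_VRAM); /* CPU-visible VRAM first */
	if (r == -ENOMEM)			 /* VRAM full: fall back */
		return validate_sketch(bo, PLACE_GTT);
	return r;
}
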
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f30b8426eee2..53d6e1bb48dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
361 struct drm_device *ddev = dev_get_drvdata(dev); 361 struct drm_device *ddev = dev_get_drvdata(dev);
362 struct radeon_device *rdev = ddev->dev_private; 362 struct radeon_device *rdev = ddev->dev_private;
363 363
364 /* Can't set profile when the card is off */
365 if ((rdev->flags & RADEON_IS_PX) &&
366 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
367 return -EINVAL;
368
364 mutex_lock(&rdev->pm.mutex); 369 mutex_lock(&rdev->pm.mutex);
365 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 370 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
366 if (strncmp("default", buf, strlen("default")) == 0) 371 if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
409 struct drm_device *ddev = dev_get_drvdata(dev); 414 struct drm_device *ddev = dev_get_drvdata(dev);
410 struct radeon_device *rdev = ddev->dev_private; 415 struct radeon_device *rdev = ddev->dev_private;
411 416
417 /* Can't set method when the card is off */
418 if ((rdev->flags & RADEON_IS_PX) &&
419 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
420 count = -EINVAL;
421 goto fail;
422 }
423
412 /* we don't support the legacy modes with dpm */ 424 /* we don't support the legacy modes with dpm */
413 if (rdev->pm.pm_method == PM_METHOD_DPM) { 425 if (rdev->pm.pm_method == PM_METHOD_DPM) {
414 count = -EINVAL; 426 count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
446 struct radeon_device *rdev = ddev->dev_private; 458 struct radeon_device *rdev = ddev->dev_private;
447 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 459 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
448 460
461 if ((rdev->flags & RADEON_IS_PX) &&
462 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
463 return snprintf(buf, PAGE_SIZE, "off\n");
464
449 return snprintf(buf, PAGE_SIZE, "%s\n", 465 return snprintf(buf, PAGE_SIZE, "%s\n",
450 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 466 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
451 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 467 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
459 struct drm_device *ddev = dev_get_drvdata(dev); 475 struct drm_device *ddev = dev_get_drvdata(dev);
460 struct radeon_device *rdev = ddev->dev_private; 476 struct radeon_device *rdev = ddev->dev_private;
461 477
478 /* Can't set dpm state when the card is off */
479 if ((rdev->flags & RADEON_IS_PX) &&
480 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
481 return -EINVAL;
482
462 mutex_lock(&rdev->pm.mutex); 483 mutex_lock(&rdev->pm.mutex);
463 if (strncmp("battery", buf, strlen("battery")) == 0) 484 if (strncmp("battery", buf, strlen("battery")) == 0)
464 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 485 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
485 struct radeon_device *rdev = ddev->dev_private; 506 struct radeon_device *rdev = ddev->dev_private;
486 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 507 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
487 508
509 if ((rdev->flags & RADEON_IS_PX) &&
510 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
511 return snprintf(buf, PAGE_SIZE, "off\n");
512
488 return snprintf(buf, PAGE_SIZE, "%s\n", 513 return snprintf(buf, PAGE_SIZE, "%s\n",
489 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : 514 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
490 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); 515 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
500 enum radeon_dpm_forced_level level; 525 enum radeon_dpm_forced_level level;
501 int ret = 0; 526 int ret = 0;
502 527
528 /* Can't force performance level when the card is off */
529 if ((rdev->flags & RADEON_IS_PX) &&
530 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
531 return -EINVAL;
532
503 mutex_lock(&rdev->pm.mutex); 533 mutex_lock(&rdev->pm.mutex);
504 if (strncmp("low", buf, strlen("low")) == 0) { 534 if (strncmp("low", buf, strlen("low")) == 0) {
505 level = RADEON_DPM_FORCED_LEVEL_LOW; 535 level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
538 char *buf) 568 char *buf)
539{ 569{
540 struct radeon_device *rdev = dev_get_drvdata(dev); 570 struct radeon_device *rdev = dev_get_drvdata(dev);
571 struct drm_device *ddev = rdev->ddev;
541 int temp; 572 int temp;
542 573
574 /* Can't get temperature when the card is off */
575 if ((rdev->flags & RADEON_IS_PX) &&
576 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
577 return -EINVAL;
578
543 if (rdev->asic->pm.get_temperature) 579 if (rdev->asic->pm.get_temperature)
544 temp = radeon_get_temperature(rdev); 580 temp = radeon_get_temperature(rdev);
545 else 581 else
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1614 struct drm_info_node *node = (struct drm_info_node *) m->private; 1650 struct drm_info_node *node = (struct drm_info_node *) m->private;
1615 struct drm_device *dev = node->minor->dev; 1651 struct drm_device *dev = node->minor->dev;
1616 struct radeon_device *rdev = dev->dev_private; 1652 struct radeon_device *rdev = dev->dev_private;
1653 struct drm_device *ddev = rdev->ddev;
1617 1654
1618 if (rdev->pm.dpm_enabled) { 1655 if ((rdev->flags & RADEON_IS_PX) &&
1656 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1657 seq_printf(m, "PX asic powered off\n");
1658 } else if (rdev->pm.dpm_enabled) {
1619 mutex_lock(&rdev->pm.mutex); 1659 mutex_lock(&rdev->pm.mutex);
1620 if (rdev->asic->dpm.debugfs_print_current_performance_level) 1660 if (rdev->asic->dpm.debugfs_print_current_performance_level)
1621 radeon_dpm_debugfs_print_current_performance_level(rdev, m); 1661 radeon_dpm_debugfs_print_current_performance_level(rdev, m);
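
Each radeon_pm.c handler above gains the same PX guard: reject the write, or report "off", when a PowerXpress dGPU is powered down, since touching it would otherwise wake or fault the hardware. The check is pasted verbatim into every handler; a hypothetical helper, not part of the patch, could centralize it (kernel-only fragment, field names taken from the hunk):

static bool radeon_px_powered_off(struct radeon_device *rdev)
{
	return (rdev->flags & RADEON_IS_PX) &&
	       (rdev->ddev->switch_power_state != DRM_SWITCH_POWER_ON);
}
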
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index f73324c81491..3971d968af6c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
443 * @p: parser context 443 * @p: parser context
444 * @lo: address of lower dword 444 * @lo: address of lower dword
445 * @hi: address of higher dword 445 * @hi: address of higher dword
446 * @size: size of checker for relocation buffer
446 * 447 *
447 * Patch relocation inside command stream with real buffer address 448 * Patch relocation inside command stream with real buffer address
448 */ 449 */
449int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) 450int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
451 unsigned size)
450{ 452{
451 struct radeon_cs_chunk *relocs_chunk; 453 struct radeon_cs_chunk *relocs_chunk;
452 uint64_t offset; 454 struct radeon_cs_reloc *reloc;
455 uint64_t start, end, offset;
453 unsigned idx; 456 unsigned idx;
454 457
455 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 458 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -462,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
462 return -EINVAL; 465 return -EINVAL;
463 } 466 }
464 467
465 offset += p->relocs_ptr[(idx / 4)]->gpu_offset; 468 reloc = p->relocs_ptr[(idx / 4)];
469 start = reloc->gpu_offset;
470 end = start + radeon_bo_size(reloc->robj);
471 start += offset;
466 472
467 p->ib.ptr[lo] = offset & 0xFFFFFFFF; 473 p->ib.ptr[lo] = start & 0xFFFFFFFF;
468 p->ib.ptr[hi] = offset >> 32; 474 p->ib.ptr[hi] = start >> 32;
475
476 if (end <= start) {
477 DRM_ERROR("invalid reloc offset %llX!\n", offset);
478 return -EINVAL;
479 }
480 if ((end - start) < size) {
 481 DRM_ERROR("buffer too small (%d / %d)!\n",
482 (unsigned)(end - start), size);
483 return -EINVAL;
484 }
469 485
470 return 0; 486 return 0;
471} 487}
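The added checks compute the buffer window once and then reject both wraparound (end <= start) and undersized buffers. A self-contained model of just that arithmetic, with hypothetical names rather than the radeon API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the new bounds check: a relocation now carries the number
 * of bytes the command will touch, and the buffer must still be large
 * enough once the offset is applied. */
static int check_reloc(uint64_t bo_base, uint64_t bo_size,
		       uint64_t offset, unsigned need)
{
	uint64_t start = bo_base + offset;
	uint64_t end = bo_base + bo_size;

	/* end <= start catches a zero-sized window and offset overflow */
	if (end <= start) {
		fprintf(stderr, "invalid reloc offset %llX!\n",
			(unsigned long long)offset);
		return -EINVAL;
	}
	if (end - start < need) {
		fprintf(stderr, "buffer too small (%llu / %u)!\n",
			(unsigned long long)(end - start), need);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	/* 4 KiB buffer, command needs 4096 bytes starting 512 bytes in: fails */
	printf("check: %d\n", check_reloc(0x100000, 4096, 512, 4096));
	/* same command at offset 0: passes */
	printf("check: %d\n", check_reloc(0x100000, 4096, 0, 4096));
	return 0;
}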
472 488
473/** 489/**
490 * radeon_vce_validate_handle - validate stream handle
491 *
492 * @p: parser context
493 * @handle: handle to validate
494 *
 495 * Validates the handle and returns the found session index, or -EINVAL
 496 * if we don't have another free session index.
497 */
498int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
499{
500 unsigned i;
501
502 /* validate the handle */
503 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
504 if (atomic_read(&p->rdev->vce.handles[i]) == handle)
505 return i;
506 }
507
508 /* handle not found try to alloc a new one */
509 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
510 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
511 p->rdev->vce.filp[i] = p->filp;
512 p->rdev->vce.img_size[i] = 0;
513 return i;
514 }
515 }
516
517 DRM_ERROR("No more free VCE handles!\n");
518 return -EINVAL;
519}
520
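radeon_vce_validate_handle() is a lock-free find-or-claim: first scan for a slot already holding the handle, then try to claim a free slot (value 0) with compare-and-swap. The same idea in userspace C11 atomics, as an illustrative sketch:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES]; /* 0 means free */

static int validate_handle(uint32_t handle)
{
	/* pass 1: the handle may already own a slot */
	for (int i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return i;

	/* pass 2: claim an empty slot; cmpxchg succeeds only if it was 0 */
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return i;
	}
	return -1; /* table full, the kernel returns -EINVAL here */
}

int main(void)
{
	printf("first lookup of 0xdead: slot %d\n", validate_handle(0xdead));
	printf("second lookup of 0xdead: slot %d\n", validate_handle(0xdead));
	return 0;
}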
521/**
474 * radeon_vce_cs_parse - parse and validate the command stream 522 * radeon_vce_cs_parse - parse and validate the command stream
475 * 523 *
476 * @p: parser context 524 * @p: parser context
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
478 */ 526 */
479int radeon_vce_cs_parse(struct radeon_cs_parser *p) 527int radeon_vce_cs_parse(struct radeon_cs_parser *p)
480{ 528{
481 uint32_t handle = 0; 529 int session_idx = -1;
482 bool destroy = false; 530 bool destroyed = false;
531 uint32_t tmp, handle = 0;
532 uint32_t *size = &tmp;
483 int i, r; 533 int i, r;
484 534
485 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { 535 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
491 return -EINVAL; 541 return -EINVAL;
492 } 542 }
493 543
544 if (destroyed) {
545 DRM_ERROR("No other command allowed after destroy!\n");
546 return -EINVAL;
547 }
548
494 switch (cmd) { 549 switch (cmd) {
495 case 0x00000001: // session 550 case 0x00000001: // session
496 handle = radeon_get_ib_value(p, p->idx + 2); 551 handle = radeon_get_ib_value(p, p->idx + 2);
552 session_idx = radeon_vce_validate_handle(p, handle);
553 if (session_idx < 0)
554 return session_idx;
555 size = &p->rdev->vce.img_size[session_idx];
497 break; 556 break;
498 557
499 case 0x00000002: // task info 558 case 0x00000002: // task info
559 break;
560
500 case 0x01000001: // create 561 case 0x01000001: // create
562 *size = radeon_get_ib_value(p, p->idx + 8) *
563 radeon_get_ib_value(p, p->idx + 10) *
564 8 * 3 / 2;
565 break;
566
501 case 0x04000001: // config extension 567 case 0x04000001: // config extension
502 case 0x04000002: // pic control 568 case 0x04000002: // pic control
503 case 0x04000005: // rate control 569 case 0x04000005: // rate control
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
506 break; 572 break;
507 573
508 case 0x03000001: // encode 574 case 0x03000001: // encode
509 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); 575 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
576 *size);
510 if (r) 577 if (r)
511 return r; 578 return r;
512 579
513 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); 580 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
581 *size / 3);
514 if (r) 582 if (r)
515 return r; 583 return r;
516 break; 584 break;
517 585
518 case 0x02000001: // destroy 586 case 0x02000001: // destroy
519 destroy = true; 587 destroyed = true;
520 break; 588 break;
521 589
522 case 0x05000001: // context buffer 590 case 0x05000001: // context buffer
591 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
592 *size * 2);
593 if (r)
594 return r;
595 break;
596
523 case 0x05000004: // video bitstream buffer 597 case 0x05000004: // video bitstream buffer
598 tmp = radeon_get_ib_value(p, p->idx + 4);
599 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
600 tmp);
601 if (r)
602 return r;
603 break;
604
524 case 0x05000005: // feedback buffer 605 case 0x05000005: // feedback buffer
525 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); 606 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
607 4096);
526 if (r) 608 if (r)
527 return r; 609 return r;
528 break; 610 break;
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
532 return -EINVAL; 614 return -EINVAL;
533 } 615 }
534 616
617 if (session_idx == -1) {
618 DRM_ERROR("no session command at start of IB\n");
619 return -EINVAL;
620 }
621
535 p->idx += len / 4; 622 p->idx += len / 4;
536 } 623 }
537 624
538 if (destroy) { 625 if (destroyed) {
539 /* IB contains a destroy msg, free the handle */ 626 /* IB contains a destroy msg, free the handle */
540 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) 627 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
541 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); 628 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
542
543 return 0;
544 }
545
546 /* create or encode, validate the handle */
547 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
548 if (atomic_read(&p->rdev->vce.handles[i]) == handle)
549 return 0;
550 } 629 }
551 630
552 /* handle not found try to alloc a new one */ 631 return 0;
553 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
554 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
555 p->rdev->vce.filp[i] = p->filp;
556 return 0;
557 }
558 }
559
560 DRM_ERROR("No more free VCE handles!\n");
561 return -EINVAL;
562} 632}
563 633
564/** 634/**
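The reworked radeon_vce_cs_parse() enforces two ordering rules visible in the hunks above: an IB must begin with a session command, and a destroy must be the last command. A compact model of just that state machine, with the command IDs taken from the switch and everything else toy:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
	CMD_SESSION = 0x00000001,
	CMD_DESTROY = 0x02000001,
	CMD_ENCODE  = 0x03000001,
};

static int parse(const uint32_t *cmds, int n)
{
	int session_idx = -1;
	bool destroyed = false;

	for (int i = 0; i < n; ++i) {
		if (destroyed) {
			fprintf(stderr, "No other command allowed after destroy!\n");
			return -EINVAL;
		}
		switch (cmds[i]) {
		case CMD_SESSION: session_idx = 0; break;
		case CMD_DESTROY: destroyed = true; break;
		default: break;
		}
		/* checked after every command, as in the real parser */
		if (session_idx == -1) {
			fprintf(stderr, "no session command at start of IB\n");
			return -EINVAL;
		}
	}
	return 0;
}

int main(void)
{
	uint32_t good[] = { CMD_SESSION, CMD_ENCODE, CMD_DESTROY };
	uint32_t bad[]  = { CMD_SESSION, CMD_DESTROY, CMD_ENCODE };
	printf("good: %d, bad: %d\n", parse(good, 3), parse(bad, 3));
	return 0;
}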
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2aae6ce49d32..1f426696de36 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
130 struct list_head *head) 130 struct list_head *head)
131{ 131{
132 struct radeon_cs_reloc *list; 132 struct radeon_cs_reloc *list;
133 unsigned i, idx, size; 133 unsigned i, idx;
134 134
135 size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); 135 list = kmalloc_array(vm->max_pde_used + 1,
136 list = kmalloc(size, GFP_KERNEL); 136 sizeof(struct radeon_cs_reloc), GFP_KERNEL);
137 if (!list) 137 if (!list)
138 return NULL; 138 return NULL;
139 139
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
595 ndw = 64; 595 ndw = 64;
596 596
597 /* assume the worst case */ 597 /* assume the worst case */
598 ndw += vm->max_pde_used * 12; 598 ndw += vm->max_pde_used * 16;
599 599
600 /* update too big for an IB */ 600 /* update too big for an IB */
601 if (ndw > 0xfffff) 601 if (ndw > 0xfffff)
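The radeon_vm hunk swaps a hand-computed n * sizeof(...) for kmalloc_array(). The point is overflow safety: the multiplication is checked before the allocator sees it, instead of silently wrapping to a small size. A userspace model that assumes nothing about the kernel allocator:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL; /* n * size would overflow: refuse, don't wrap */
	return malloc(n * size);
}

int main(void)
{
	/* a plausible request succeeds... */
	void *ok = alloc_array(128, sizeof(uint64_t));
	/* ...while an overflowing one is rejected rather than undersized */
	void *bad = alloc_array(SIZE_MAX / 2, 4);
	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}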
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 683532f84931..7321283602ce 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -107,8 +107,8 @@
107#define SPLL_CHG_STATUS (1 << 1) 107#define SPLL_CHG_STATUS (1 << 1)
108#define SPLL_CNTL_MODE 0x618 108#define SPLL_CNTL_MODE 0x618
109#define SPLL_SW_DIR_CONTROL (1 << 0) 109#define SPLL_SW_DIR_CONTROL (1 << 0)
110# define SPLL_REFCLK_SEL(x) ((x) << 8) 110# define SPLL_REFCLK_SEL(x) ((x) << 26)
111# define SPLL_REFCLK_SEL_MASK 0xFF00 111# define SPLL_REFCLK_SEL_MASK (3 << 26)
112 112
113#define CG_SPLL_SPREAD_SPECTRUM 0x620 113#define CG_SPLL_SPREAD_SPECTRUM 0x620
114#define SSEN (1 << 0) 114#define SSEN (1 << 0)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bc196f49ec53..4af0da96c2e2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
1053 1053
1054config SENSORS_NTC_THERMISTOR 1054config SENSORS_NTC_THERMISTOR
1055 tristate "NTC thermistor support" 1055 tristate "NTC thermistor support"
1056 depends on (!OF && !IIO) || (OF && IIO) 1056 depends on !OF || IIO=n || IIO
1057 help 1057 help
1058 This driver supports NTC thermistors sensor reading and its 1058 This driver supports NTC thermistors sensor reading and its
1059 interpretation. The driver can also monitor the temperature and 1059 interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8a17f01e8672..e76feb86a1d4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -44,6 +44,7 @@ struct ntc_compensation {
44 unsigned int ohm; 44 unsigned int ohm;
45}; 45};
46 46
47/* Order matters, ntc_match references the entries by index */
47static const struct platform_device_id ntc_thermistor_id[] = { 48static const struct platform_device_id ntc_thermistor_id[] = {
48 { "ncp15wb473", TYPE_NCPXXWB473 }, 49 { "ncp15wb473", TYPE_NCPXXWB473 },
49 { "ncp18wb473", TYPE_NCPXXWB473 }, 50 { "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
141 char name[PLATFORM_NAME_SIZE]; 142 char name[PLATFORM_NAME_SIZE];
142}; 143};
143 144
144#ifdef CONFIG_OF 145#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
145static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) 146static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
146{ 147{
147 struct iio_channel *channel = pdata->chan; 148 struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
163 164
164static const struct of_device_id ntc_match[] = { 165static const struct of_device_id ntc_match[] = {
165 { .compatible = "ntc,ncp15wb473", 166 { .compatible = "ntc,ncp15wb473",
166 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 167 .data = &ntc_thermistor_id[0] },
167 { .compatible = "ntc,ncp18wb473", 168 { .compatible = "ntc,ncp18wb473",
168 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 169 .data = &ntc_thermistor_id[1] },
169 { .compatible = "ntc,ncp21wb473", 170 { .compatible = "ntc,ncp21wb473",
170 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 171 .data = &ntc_thermistor_id[2] },
171 { .compatible = "ntc,ncp03wb473", 172 { .compatible = "ntc,ncp03wb473",
172 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 173 .data = &ntc_thermistor_id[3] },
173 { .compatible = "ntc,ncp15wl333", 174 { .compatible = "ntc,ncp15wl333",
174 .data = &ntc_thermistor_id[TYPE_NCPXXWL333] }, 175 .data = &ntc_thermistor_id[4] },
175 { }, 176 { },
176}; 177};
177MODULE_DEVICE_TABLE(of, ntc_match); 178MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
223 return NULL; 224 return NULL;
224} 225}
225 226
227#define ntc_match NULL
228
226static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) 229static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
227{ } 230{ }
228#endif 231#endif
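The ntc_thermistor hunk replaces enum-based indexing (TYPE_NCPXXWB473 is a type value, not a table position) with positional indices, hence the "Order matters" comment. A toy model of the two parallel tables, with illustrative names:

#include <stddef.h>
#include <stdio.h>

struct platform_id { const char *name; int type; };
struct of_match    { const char *compatible; const struct platform_id *data; };

/* Order matters: of_match references the entries by index */
static const struct platform_id ids[] = {
	{ "ncp15wb473", 0 },
	{ "ncp18wb473", 0 },
};

static const struct of_match matches[] = {
	{ "ntc,ncp15wb473", &ids[0] }, /* must line up with ids[] above */
	{ "ntc,ncp18wb473", &ids[1] },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(matches) / sizeof(matches[0]); ++i)
		printf("%s -> %s\n", matches[i].compatible, matches[i].data->name);
	return 0;
}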
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 22e92c3d3d07..3c20e4bd6dd1 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
422 */ 422 */
423 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); 423 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
424 424
425 /* enforce disabled interrupts (due to HW issues) */
426 i2c_dw_disable_int(dev);
427
425 /* Enable the adapter */ 428 /* Enable the adapter */
426 __i2c_dw_enable(dev, true); 429 __i2c_dw_enable(dev, true);
427 430
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 28cbe1b2a2ec..32c85e9ecdae 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
999 999
1000 dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, 1000 dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
1001 resource_size(&adev->res)); 1001 resource_size(&adev->res));
1002 if (IS_ERR(dev->virtbase)) { 1002 if (!dev->virtbase) {
1003 ret = -ENOMEM; 1003 ret = -ENOMEM;
1004 goto err_no_mem; 1004 goto err_no_mem;
1005 } 1005 }
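The nomadik fix hinges on the two error-return conventions: devm_ioremap() signals failure with NULL, while IS_ERR() only recognizes ERR_PTR-encoded values, so the old check could never fire. A userspace contrast of the two, where is_err mirrors the kernel's IS_ERR_VALUE range test:

#include <errno.h>
#include <stdio.h>

static void *returns_null_on_error(int fail)
{
	static int storage;
	return fail ? NULL : &storage;
}

static void *returns_errptr_on_error(int fail)
{
	static int storage;
	return fail ? (void *)(long)-ENOMEM : &storage; /* ERR_PTR-style */
}

static int is_err(const void *p)
{
	/* last page of the address space encodes an errno */
	return (unsigned long)p >= (unsigned long)-4095;
}

int main(void)
{
	/* checking the wrong convention silently accepts a NULL mapping */
	printf("IS_ERR(NULL) = %d  (why the old check never tripped)\n",
	       is_err(returns_null_on_error(1)));
	printf("IS_ERR(ERR_PTR(-ENOMEM)) = %d\n",
	       is_err(returns_errptr_on_error(1)));
	return 0;
}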
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 1b4cf14f1106..2a5efb5b487c 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
479 int ret, idx; 479 int ret, idx;
480 480
481 ret = pm_runtime_get_sync(qup->dev); 481 ret = pm_runtime_get_sync(qup->dev);
482 if (ret) 482 if (ret < 0)
483 goto out; 483 goto out;
484 484
485 writel(1, qup->base + QUP_SW_RESET); 485 writel(1, qup->base + QUP_SW_RESET);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d4fa8eba6e9d..06d47aafbb79 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
561 561
562 ret = -EINVAL; 562 ret = -EINVAL;
563 for (i = 0; i < num; i++) { 563 for (i = 0; i < num; i++) {
564 /* This HW can't send STOP after address phase */
565 if (msgs[i].len == 0) {
566 ret = -EOPNOTSUPP;
567 break;
568 }
569
564 /*-------------- spin lock -----------------*/ 570 /*-------------- spin lock -----------------*/
565 spin_lock_irqsave(&priv->lock, flags); 571 spin_lock_irqsave(&priv->lock, flags);
566 572
@@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
625 631
626static u32 rcar_i2c_func(struct i2c_adapter *adap) 632static u32 rcar_i2c_func(struct i2c_adapter *adap)
627{ 633{
628 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 634 /* This HW can't do SMBUS_QUICK and NOSTART */
635 return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
629} 636}
630 637
631static const struct i2c_algorithm rcar_i2c_algo = { 638static const struct i2c_algorithm rcar_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index ae4491062e41..bb3a9964f7e0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
1276 struct platform_device *pdev = to_platform_device(dev); 1276 struct platform_device *pdev = to_platform_device(dev);
1277 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); 1277 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
1278 1278
1279 i2c->suspended = 0;
1280 clk_prepare_enable(i2c->clk); 1279 clk_prepare_enable(i2c->clk);
1281 s3c24xx_i2c_init(i2c); 1280 s3c24xx_i2c_init(i2c);
1282 clk_disable_unprepare(i2c->clk); 1281 clk_disable_unprepare(i2c->clk);
1282 i2c->suspended = 0;
1283 1283
1284 return 0; 1284 return 0;
1285} 1285}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe156a37..199c7896f081 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@
48 48
49#include <linux/mlx4/driver.h> 49#include <linux/mlx4/driver.h>
50#include <linux/mlx4/cmd.h> 50#include <linux/mlx4/cmd.h>
51#include <linux/mlx4/qp.h>
51 52
52#include "mlx4_ib.h" 53#include "mlx4_ib.h"
53#include "user.h" 54#include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
1614} 1615}
1615#endif 1616#endif
1616 1617
1618#define MLX4_IB_INVALID_MAC ((u64)-1)
1619static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1620 struct net_device *dev,
1621 int port)
1622{
1623 u64 new_smac = 0;
1624 u64 release_mac = MLX4_IB_INVALID_MAC;
1625 struct mlx4_ib_qp *qp;
1626
1627 read_lock(&dev_base_lock);
1628 new_smac = mlx4_mac_to_u64(dev->dev_addr);
1629 read_unlock(&dev_base_lock);
1630
1631 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
1632 qp = ibdev->qp1_proxy[port - 1];
1633 if (qp) {
1634 int new_smac_index;
1635 u64 old_smac = qp->pri.smac;
1636 struct mlx4_update_qp_params update_params;
1637
1638 if (new_smac == old_smac)
1639 goto unlock;
1640
1641 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
1642
1643 if (new_smac_index < 0)
1644 goto unlock;
1645
1646 update_params.smac_index = new_smac_index;
1647 if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
1648 &update_params)) {
1649 release_mac = new_smac;
1650 goto unlock;
1651 }
1652
1653 qp->pri.smac = new_smac;
1654 qp->pri.smac_index = new_smac_index;
1655
1656 release_mac = old_smac;
1657 }
1658
1659unlock:
1660 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
1661 if (release_mac != MLX4_IB_INVALID_MAC)
1662 mlx4_unregister_mac(ibdev->dev, port, release_mac);
1663}
1664
1617static void mlx4_ib_get_dev_addr(struct net_device *dev, 1665static void mlx4_ib_get_dev_addr(struct net_device *dev,
1618 struct mlx4_ib_dev *ibdev, u8 port) 1666 struct mlx4_ib_dev *ibdev, u8 port)
1619{ 1667{
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1689 return 0; 1737 return 0;
1690} 1738}
1691 1739
1692static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) 1740static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1741 struct net_device *dev,
1742 unsigned long event)
1743
1693{ 1744{
1694 struct mlx4_ib_iboe *iboe; 1745 struct mlx4_ib_iboe *iboe;
1746 int update_qps_port = -1;
1695 int port; 1747 int port;
1696 1748
1697 iboe = &ibdev->iboe; 1749 iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
1719 } 1771 }
1720 curr_master = iboe->masters[port - 1]; 1772 curr_master = iboe->masters[port - 1];
1721 1773
1774 if (dev == iboe->netdevs[port - 1] &&
1775 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
1776 event == NETDEV_UP || event == NETDEV_CHANGE))
1777 update_qps_port = port;
1778
1722 if (curr_netdev) { 1779 if (curr_netdev) {
1723 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? 1780 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
1724 IB_PORT_ACTIVE : IB_PORT_DOWN; 1781 IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
1752 } 1809 }
1753 1810
1754 spin_unlock(&iboe->lock); 1811 spin_unlock(&iboe->lock);
1812
1813 if (update_qps_port > 0)
1814 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
1755} 1815}
1756 1816
1757static int mlx4_ib_netdev_event(struct notifier_block *this, 1817static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
1764 return NOTIFY_DONE; 1824 return NOTIFY_DONE;
1765 1825
1766 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); 1826 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1767 mlx4_ib_scan_netdevs(ibdev); 1827 mlx4_ib_scan_netdevs(ibdev, dev, event);
1768 1828
1769 return NOTIFY_DONE; 1829 return NOTIFY_DONE;
1770} 1830}
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2043 goto err_map; 2103 goto err_map;
2044 2104
2045 for (i = 0; i < ibdev->num_ports; ++i) { 2105 for (i = 0; i < ibdev->num_ports; ++i) {
2106 mutex_init(&ibdev->qp1_proxy_lock[i]);
2046 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == 2107 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2047 IB_LINK_LAYER_ETHERNET) { 2108 IB_LINK_LAYER_ETHERNET) {
2048 err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); 2109 err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2126 for (i = 1 ; i <= ibdev->num_ports ; ++i) 2187 for (i = 1 ; i <= ibdev->num_ports ; ++i)
2127 reset_gid_table(ibdev, i); 2188 reset_gid_table(ibdev, i);
2128 rtnl_lock(); 2189 rtnl_lock();
2129 mlx4_ib_scan_netdevs(ibdev); 2190 mlx4_ib_scan_netdevs(ibdev, NULL, 0);
2130 rtnl_unlock(); 2191 rtnl_unlock();
2131 mlx4_ib_init_gid_table(ibdev); 2192 mlx4_ib_init_gid_table(ibdev);
2132 } 2193 }
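mlx4_ib_update_qps() above follows an update-with-rollback discipline: register the new MAC first, try to repoint the QP at it, and whichever address loses (the new one on failure, the old one on success) is released exactly once afterwards. A sketch of that shape with illustrative stubs, not the mlx4 API:

#include <stdint.h>
#include <stdio.h>

#define INVALID_MAC ((uint64_t)-1)

static int  register_mac(uint64_t mac)   { printf("register %llx\n", (unsigned long long)mac); return 0; }
static void unregister_mac(uint64_t mac) { printf("release  %llx\n", (unsigned long long)mac); }
static int  update_qp(uint64_t mac)      { (void)mac; return 0; /* flip to -1 to test rollback */ }

static void update_smac(uint64_t *cur, uint64_t new_smac)
{
	uint64_t release = INVALID_MAC;
	uint64_t old = *cur;

	if (new_smac == old)
		return;
	if (register_mac(new_smac) < 0)
		return;
	if (update_qp(new_smac)) {
		release = new_smac; /* QP untouched: drop the new MAC */
	} else {
		*cur = new_smac;
		release = old;      /* QP switched: drop the old MAC */
	}
	if (release != INVALID_MAC)
		unregister_mac(release);
}

int main(void)
{
	uint64_t smac = 0x001122334455;
	update_smac(&smac, 0x665544332211);
	return 0;
}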
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522fddfd..66b0b7dbd9f4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
522 int steer_qpn_count; 522 int steer_qpn_count;
523 int steer_qpn_base; 523 int steer_qpn_base;
524 int steering_support; 524 int steering_support;
525 struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
526 /* lock when destroying qp1_proxy and getting netdev events */
527 struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
525}; 528};
526 529
527struct ib_event_work { 530struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af4163c..dc57482ae7af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
1132 if (is_qp0(dev, mqp)) 1132 if (is_qp0(dev, mqp))
1133 mlx4_CLOSE_PORT(dev->dev, mqp->port); 1133 mlx4_CLOSE_PORT(dev->dev, mqp->port);
1134 1134
1135 if (dev->qp1_proxy[mqp->port - 1] == mqp) {
1136 mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
1137 dev->qp1_proxy[mqp->port - 1] = NULL;
1138 mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
1139 }
1140
1135 pd = get_pd(mqp); 1141 pd = get_pd(mqp);
1136 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); 1142 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
1137 1143
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1646 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); 1652 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
1647 if (err) 1653 if (err)
1648 return -EINVAL; 1654 return -EINVAL;
1655 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1656 dev->qp1_proxy[qp->port - 1] = qp;
1649 } 1657 }
1650 } 1658 }
1651 } 1659 }
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index c98fdb185931..a1710465faaf 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -28,6 +28,7 @@
28#include <target/target_core_base.h> 28#include <target/target_core_base.h>
29#include <target/target_core_fabric.h> 29#include <target/target_core_fabric.h>
30#include <target/iscsi/iscsi_transport.h> 30#include <target/iscsi/iscsi_transport.h>
31#include <linux/semaphore.h>
31 32
32#include "isert_proto.h" 33#include "isert_proto.h"
33#include "ib_isert.h" 34#include "ib_isert.h"
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
561 struct isert_device *device; 562 struct isert_device *device;
562 struct ib_device *ib_dev = cma_id->device; 563 struct ib_device *ib_dev = cma_id->device;
563 int ret = 0; 564 int ret = 0;
564 u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; 565 u8 pi_support;
566
567 spin_lock_bh(&np->np_thread_lock);
568 if (!np->enabled) {
569 spin_unlock_bh(&np->np_thread_lock);
570 pr_debug("iscsi_np is not enabled, reject connect request\n");
571 return rdma_reject(cma_id, NULL, 0);
572 }
573 spin_unlock_bh(&np->np_thread_lock);
565 574
566 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", 575 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
567 cma_id, cma_id->context); 576 cma_id, cma_id->context);
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
652 goto out_mr; 661 goto out_mr;
653 } 662 }
654 663
664 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
655 if (pi_support && !device->pi_capable) { 665 if (pi_support && !device->pi_capable) {
656 pr_err("Protection information requested but not supported\n"); 666 pr_err("Protection information requested but not supported\n");
657 ret = -EINVAL; 667 ret = -EINVAL;
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
663 goto out_conn_dev; 673 goto out_conn_dev;
664 674
665 mutex_lock(&isert_np->np_accept_mutex); 675 mutex_lock(&isert_np->np_accept_mutex);
666 list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); 676 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
667 mutex_unlock(&isert_np->np_accept_mutex); 677 mutex_unlock(&isert_np->np_accept_mutex);
668 678
669 pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); 679 pr_debug("isert_connect_request() up np_sem np: %p\n", np);
670 wake_up(&isert_np->np_accept_wq); 680 up(&isert_np->np_sem);
671 return 0; 681 return 0;
672 682
673out_conn_dev: 683out_conn_dev:
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np,
2999 pr_err("Unable to allocate struct isert_np\n"); 3009 pr_err("Unable to allocate struct isert_np\n");
3000 return -ENOMEM; 3010 return -ENOMEM;
3001 } 3011 }
3002 init_waitqueue_head(&isert_np->np_accept_wq); 3012 sema_init(&isert_np->np_sem, 0);
3003 mutex_init(&isert_np->np_accept_mutex); 3013 mutex_init(&isert_np->np_accept_mutex);
3004 INIT_LIST_HEAD(&isert_np->np_accept_list); 3014 INIT_LIST_HEAD(&isert_np->np_accept_list);
3005 init_completion(&isert_np->np_login_comp); 3015 init_completion(&isert_np->np_login_comp);
@@ -3048,18 +3058,6 @@ out:
3048} 3058}
3049 3059
3050static int 3060static int
3051isert_check_accept_queue(struct isert_np *isert_np)
3052{
3053 int empty;
3054
3055 mutex_lock(&isert_np->np_accept_mutex);
3056 empty = list_empty(&isert_np->np_accept_list);
3057 mutex_unlock(&isert_np->np_accept_mutex);
3058
3059 return empty;
3060}
3061
3062static int
3063isert_rdma_accept(struct isert_conn *isert_conn) 3061isert_rdma_accept(struct isert_conn *isert_conn)
3064{ 3062{
3065 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 3063 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3151 int max_accept = 0, ret; 3149 int max_accept = 0, ret;
3152 3150
3153accept_wait: 3151accept_wait:
3154 ret = wait_event_interruptible(isert_np->np_accept_wq, 3152 ret = down_interruptible(&isert_np->np_sem);
3155 !isert_check_accept_queue(isert_np) ||
3156 np->np_thread_state == ISCSI_NP_THREAD_RESET);
3157 if (max_accept > 5) 3153 if (max_accept > 5)
3158 return -ENODEV; 3154 return -ENODEV;
3159 3155
3160 spin_lock_bh(&np->np_thread_lock); 3156 spin_lock_bh(&np->np_thread_lock);
3161 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 3157 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
3162 spin_unlock_bh(&np->np_thread_lock); 3158 spin_unlock_bh(&np->np_thread_lock);
3163 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); 3159 pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
3164 return -ENODEV; 3160 return -ENODEV;
3165 } 3161 }
3166 spin_unlock_bh(&np->np_thread_lock); 3162 spin_unlock_bh(&np->np_thread_lock);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 4c072ae34c01..da6612e68000 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -182,7 +182,7 @@ struct isert_device {
182}; 182};
183 183
184struct isert_np { 184struct isert_np {
185 wait_queue_head_t np_accept_wq; 185 struct semaphore np_sem;
186 struct rdma_cm_id *np_cm_id; 186 struct rdma_cm_id *np_cm_id;
187 struct mutex np_accept_mutex; 187 struct mutex np_accept_mutex;
188 struct list_head np_accept_list; 188 struct list_head np_accept_list;
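The isert conversion replaces a waitqueue plus an explicit empty-list recheck with a counting semaphore: the connect handler up()s once per queued connection and the accept thread down_interruptible()s, consuming exactly one wakeup per item. A POSIX-semaphore model of that producer/consumer pairing (the real code also handles interrupted waits and thread-reset):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t np_sem;

static void *acceptor(void *arg)
{
	(void)arg;
	for (int i = 0; i < 3; ++i) {
		sem_wait(&np_sem); /* down_interruptible() analogue */
		printf("accepted connection %d\n", i);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&np_sem, 0, 0); /* sema_init(&isert_np->np_sem, 0) */
	pthread_create(&t, NULL, acceptor, NULL);
	for (int i = 0; i < 3; ++i) {
		printf("queueing connection %d\n", i);
		sem_post(&np_sem); /* up() in the connect handler */
	}
	pthread_join(&t, NULL);
	return 0;
}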
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 76842d7dc2e3..ffc7ad3a2c88 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
71 default y 71 default y
72 select SERIO 72 select SERIO
73 select SERIO_LIBPS2 73 select SERIO_LIBPS2
74 select SERIO_I8042 if X86 74 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
75 select SERIO_GSCPS2 if GSC 75 select SERIO_GSCPS2 if GSC
76 help 76 help
77 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually 77 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index d8241ba0afa0..a15063bea700 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
111 unsigned short keycodes[MAX_KEYPAD_KEYS]; 111 unsigned short keycodes[MAX_KEYPAD_KEYS];
112 int rotary_rel_code[2]; 112 int rotary_rel_code[2];
113 113
114 unsigned int row_shift;
115
114 /* state row bits of each column scan */ 116 /* state row bits of each column scan */
115 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS]; 117 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
116 uint32_t direct_key_state; 118 uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
467 if ((bits_changed & (1 << row)) == 0) 469 if ((bits_changed & (1 << row)) == 0)
468 continue; 470 continue;
469 471
470 code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); 472 code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
473
471 input_event(input_dev, EV_MSC, MSC_SCAN, code); 474 input_event(input_dev, EV_MSC, MSC_SCAN, code);
472 input_report_key(input_dev, keypad->keycodes[code], 475 input_report_key(input_dev, keypad->keycodes[code],
473 new_state[col] & (1 << row)); 476 new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
802 goto failed_put_clk; 805 goto failed_put_clk;
803 } 806 }
804 807
808 keypad->row_shift = get_count_order(pdata->matrix_key_cols);
809
805 if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) || 810 if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
806 (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) { 811 (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
807 input_dev->evbit[0] |= BIT_MASK(EV_REL); 812 input_dev->evbit[0] |= BIT_MASK(EV_REL);
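The pxa27x fix derives the scan-code shift from the board's actual column count instead of a compile-time maximum, so the packed codes index keycodes[] the same way the keymap was built. A sketch with a stand-in for get_count_order():

#include <stdio.h>

static unsigned count_order(unsigned n) /* ceil(log2(n)) */
{
	unsigned order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

/* pack (row, col) into a single scan code, as the input core does */
#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))

int main(void)
{
	unsigned cols = 5;                      /* pdata->matrix_key_cols */
	unsigned row_shift = count_order(cols); /* 3 bits per row of columns */

	printf("row=2 col=4 -> code %u (shift %u)\n",
	       MATRIX_SCAN_CODE(2, 4, row_shift), row_shift);
	return 0;
}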
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index effa9c5f2c5c..6b8441f7bc32 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
17 default y 17 default y
18 select SERIO 18 select SERIO
19 select SERIO_LIBPS2 19 select SERIO_LIBPS2
20 select SERIO_I8042 if X86 20 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
21 select SERIO_GSCPS2 if GSC 21 select SERIO_GSCPS2 if GSC
22 help 22 help
23 Say Y here if you have a PS/2 mouse connected to your system. This 23 Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d68d33fb5ac2..c5ec703c727e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
117} 117}
118 118
119#ifdef CONFIG_MOUSE_PS2_SYNAPTICS 119#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
120struct min_max_quirk {
121 const char * const *pnp_ids;
122 int x_min, x_max, y_min, y_max;
123};
124
125static const struct min_max_quirk min_max_pnpid_table[] = {
126 {
127 (const char * const []){"LEN0033", NULL},
128 1024, 5052, 2258, 4832
129 },
130 {
131 (const char * const []){"LEN0035", "LEN0042", NULL},
132 1232, 5710, 1156, 4696
133 },
134 {
135 (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
136 1024, 5112, 2024, 4832
137 },
138 {
139 (const char * const []){"LEN2001", NULL},
140 1024, 5022, 2508, 4832
141 },
142 { }
143};
144
120/* This list has been kindly provided by Synaptics. */ 145/* This list has been kindly provided by Synaptics. */
121static const char * const topbuttonpad_pnp_ids[] = { 146static const char * const topbuttonpad_pnp_ids[] = {
122 "LEN0017", 147 "LEN0017",
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
129 "LEN002D", 154 "LEN002D",
130 "LEN002E", 155 "LEN002E",
131 "LEN0033", /* Helix */ 156 "LEN0033", /* Helix */
132 "LEN0034", /* T431s, T540, X1 Carbon 2nd */ 157 "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
133 "LEN0035", /* X240 */ 158 "LEN0035", /* X240 */
134 "LEN0036", /* T440 */ 159 "LEN0036", /* T440 */
135 "LEN0037", 160 "LEN0037",
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
142 "LEN0048", 167 "LEN0048",
143 "LEN0049", 168 "LEN0049",
144 "LEN2000", 169 "LEN2000",
145 "LEN2001", 170 "LEN2001", /* Edge E431 */
146 "LEN2002", 171 "LEN2002",
147 "LEN2003", 172 "LEN2003",
148 "LEN2004", /* L440 */ 173 "LEN2004", /* L440 */
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
156 NULL 181 NULL
157}; 182};
158 183
184static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
185{
186 int i;
187
188 if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
189 for (i = 0; ids[i]; i++)
190 if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
191 return true;
192
193 return false;
194}
195
159/***************************************************************************** 196/*****************************************************************************
160 * Synaptics communications functions 197 * Synaptics communications functions
161 ****************************************************************************/ 198 ****************************************************************************/
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
304 * Resolution is left zero if touchpad does not support the query 341 * Resolution is left zero if touchpad does not support the query
305 */ 342 */
306 343
307static const int *quirk_min_max;
308
309static int synaptics_resolution(struct psmouse *psmouse) 344static int synaptics_resolution(struct psmouse *psmouse)
310{ 345{
311 struct synaptics_data *priv = psmouse->private; 346 struct synaptics_data *priv = psmouse->private;
312 unsigned char resp[3]; 347 unsigned char resp[3];
348 int i;
313 349
314 if (quirk_min_max) { 350 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
315 priv->x_min = quirk_min_max[0]; 351 if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
316 priv->x_max = quirk_min_max[1]; 352 priv->x_min = min_max_pnpid_table[i].x_min;
317 priv->y_min = quirk_min_max[2]; 353 priv->x_max = min_max_pnpid_table[i].x_max;
318 priv->y_max = quirk_min_max[3]; 354 priv->y_min = min_max_pnpid_table[i].y_min;
319 return 0; 355 priv->y_max = min_max_pnpid_table[i].y_max;
320 } 356 return 0;
357 }
321 358
322 if (SYN_ID_MAJOR(priv->identity) < 4) 359 if (SYN_ID_MAJOR(priv->identity) < 4)
323 return 0; 360 return 0;
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,
1365 1402
1366 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { 1403 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
1367 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); 1404 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1368 /* See if this buttonpad has a top button area */ 1405 if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
1369 if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) { 1406 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
1370 for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
1371 if (strstr(psmouse->ps2dev.serio->firmware_id,
1372 topbuttonpad_pnp_ids[i])) {
1373 __set_bit(INPUT_PROP_TOPBUTTONPAD,
1374 dev->propbit);
1375 break;
1376 }
1377 }
1378 }
1379 /* Clickpads report only left button */ 1407 /* Clickpads report only left button */
1380 __clear_bit(BTN_RIGHT, dev->keybit); 1408 __clear_bit(BTN_RIGHT, dev->keybit);
1381 __clear_bit(BTN_MIDDLE, dev->keybit); 1409 __clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
1547 { } 1575 { }
1548}; 1576};
1549 1577
1550static const struct dmi_system_id min_max_dmi_table[] __initconst = {
1551#if defined(CONFIG_DMI)
1552 {
1553 /* Lenovo ThinkPad Helix */
1554 .matches = {
1555 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1556 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
1557 },
1558 .driver_data = (int []){1024, 5052, 2258, 4832},
1559 },
1560 {
1561 /* Lenovo ThinkPad X240 */
1562 .matches = {
1563 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1564 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
1565 },
1566 .driver_data = (int []){1232, 5710, 1156, 4696},
1567 },
1568 {
1569 /* Lenovo ThinkPad Edge E431 */
1570 .matches = {
1571 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1572 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
1573 },
1574 .driver_data = (int []){1024, 5022, 2508, 4832},
1575 },
1576 {
1577 /* Lenovo ThinkPad T431s */
1578 .matches = {
1579 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1580 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
1581 },
1582 .driver_data = (int []){1024, 5112, 2024, 4832},
1583 },
1584 {
1585 /* Lenovo ThinkPad T440s */
1586 .matches = {
1587 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1588 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
1589 },
1590 .driver_data = (int []){1024, 5112, 2024, 4832},
1591 },
1592 {
1593 /* Lenovo ThinkPad L440 */
1594 .matches = {
1595 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1596 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
1597 },
1598 .driver_data = (int []){1024, 5112, 2024, 4832},
1599 },
1600 {
1601 /* Lenovo ThinkPad T540p */
1602 .matches = {
1603 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1604 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
1605 },
1606 .driver_data = (int []){1024, 5056, 2058, 4832},
1607 },
1608 {
1609 /* Lenovo ThinkPad L540 */
1610 .matches = {
1611 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1612 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
1613 },
1614 .driver_data = (int []){1024, 5112, 2024, 4832},
1615 },
1616 {
1617 /* Lenovo Yoga S1 */
1618 .matches = {
1619 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1620 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
1621 "ThinkPad S1 Yoga"),
1622 },
1623 .driver_data = (int []){1232, 5710, 1156, 4696},
1624 },
1625 {
1626 /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
1627 .matches = {
1628 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1629 DMI_MATCH(DMI_PRODUCT_VERSION,
1630 "ThinkPad X1 Carbon 2nd"),
1631 },
1632 .driver_data = (int []){1024, 5112, 2024, 4832},
1633 },
1634#endif
1635 { }
1636};
1637
1638void __init synaptics_module_init(void) 1578void __init synaptics_module_init(void)
1639{ 1579{
1640 const struct dmi_system_id *min_max_dmi;
1641
1642 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); 1580 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
1643 broken_olpc_ec = dmi_check_system(olpc_dmi_table); 1581 broken_olpc_ec = dmi_check_system(olpc_dmi_table);
1644
1645 min_max_dmi = dmi_first_match(min_max_dmi_table);
1646 if (min_max_dmi)
1647 quirk_min_max = min_max_dmi->driver_data;
1648} 1582}
1649 1583
1650static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) 1584static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 762b08432de0..8b748d99b934 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
79 writeb(divisor, KMICLKDIV); 79 writeb(divisor, KMICLKDIV);
80 writeb(KMICR_EN, KMICR); 80 writeb(KMICR_EN, KMICR);
81 81
82 ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi); 82 ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
83 kmi);
83 if (ret) { 84 if (ret) {
84 printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq); 85 printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
85 writeb(0, KMICR); 86 writeb(0, KMICR);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 68edc9db2c64..b845e9370871 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713
640 640
641config TOUCHSCREEN_WM97XX_ATMEL 641config TOUCHSCREEN_WM97XX_ATMEL
642 tristate "WM97xx Atmel accelerated touch" 642 tristate "WM97xx Atmel accelerated touch"
643 depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91) 643 depends on TOUCHSCREEN_WM97XX && AVR32
644 help 644 help
645 Say Y here for support for streaming mode with WM97xx touchscreens 645 Say Y here for support for streaming mode with WM97xx touchscreens
646 on Atmel AT91 or AVR32 systems with an AC97C module. 646 on Atmel AT91 or AVR32 systems with an AC97C module.
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c949520bd196..57068e8035b5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3999 iommu_flush_dte(iommu, devid); 3999 iommu_flush_dte(iommu, devid);
4000 if (devid != alias) { 4000 if (devid != alias) {
4001 irq_lookup_table[alias] = table; 4001 irq_lookup_table[alias] = table;
4002 set_dte_irq_entry(devid, table); 4002 set_dte_irq_entry(alias, table);
4003 iommu_flush_dte(iommu, alias); 4003 iommu_flush_dte(iommu, alias);
4004 } 4004 }
4005 4005
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b76c58dbe30c..0e08545d7298 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
788 * per device. But we can enable the exclusion range per 788 * per device. But we can enable the exclusion range per
789 * device. This is done here 789 * device. This is done here
790 */ 790 */
791 set_dev_entry_bit(m->devid, DEV_ENTRY_EX); 791 set_dev_entry_bit(devid, DEV_ENTRY_EX);
792 iommu->exclusion_start = m->range_start; 792 iommu->exclusion_start = m->range_start;
793 iommu->exclusion_length = m->range_length; 793 iommu->exclusion_length = m->range_length;
794 } 794 }
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 5208828792e6..203b2e6a91cf 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work)
504 504
505 write = !!(fault->flags & PPR_FAULT_WRITE); 505 write = !!(fault->flags & PPR_FAULT_WRITE);
506 506
507 down_read(&fault->state->mm->mmap_sem);
507 npages = get_user_pages(fault->state->task, fault->state->mm, 508 npages = get_user_pages(fault->state->task, fault->state->mm,
508 fault->address, 1, write, 0, &page, NULL); 509 fault->address, 1, write, 0, &page, NULL);
510 up_read(&fault->state->mm->mmap_sem);
509 511
510 if (npages == 1) { 512 if (npages == 1) {
511 put_page(page); 513 put_page(page);
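get_user_pages() must be called with the task's mmap_sem held for reading; the amd_iommu_v2 fix brackets the call with down_read()/up_read(). A pthread model of that locking contract, with a rwlock standing in for mmap_sem and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static int get_user_pages_stub(unsigned long addr)
{
	/* the real call walks page tables; it requires mmap_sem held */
	printf("pinning page at %#lx under mmap_sem\n", addr);
	return 1;
}

static void do_fault(unsigned long addr)
{
	pthread_rwlock_rdlock(&mmap_sem);  /* down_read(&mm->mmap_sem) */
	int npages = get_user_pages_stub(addr);
	pthread_rwlock_unlock(&mmap_sem);  /* up_read(&mm->mmap_sem) */
	printf("pinned %d page(s)\n", npages);
}

int main(void)
{
	do_fault(0x1000);
	return 0;
}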
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9380be7b1895..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2178 ti->num_discard_bios = 1; 2178 ti->num_discard_bios = 1;
2179 ti->discards_supported = true; 2179 ti->discards_supported = true;
2180 ti->discard_zeroes_data_unsupported = true; 2180 ti->discard_zeroes_data_unsupported = true;
2181 /* Discard bios must be split on a block boundary */
2182 ti->split_discard_bios = true;
2181 2183
2182 cache->features = ca->features; 2184 cache->features = ca->features;
2183 ti->per_bio_data_size = get_per_bio_data_size(cache); 2185 ti->per_bio_data_size = get_per_bio_data_size(cache);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 784695d22fde..53b213226c01 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,6 @@
19#include <linux/crypto.h> 19#include <linux/crypto.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/backing-dev.h> 21#include <linux/backing-dev.h>
22#include <linux/percpu.h>
23#include <linux/atomic.h> 22#include <linux/atomic.h>
24#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
25#include <asm/page.h> 24#include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
43 struct bvec_iter iter_out; 42 struct bvec_iter iter_out;
44 sector_t cc_sector; 43 sector_t cc_sector;
45 atomic_t cc_pending; 44 atomic_t cc_pending;
45 struct ablkcipher_request *req;
46}; 46};
47 47
48/* 48/*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; 111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
112 112
113/* 113/*
114 * Duplicated per-CPU state for cipher. 114 * The fields in here must be read only after initialization.
115 */
116struct crypt_cpu {
117 struct ablkcipher_request *req;
118};
119
120/*
121 * The fields in here must be read only after initialization,
122 * changing state should be in crypt_cpu.
123 */ 115 */
124struct crypt_config { 116struct crypt_config {
125 struct dm_dev *dev; 117 struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
150 sector_t iv_offset; 142 sector_t iv_offset;
151 unsigned int iv_size; 143 unsigned int iv_size;
152 144
153 /*
154 * Duplicated per cpu state. Access through
155 * per_cpu_ptr() only.
156 */
157 struct crypt_cpu __percpu *cpu;
158
159 /* ESSIV: struct crypto_cipher *essiv_tfm */ 145 /* ESSIV: struct crypto_cipher *essiv_tfm */
160 void *iv_private; 146 void *iv_private;
161 struct crypto_ablkcipher **tfms; 147 struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
192static void kcryptd_queue_crypt(struct dm_crypt_io *io); 178static void kcryptd_queue_crypt(struct dm_crypt_io *io);
193static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); 179static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
194 180
195static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
196{
197 return this_cpu_ptr(cc->cpu);
198}
199
200/* 181/*
201 * Use this to access cipher attributes that are the same for each CPU. 182 * Use this to access cipher attributes that are the same for each CPU.
202 */ 183 */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
903static void crypt_alloc_req(struct crypt_config *cc, 884static void crypt_alloc_req(struct crypt_config *cc,
904 struct convert_context *ctx) 885 struct convert_context *ctx)
905{ 886{
906 struct crypt_cpu *this_cc = this_crypt_config(cc);
907 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); 887 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
908 888
909 if (!this_cc->req) 889 if (!ctx->req)
910 this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); 890 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
911 891
912 ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); 892 ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
913 ablkcipher_request_set_callback(this_cc->req, 893 ablkcipher_request_set_callback(ctx->req,
914 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 894 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
915 kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); 895 kcryptd_async_done, dmreq_of_req(cc, ctx->req));
916} 896}
917 897
918/* 898/*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
921static int crypt_convert(struct crypt_config *cc, 901static int crypt_convert(struct crypt_config *cc,
922 struct convert_context *ctx) 902 struct convert_context *ctx)
923{ 903{
924 struct crypt_cpu *this_cc = this_crypt_config(cc);
925 int r; 904 int r;
926 905
927 atomic_set(&ctx->cc_pending, 1); 906 atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
932 911
933 atomic_inc(&ctx->cc_pending); 912 atomic_inc(&ctx->cc_pending);
934 913
935 r = crypt_convert_block(cc, ctx, this_cc->req); 914 r = crypt_convert_block(cc, ctx, ctx->req);
936 915
937 switch (r) { 916 switch (r) {
938 /* async */ 917 /* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
941 reinit_completion(&ctx->restart); 920 reinit_completion(&ctx->restart);
942 /* fall through*/ 921 /* fall through*/
943 case -EINPROGRESS: 922 case -EINPROGRESS:
944 this_cc->req = NULL; 923 ctx->req = NULL;
945 ctx->cc_sector++; 924 ctx->cc_sector++;
946 continue; 925 continue;
947 926
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
1040 io->sector = sector; 1019 io->sector = sector;
1041 io->error = 0; 1020 io->error = 0;
1042 io->base_io = NULL; 1021 io->base_io = NULL;
1022 io->ctx.req = NULL;
1043 atomic_set(&io->io_pending, 0); 1023 atomic_set(&io->io_pending, 0);
1044 1024
1045 return io; 1025 return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
1065 if (!atomic_dec_and_test(&io->io_pending)) 1045 if (!atomic_dec_and_test(&io->io_pending))
1066 return; 1046 return;
1067 1047
1048 if (io->ctx.req)
1049 mempool_free(io->ctx.req, cc->req_pool);
1068 mempool_free(io, cc->io_pool); 1050 mempool_free(io, cc->io_pool);
1069 1051
1070 if (likely(!base_io)) 1052 if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
1492static void crypt_dtr(struct dm_target *ti) 1474static void crypt_dtr(struct dm_target *ti)
1493{ 1475{
1494 struct crypt_config *cc = ti->private; 1476 struct crypt_config *cc = ti->private;
1495 struct crypt_cpu *cpu_cc;
1496 int cpu;
1497 1477
1498 ti->private = NULL; 1478 ti->private = NULL;
1499 1479
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
1505 if (cc->crypt_queue) 1485 if (cc->crypt_queue)
1506 destroy_workqueue(cc->crypt_queue); 1486 destroy_workqueue(cc->crypt_queue);
1507 1487
1508 if (cc->cpu)
1509 for_each_possible_cpu(cpu) {
1510 cpu_cc = per_cpu_ptr(cc->cpu, cpu);
1511 if (cpu_cc->req)
1512 mempool_free(cpu_cc->req, cc->req_pool);
1513 }
1514
1515 crypt_free_tfms(cc); 1488 crypt_free_tfms(cc);
1516 1489
1517 if (cc->bs) 1490 if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
1530 if (cc->dev) 1503 if (cc->dev)
1531 dm_put_device(ti, cc->dev); 1504 dm_put_device(ti, cc->dev);
1532 1505
1533 if (cc->cpu)
1534 free_percpu(cc->cpu);
1535
1536 kzfree(cc->cipher); 1506 kzfree(cc->cipher);
1537 kzfree(cc->cipher_string); 1507 kzfree(cc->cipher_string);
1538 1508
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1588 if (tmp) 1558 if (tmp)
1589 DMWARN("Ignoring unexpected additional cipher options"); 1559 DMWARN("Ignoring unexpected additional cipher options");
1590 1560
1591 cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
1592 __alignof__(struct crypt_cpu));
1593 if (!cc->cpu) {
1594 ti->error = "Cannot allocate per cpu state";
1595 goto bad_mem;
1596 }
1597
1598 /* 1561 /*
1599 * For compatibility with the original dm-crypt mapping format, if 1562 * For compatibility with the original dm-crypt mapping format, if
1600 * only the cipher name is supplied, use cbc-plain. 1563 * only the cipher name is supplied, use cbc-plain.
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa009e865871..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
445 else 445 else
446 m->saved_queue_if_no_path = queue_if_no_path; 446 m->saved_queue_if_no_path = queue_if_no_path;
447 m->queue_if_no_path = queue_if_no_path; 447 m->queue_if_no_path = queue_if_no_path;
448 if (!m->queue_if_no_path)
449 dm_table_run_md_queue_async(m->ti->table);
450
451 spin_unlock_irqrestore(&m->lock, flags); 448 spin_unlock_irqrestore(&m->lock, flags);
452 449
450 if (!queue_if_no_path)
451 dm_table_run_md_queue_async(m->ti->table);
452
453 return 0; 453 return 0;
454} 454}
455 455
@@ -954,7 +954,7 @@ out:
954 */ 954 */
955static int reinstate_path(struct pgpath *pgpath) 955static int reinstate_path(struct pgpath *pgpath)
956{ 956{
957 int r = 0; 957 int r = 0, run_queue = 0;
958 unsigned long flags; 958 unsigned long flags;
959 struct multipath *m = pgpath->pg->m; 959 struct multipath *m = pgpath->pg->m;
960 960
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
978 978
979 if (!m->nr_valid_paths++) { 979 if (!m->nr_valid_paths++) {
980 m->current_pgpath = NULL; 980 m->current_pgpath = NULL;
981 dm_table_run_md_queue_async(m->ti->table); 981 run_queue = 1;
982 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { 982 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
983 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) 983 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
984 m->pg_init_in_progress++; 984 m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
991 991
992out: 992out:
993 spin_unlock_irqrestore(&m->lock, flags); 993 spin_unlock_irqrestore(&m->lock, flags);
994 if (run_queue)
995 dm_table_run_md_queue_async(m->ti->table);
994 996
995 return r; 997 return r;
996} 998}
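Both dm-mpath hunks move dm_table_run_md_queue_async() out from under m->lock: decide while locked, record the decision in a local, and act only after unlocking, since the callee is not safe to run under a spinlock. The shape of that pattern, modelled with a mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_valid_paths;

static void run_md_queue(void) { puts("kicking request queue (may sleep)"); }

static void reinstate_path(void)
{
	int run_queue = 0;

	pthread_mutex_lock(&lock);
	if (!nr_valid_paths++)  /* first path back: queue needs a kick */
		run_queue = 1;
	pthread_mutex_unlock(&lock);

	if (run_queue)          /* safe: no lock held any more */
		run_md_queue();
}

int main(void)
{
	reinstate_path(); /* kicks the queue */
	reinstate_path(); /* second path: no kick needed */
	return 0;
}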
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 13abade76ad9..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,6 +27,9 @@
27#define MAPPING_POOL_SIZE 1024 27#define MAPPING_POOL_SIZE 1024
28#define PRISON_CELLS 1024 28#define PRISON_CELLS 1024
29#define COMMIT_PERIOD HZ 29#define COMMIT_PERIOD HZ
30#define NO_SPACE_TIMEOUT_SECS 60
31
32static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
30 33
31DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, 34DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
32 "A percentage of time allocated for copy on write"); 35 "A percentage of time allocated for copy on write");
@@ -175,6 +178,7 @@ struct pool {
175 struct workqueue_struct *wq; 178 struct workqueue_struct *wq;
176 struct work_struct worker; 179 struct work_struct worker;
177 struct delayed_work waker; 180 struct delayed_work waker;
181 struct delayed_work no_space_timeout;
178 182
179 unsigned long last_commit_jiffies; 183 unsigned long last_commit_jiffies;
180 unsigned ref_count; 184 unsigned ref_count;
@@ -935,7 +939,7 @@ static int commit(struct pool *pool)
935{ 939{
936 int r; 940 int r;
937 941
938 if (get_pool_mode(pool) != PM_WRITE) 942 if (get_pool_mode(pool) >= PM_READ_ONLY)
939 return -EINVAL; 943 return -EINVAL;
940 944
941 r = dm_pool_commit_metadata(pool->pmd); 945 r = dm_pool_commit_metadata(pool->pmd);
@@ -1590,6 +1594,20 @@ static void do_waker(struct work_struct *ws)
1590 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 1594 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1591} 1595}
1592 1596
1597/*
1598 * We're holding onto IO to allow userland time to react. After the
1599 * timeout either the pool will have been resized (and thus back in
1600 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
1601 */
1602static void do_no_space_timeout(struct work_struct *ws)
1603{
1604 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
1605 no_space_timeout);
1606
1607 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
1608 set_pool_mode(pool, PM_READ_ONLY);
1609}
1610
1593/*----------------------------------------------------------------*/ 1611/*----------------------------------------------------------------*/
1594 1612
1595struct noflush_work { 1613struct noflush_work {
@@ -1654,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1654 struct pool_c *pt = pool->ti->private; 1672 struct pool_c *pt = pool->ti->private;
1655 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); 1673 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1656 enum pool_mode old_mode = get_pool_mode(pool); 1674 enum pool_mode old_mode = get_pool_mode(pool);
1675 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
1657 1676
1658 /* 1677 /*
1659 * Never allow the pool to transition to PM_WRITE mode if user 1678 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1715,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1715 pool->process_discard = process_discard; 1734 pool->process_discard = process_discard;
1716 pool->process_prepared_mapping = process_prepared_mapping; 1735 pool->process_prepared_mapping = process_prepared_mapping;
1717 pool->process_prepared_discard = process_prepared_discard_passdown; 1736 pool->process_prepared_discard = process_prepared_discard_passdown;
1737
1738 if (!pool->pf.error_if_no_space && no_space_timeout)
1739 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
1718 break; 1740 break;
1719 1741
1720 case PM_WRITE: 1742 case PM_WRITE:
@@ -2100,6 +2122,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2100 2122
2101 INIT_WORK(&pool->worker, do_worker); 2123 INIT_WORK(&pool->worker, do_worker);
2102 INIT_DELAYED_WORK(&pool->waker, do_waker); 2124 INIT_DELAYED_WORK(&pool->waker, do_waker);
2125 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2103 spin_lock_init(&pool->lock); 2126 spin_lock_init(&pool->lock);
2104 bio_list_init(&pool->deferred_flush_bios); 2127 bio_list_init(&pool->deferred_flush_bios);
2105 INIT_LIST_HEAD(&pool->prepared_mappings); 2128 INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2662,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti)
2662 struct pool *pool = pt->pool; 2685 struct pool *pool = pt->pool;
2663 2686
2664 cancel_delayed_work(&pool->waker); 2687 cancel_delayed_work(&pool->waker);
2688 cancel_delayed_work(&pool->no_space_timeout);
2665 flush_workqueue(pool->wq); 2689 flush_workqueue(pool->wq);
2666 (void) commit(pool); 2690 (void) commit(pool);
2667} 2691}
@@ -3487,6 +3511,9 @@ static void dm_thin_exit(void)
3487module_init(dm_thin_init); 3511module_init(dm_thin_init);
3488module_exit(dm_thin_exit); 3512module_exit(dm_thin_exit);
3489 3513
3514module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
3515MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
3516
3490MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); 3517MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
3491MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3518MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3492MODULE_LICENSE("GPL"); 3519MODULE_LICENSE("GPL");
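Given the S_IRUGO | S_IWUSR permissions above, the new knob should surface as /sys/module/dm_thin_pool/parameters/no_space_timeout, assuming the module is built under the name dm_thin_pool; treat that path as an educated guess rather than something this hunk states. A minimal standalone module showing the same delayed-work-plus-parameter shape, with hypothetical demo_* names:

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static unsigned timeout_secs = 60;
    module_param(timeout_secs, uint, S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(timeout_secs, "demo timeout in seconds");

    static struct delayed_work demo_work;

    static void demo_timeout(struct work_struct *ws)
    {
            /* dm-thin would degrade the pool mode here */
            pr_info("demo: timeout fired after %u seconds\n", timeout_secs);
    }

    static int __init demo_init(void)
    {
            INIT_DELAYED_WORK(&demo_work, demo_timeout);
            /* the parameter is read once at arm time, like ACCESS_ONCE() above */
            schedule_delayed_work(&demo_work, timeout_secs * HZ);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            cancel_delayed_work_sync(&demo_work);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

As in pool_postsuspend() above, the work must be cancelled before teardown; the sketch uses cancel_delayed_work_sync() where dm-thin pairs cancel_delayed_work() with flush_workqueue().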
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e8a1ce204036..cdd7c1b7259b 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
1109 * windows that fall outside that. 1109 * windows that fall outside that.
1110 */ 1110 */
1111 for (i = 0; i < n_win_sizes; i++) { 1111 for (i = 0; i < n_win_sizes; i++) {
1112 struct ov7670_win_size *win = &info->devtype->win_sizes[index]; 1112 struct ov7670_win_size *win = &info->devtype->win_sizes[i];
1113 if (info->min_width && win->width < info->min_width) 1113 if (info->min_width && win->width < info->min_width)
1114 continue; 1114 continue;
1115 if (info->min_height && win->height < info->min_height) 1115 if (info->min_height && win->height < info->min_height)
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index a4459301b5f8..ee0f57e01b56 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
1616 if (ret < 0) 1616 if (ret < 0)
1617 return -EINVAL; 1617 return -EINVAL;
1618 1618
1619 node_ep = v4l2_of_get_next_endpoint(node, NULL); 1619 node_ep = of_graph_get_next_endpoint(node, NULL);
1620 if (!node_ep) { 1620 if (!node_ep) {
1621 dev_warn(dev, "no endpoint defined for node: %s\n", 1621 dev_warn(dev, "no endpoint defined for node: %s\n",
1622 node->full_name); 1622 node->full_name);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index d5a7a135f75d..703560fa5e73 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
93 struct media_entity *ent; 93 struct media_entity *ent;
94 struct media_entity_desc u_ent; 94 struct media_entity_desc u_ent;
95 95
96 memset(&u_ent, 0, sizeof(u_ent));
96 if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) 97 if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
97 return -EFAULT; 98 return -EFAULT;
98 99
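The added memset() matters because u_ent is later copied back to userspace, so any padding bytes or fields the handler never writes would otherwise leak kernel stack contents. A userspace illustration of the padding half of that hazard, using a hypothetical struct layout:

    #include <stdio.h>
    #include <string.h>

    struct desc {
            char id;        /* 1 byte, then 3 padding bytes on most ABIs */
            int  value;
    };

    int main(void)
    {
            struct desc d;                  /* stack garbage, padding included */
            unsigned char *p = (unsigned char *)&d;
            size_t i;

            memset(&d, 0, sizeof(d));       /* the fix: zero everything first */
            d.id = 1;
            d.value = 42;

            for (i = 0; i < sizeof(d); i++)
                    printf("%02x ", p[i]);  /* padding now provably zero */
            printf("\n");
            return 0;
    }

Without the memset, the bytes between id and value keep whatever was on the stack, and a copy of the whole struct ships them to the caller.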
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index b4f12d00be05..656708252962 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq)
372{ 372{
373 struct vpbe_fh *fh = vb2_get_drv_priv(vq); 373 struct vpbe_fh *fh = vb2_get_drv_priv(vq);
374 struct vpbe_layer *layer = fh->layer; 374 struct vpbe_layer *layer = fh->layer;
375 struct vpbe_display *disp = fh->disp_dev;
376 unsigned long flags;
375 377
376 if (!vb2_is_streaming(vq)) 378 if (!vb2_is_streaming(vq))
377 return 0; 379 return 0;
378 380
379 /* release all active buffers */ 381 /* release all active buffers */
382 spin_lock_irqsave(&disp->dma_queue_lock, flags);
383 if (layer->cur_frm == layer->next_frm) {
384 vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
385 } else {
386 if (layer->cur_frm != NULL)
387 vb2_buffer_done(&layer->cur_frm->vb,
388 VB2_BUF_STATE_ERROR);
389 if (layer->next_frm != NULL)
390 vb2_buffer_done(&layer->next_frm->vb,
391 VB2_BUF_STATE_ERROR);
392 }
393
380 while (!list_empty(&layer->dma_queue)) { 394 while (!list_empty(&layer->dma_queue)) {
381 layer->next_frm = list_entry(layer->dma_queue.next, 395 layer->next_frm = list_entry(layer->dma_queue.next,
382 struct vpbe_disp_buffer, list); 396 struct vpbe_disp_buffer, list);
383 list_del(&layer->next_frm->list); 397 list_del(&layer->next_frm->list);
384 vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); 398 vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
385 } 399 }
386 400 spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
387 return 0; 401 return 0;
388} 402}
389 403
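This hunk, like the matching vpif_capture and vpif_display hunks further down, implements the videobuf2 contract that stop_streaming() must hand back every buffer the driver still owns, exactly once, in VB2_BUF_STATE_ERROR; note that cur_frm and next_frm may alias the same buffer. A userspace model of just the dedup logic, with toy types rather than the V4L2 API:

    #include <stdio.h>

    struct buf { int id; int done; };

    static void buffer_done(struct buf *b)
    {
            if (b->done++)
                    printf("BUG: buffer %d completed twice\n", b->id);
    }

    static void stop_streaming(struct buf *cur, struct buf *next)
    {
            if (cur == next) {
                    if (cur)
                            buffer_done(cur);   /* aliased: complete once */
            } else {
                    if (cur)
                            buffer_done(cur);
                    if (next)
                            buffer_done(next);
            }
    }

    int main(void)
    {
            struct buf a = { 0, 0 };

            stop_streaming(&a, &a);             /* the aliased case */
            printf("buffer 0 completed %d time(s)\n", a.done);
            return 0;
    }

Completing a buffer twice, or not at all, trips warnings in the vb2 core, which is why the equal-pointers case is handled separately.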
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index d762246eabf5..0379cb9f9a9c 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -734,6 +734,8 @@ static int vpfe_release(struct file *file)
734 } 734 }
735 vpfe_dev->io_usrs = 0; 735 vpfe_dev->io_usrs = 0;
736 vpfe_dev->numbuffers = config_params.numbuffers; 736 vpfe_dev->numbuffers = config_params.numbuffers;
737 videobuf_stop(&vpfe_dev->buffer_queue);
738 videobuf_mmap_free(&vpfe_dev->buffer_queue);
737 } 739 }
738 740
739 /* Decrement device usrs counter */ 741 /* Decrement device usrs counter */
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 756da78bac23..8dea0b84a3ad 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
358 358
359 common = &ch->common[VPIF_VIDEO_INDEX]; 359 common = &ch->common[VPIF_VIDEO_INDEX];
360 360
361 /* Disable channel as per its device type and channel id */
362 if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
363 enable_channel0(0);
364 channel0_intr_enable(0);
365 }
366 if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
367 (2 == common->started)) {
368 enable_channel1(0);
369 channel1_intr_enable(0);
370 }
371 common->started = 0;
372
361 /* release all active buffers */ 373 /* release all active buffers */
362 spin_lock_irqsave(&common->irqlock, flags); 374 spin_lock_irqsave(&common->irqlock, flags);
375 if (common->cur_frm == common->next_frm) {
376 vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
377 } else {
378 if (common->cur_frm != NULL)
379 vb2_buffer_done(&common->cur_frm->vb,
380 VB2_BUF_STATE_ERROR);
381 if (common->next_frm != NULL)
382 vb2_buffer_done(&common->next_frm->vb,
383 VB2_BUF_STATE_ERROR);
384 }
385
363 while (!list_empty(&common->dma_queue)) { 386 while (!list_empty(&common->dma_queue)) {
364 common->next_frm = list_entry(common->dma_queue.next, 387 common->next_frm = list_entry(common->dma_queue.next,
365 struct vpif_cap_buffer, list); 388 struct vpif_cap_buffer, list);
@@ -933,17 +956,6 @@ static int vpif_release(struct file *filep)
933 if (fh->io_allowed[VPIF_VIDEO_INDEX]) { 956 if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
934 /* Reset io_usrs member of channel object */ 957 /* Reset io_usrs member of channel object */
935 common->io_usrs = 0; 958 common->io_usrs = 0;
936 /* Disable channel as per its device type and channel id */
937 if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
938 enable_channel0(0);
939 channel0_intr_enable(0);
940 }
941 if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
942 (2 == common->started)) {
943 enable_channel1(0);
944 channel1_intr_enable(0);
945 }
946 common->started = 0;
947 /* Free buffers allocated */ 959 /* Free buffers allocated */
948 vb2_queue_release(&common->buffer_queue); 960 vb2_queue_release(&common->buffer_queue);
949 vb2_dma_contig_cleanup_ctx(common->alloc_ctx); 961 vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 0ac841e35aa4..aed41edd0501 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
320 320
321 common = &ch->common[VPIF_VIDEO_INDEX]; 321 common = &ch->common[VPIF_VIDEO_INDEX];
322 322
323 /* Disable channel */
324 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
325 enable_channel2(0);
326 channel2_intr_enable(0);
327 }
328 if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
329 (2 == common->started)) {
330 enable_channel3(0);
331 channel3_intr_enable(0);
332 }
333 common->started = 0;
334
323 /* release all active buffers */ 335 /* release all active buffers */
324 spin_lock_irqsave(&common->irqlock, flags); 336 spin_lock_irqsave(&common->irqlock, flags);
337 if (common->cur_frm == common->next_frm) {
338 vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
339 } else {
340 if (common->cur_frm != NULL)
341 vb2_buffer_done(&common->cur_frm->vb,
342 VB2_BUF_STATE_ERROR);
343 if (common->next_frm != NULL)
344 vb2_buffer_done(&common->next_frm->vb,
345 VB2_BUF_STATE_ERROR);
346 }
347
325 while (!list_empty(&common->dma_queue)) { 348 while (!list_empty(&common->dma_queue)) {
326 common->next_frm = list_entry(common->dma_queue.next, 349 common->next_frm = list_entry(common->dma_queue.next,
327 struct vpif_disp_buffer, list); 350 struct vpif_disp_buffer, list);
@@ -773,18 +796,6 @@ static int vpif_release(struct file *filep)
773 if (fh->io_allowed[VPIF_VIDEO_INDEX]) { 796 if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
774 /* Reset io_usrs member of channel object */ 797 /* Reset io_usrs member of channel object */
775 common->io_usrs = 0; 798 common->io_usrs = 0;
776 /* Disable channel */
777 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
778 enable_channel2(0);
779 channel2_intr_enable(0);
780 }
781 if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
782 (2 == common->started)) {
783 enable_channel3(0);
784 channel3_intr_enable(0);
785 }
786 common->started = 0;
787
788 /* Free buffers allocated */ 799 /* Free buffers allocated */
789 vb2_queue_release(&common->buffer_queue); 800 vb2_queue_release(&common->buffer_queue);
790 vb2_dma_contig_cleanup_ctx(common->alloc_ctx); 801 vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index da2fc86cc524..25dbf5b05a96 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = {
122 }, { 122 }, {
123 .name = "YUV 4:2:2 planar, Y/Cb/Cr", 123 .name = "YUV 4:2:2 planar, Y/Cb/Cr",
124 .fourcc = V4L2_PIX_FMT_YUV422P, 124 .fourcc = V4L2_PIX_FMT_YUV422P,
125 .depth = { 12 }, 125 .depth = { 16 },
126 .color = FIMC_FMT_YCBYCR422, 126 .color = FIMC_FMT_YCBYCR422,
127 .memplanes = 1, 127 .memplanes = 1,
128 .colplanes = 3, 128 .colplanes = 3,
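The depth fix is straight arithmetic: V4L2_PIX_FMT_YUV422P subsamples chroma horizontally only, so every 2 pixels carry 2 Y samples plus 1 Cb and 1 Cr sample at 8 bits each:

    (2 * 8 + 8 + 8) bits / 2 pixels = 32 / 2 = 16 bits per pixel

The old value of 12 is the 4:2:0 figure (4 * 8 + 8 + 8 = 48 bits per 4 pixels = 12 bpp), which is presumably where the wrong constant came from.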
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 3aecaf465094..f0c9c42867de 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
195 195
196 f_ref = 2UL * priv->cfg->clock / r_val; 196 f_ref = 2UL * priv->cfg->clock / r_val;
197 n_val = div_u64_rem(f_vco, f_ref, &k_val); 197 n_val = div_u64_rem(f_vco, f_ref, &k_val);
198 k_val_reg = 1UL * k_val * (1 << 20) / f_ref; 198 k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
199 199
200 ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff)); 200 ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
201 if (ret < 0) 201 if (ret < 0)
@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
348 if (ret < 0) 348 if (ret < 0)
349 goto err; 349 goto err;
350 350
351 ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \ 351 ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
352 fc2580_if_filter_lut[i].mul / 1000000000); 352 fc2580_if_filter_lut[i].mul, 1000000000));
353 if (ret < 0) 353 if (ret < 0)
354 goto err; 354 goto err;
355 355
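The switch to div_u64() fixes a 32-bit overflow: k_val is a remainder modulo f_ref, so it can approach f_ref, and f_ref can reach tens of MHz here, meaning the intermediate k_val * (1 << 20) can need around 46 bits. A worked example with illustrative numbers, say f_ref = 36,000,000 and k_val = 30,000,000:

    30,000,000 * 2^20 = 31,457,280,000,000  >  2^32 = 4,294,967,296

so the old 1UL multiply wraps on 32-bit machines. Promoting to 1ULL gives a 64-bit product, and div_u64() performs the 64-by-32 division without the native 64-bit '/' that 32-bit kernels cannot use; div_u64() comes from linux/math64.h, which the companion fc2580_priv.h hunk below adds. The same reasoning applies to the clock * mul / 1000000000 expression in the second hunk.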
diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h
index be38a9e637e0..646c99452136 100644
--- a/drivers/media/tuners/fc2580_priv.h
+++ b/drivers/media/tuners/fc2580_priv.h
@@ -22,6 +22,7 @@
22#define FC2580_PRIV_H 22#define FC2580_PRIV_H
23 23
24#include "fc2580.h" 24#include "fc2580.h"
25#include <linux/math64.h>
25 26
26struct fc2580_reg_val { 27struct fc2580_reg_val {
27 u8 reg; 28 u8 reg;
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile
index 7407b8338ccf..bc38f03394cd 100644
--- a/drivers/media/usb/dvb-usb-v2/Makefile
+++ b/drivers/media/usb/dvb-usb-v2/Makefile
@@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core
41ccflags-y += -I$(srctree)/drivers/media/dvb-frontends 41ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
42ccflags-y += -I$(srctree)/drivers/media/tuners 42ccflags-y += -I$(srctree)/drivers/media/tuners
43ccflags-y += -I$(srctree)/drivers/media/common 43ccflags-y += -I$(srctree)/drivers/media/common
44ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 61d196e8b3ab..dcbd392e6efc 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -24,7 +24,6 @@
24 24
25#include "rtl2830.h" 25#include "rtl2830.h"
26#include "rtl2832.h" 26#include "rtl2832.h"
27#include "rtl2832_sdr.h"
28 27
29#include "qt1010.h" 28#include "qt1010.h"
30#include "mt2060.h" 29#include "mt2060.h"
@@ -36,6 +35,45 @@
36#include "tua9001.h" 35#include "tua9001.h"
37#include "r820t.h" 36#include "r820t.h"
38 37
38/*
 39 * The RTL2832_SDR module is in staging. This logic is added to avoid any
 40 * hard dependency on the drivers/staging/ directory, as we want to compile
 41 * the mainline driver even when the whole staging directory is missing.
42 */
43#include <media/v4l2-subdev.h>
44
45#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
46struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
47 struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
48 struct v4l2_subdev *sd);
49#else
50static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
51 struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
52 struct v4l2_subdev *sd)
53{
54 return NULL;
55}
56#endif
57
58#ifdef CONFIG_MEDIA_ATTACH
59#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
60 void *__r = NULL; \
61 typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
62 if (__a) { \
63 __r = (void *) __a(ARGS); \
64 if (__r == NULL) \
65 symbol_put(FUNCTION); \
66 } \
67 __r; \
68})
69
70#else
71#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
72 FUNCTION(ARGS); \
73})
74
75#endif
76
39static int rtl28xxu_disable_rc; 77static int rtl28xxu_disable_rc;
40module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644); 78module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
41MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller"); 79MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
@@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
908 adap->fe[0]->ops.tuner_ops.get_rf_strength; 946 adap->fe[0]->ops.tuner_ops.get_rf_strength;
909 947
910 /* attach SDR */ 948 /* attach SDR */
911 dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 949 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
912 &rtl28xxu_rtl2832_fc0012_config, NULL); 950 &rtl28xxu_rtl2832_fc0012_config, NULL);
913 break; 951 break;
914 case TUNER_RTL2832_FC0013: 952 case TUNER_RTL2832_FC0013:
@@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
920 adap->fe[0]->ops.tuner_ops.get_rf_strength; 958 adap->fe[0]->ops.tuner_ops.get_rf_strength;
921 959
922 /* attach SDR */ 960 /* attach SDR */
923 dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 961 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
924 &rtl28xxu_rtl2832_fc0013_config, NULL); 962 &rtl28xxu_rtl2832_fc0013_config, NULL);
925 break; 963 break;
926 case TUNER_RTL2832_E4000: { 964 case TUNER_RTL2832_E4000: {
@@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
951 i2c_set_adapdata(i2c_adap_internal, d); 989 i2c_set_adapdata(i2c_adap_internal, d);
952 990
953 /* attach SDR */ 991 /* attach SDR */
954 dvb_attach(rtl2832_sdr_attach, adap->fe[0], 992 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0],
955 i2c_adap_internal, 993 i2c_adap_internal,
956 &rtl28xxu_rtl2832_e4000_config, sd); 994 &rtl28xxu_rtl2832_e4000_config, sd);
957 } 995 }
@@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
982 adap->fe[0]->ops.tuner_ops.get_rf_strength; 1020 adap->fe[0]->ops.tuner_ops.get_rf_strength;
983 1021
984 /* attach SDR */ 1022 /* attach SDR */
985 dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 1023 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
986 &rtl28xxu_rtl2832_r820t_config, NULL); 1024 &rtl28xxu_rtl2832_r820t_config, NULL);
987 break; 1025 break;
988 case TUNER_RTL2832_R828D: 1026 case TUNER_RTL2832_R828D:
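Two generic techniques are combined in the hunks above. IS_ENABLED(CONFIG_X) is true when an option is built-in or modular, so a single header can either declare the real attach function or substitute an inline stub returning NULL; and the dvb_attach_sdr() macro wraps symbol_request()/symbol_put() so the staging module is only pinned while an attach actually succeeds. A stripped-down sketch of the declaration-or-stub half, using a hypothetical CONFIG_FOO and foo_attach():

    #include <linux/kconfig.h>

    struct foo;
    struct device;

    #if IS_ENABLED(CONFIG_FOO)
    struct foo *foo_attach(struct device *dev);     /* provided elsewhere */
    #else
    static inline struct foo *foo_attach(struct device *dev)
    {
            return NULL;    /* optional module not configured: quiet no-op */
    }
    #endif

Callers then treat a NULL return as "feature absent" instead of failing the probe, which is exactly how the rtl28xxu tuner attach paths behave above.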
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 7277dbd2afcd..ecbcb39feb71 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = {
1430 {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, 1430 {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
1431 {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, 1431 {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
1432 {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, 1432 {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
1433#if !IS_ENABLED(CONFIG_USB_SN9C102)
1434 {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, 1433 {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
1435 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, 1434 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
1436#endif
1437 {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */ 1435 {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
1438 {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, 1436 {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
1439 {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)}, 1437 {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 04b2daf567be..7e2411c36419 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
178 178
179static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 179static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
180{ 180{
181 if (get_user(kp->type, &up->type))
182 return -EFAULT;
183
181 switch (kp->type) { 184 switch (kp->type) {
182 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 185 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
183 case V4L2_BUF_TYPE_VIDEO_OUTPUT: 186 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
204 207
205static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 208static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
206{ 209{
207 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) || 210 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
208 get_user(kp->type, &up->type)) 211 return -EFAULT;
209 return -EFAULT;
210 return __get_v4l2_format32(kp, up); 212 return __get_v4l2_format32(kp, up);
211} 213}
212 214
213static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) 215static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
214{ 216{
215 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || 217 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
216 copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt))) 218 copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
217 return -EFAULT; 219 return -EFAULT;
218 return __get_v4l2_format32(&kp->format, &up->format); 220 return __get_v4l2_format32(&kp->format, &up->format);
219} 221}
220 222
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..93580a47cc54 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
82} 82}
83 83
84/* Forward declaration */ 84/* Forward declaration */
85static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); 85static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
86 bool strict_match);
86static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); 87static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
87static void rlb_src_unlink(struct bonding *bond, u32 index); 88static void rlb_src_unlink(struct bonding *bond, u32 index);
88static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, 89static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
459 460
460 bond->alb_info.rlb_promisc_timeout_counter = 0; 461 bond->alb_info.rlb_promisc_timeout_counter = 0;
461 462
462 alb_send_learning_packets(bond->curr_active_slave, addr); 463 alb_send_learning_packets(bond->curr_active_slave, addr, true);
463} 464}
464 465
465/* slave being removed should not be active at this point 466/* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
995/*********************** tlb/rlb shared functions *********************/ 996/*********************** tlb/rlb shared functions *********************/
996 997
997static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], 998static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
998 u16 vid) 999 __be16 vlan_proto, u16 vid)
999{ 1000{
1000 struct learning_pkt pkt; 1001 struct learning_pkt pkt;
1001 struct sk_buff *skb; 1002 struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
1021 skb->dev = slave->dev; 1022 skb->dev = slave->dev;
1022 1023
1023 if (vid) { 1024 if (vid) {
1024 skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid); 1025 skb = vlan_put_tag(skb, vlan_proto, vid);
1025 if (!skb) { 1026 if (!skb) {
1026 pr_err("%s: Error: failed to insert VLAN tag\n", 1027 pr_err("%s: Error: failed to insert VLAN tag\n",
1027 slave->bond->dev->name); 1028 slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
1032 dev_queue_xmit(skb); 1033 dev_queue_xmit(skb);
1033} 1034}
1034 1035
1035 1036static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
1036static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) 1037 bool strict_match)
1037{ 1038{
1038 struct bonding *bond = bond_get_bond_by_slave(slave); 1039 struct bonding *bond = bond_get_bond_by_slave(slave);
1039 struct net_device *upper; 1040 struct net_device *upper;
1040 struct list_head *iter; 1041 struct list_head *iter;
1041 1042
1042 /* send untagged */ 1043 /* send untagged */
1043 alb_send_lp_vid(slave, mac_addr, 0); 1044 alb_send_lp_vid(slave, mac_addr, 0, 0);
1044 1045
1045 /* loop through vlans and send one packet for each */ 1046 /* loop through vlans and send one packet for each */
1046 rcu_read_lock(); 1047 rcu_read_lock();
1047 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { 1048 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
1048 if (upper->priv_flags & IFF_802_1Q_VLAN) 1049 if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
1049 alb_send_lp_vid(slave, mac_addr, 1050 if (strict_match &&
1050 vlan_dev_vlan_id(upper)); 1051 ether_addr_equal_64bits(mac_addr,
1052 upper->dev_addr)) {
1053 alb_send_lp_vid(slave, mac_addr,
1054 vlan_dev_vlan_proto(upper),
1055 vlan_dev_vlan_id(upper));
1056 } else if (!strict_match) {
1057 alb_send_lp_vid(slave, upper->dev_addr,
1058 vlan_dev_vlan_proto(upper),
1059 vlan_dev_vlan_id(upper));
1060 }
1061 }
1051 } 1062 }
1052 rcu_read_unlock(); 1063 rcu_read_unlock();
1053} 1064}
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
1107 1118
1108 /* fasten the change in the switch */ 1119 /* fasten the change in the switch */
1109 if (SLAVE_IS_OK(slave1)) { 1120 if (SLAVE_IS_OK(slave1)) {
1110 alb_send_learning_packets(slave1, slave1->dev->dev_addr); 1121 alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
1111 if (bond->alb_info.rlb_enabled) { 1122 if (bond->alb_info.rlb_enabled) {
1112 /* inform the clients that the mac address 1123 /* inform the clients that the mac address
1113 * has changed 1124 * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
1119 } 1130 }
1120 1131
1121 if (SLAVE_IS_OK(slave2)) { 1132 if (SLAVE_IS_OK(slave2)) {
1122 alb_send_learning_packets(slave2, slave2->dev->dev_addr); 1133 alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
1123 if (bond->alb_info.rlb_enabled) { 1134 if (bond->alb_info.rlb_enabled) {
1124 /* inform the clients that the mac address 1135 /* inform the clients that the mac address
1125 * has changed 1136 * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
1490 1501
1491 /* send learning packets */ 1502 /* send learning packets */
1492 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { 1503 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
1504 bool strict_match;
1505
1493 /* change of curr_active_slave involves swapping of mac addresses. 1506 /* change of curr_active_slave involves swapping of mac addresses.
1494 * in order to avoid this swapping from happening while 1507 * in order to avoid this swapping from happening while
1495 * sending the learning packets, the curr_slave_lock must be held for 1508 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
1497 */ 1510 */
1498 read_lock(&bond->curr_slave_lock); 1511 read_lock(&bond->curr_slave_lock);
1499 1512
1500 bond_for_each_slave_rcu(bond, slave, iter) 1513 bond_for_each_slave_rcu(bond, slave, iter) {
1501 alb_send_learning_packets(slave, slave->dev->dev_addr); 1514 /* If updating current_active, use all currently
1515 * user mac addreses (!strict_match). Otherwise, only
1516 * use mac of the slave device.
1517 */
1518 strict_match = (slave != bond->curr_active_slave);
1519 alb_send_learning_packets(slave, slave->dev->dev_addr,
1520 strict_match);
1521 }
1502 1522
1503 read_unlock(&bond->curr_slave_lock); 1523 read_unlock(&bond->curr_slave_lock);
1504 1524
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1721 } else { 1741 } else {
1722 /* set the new_slave to the bond mac address */ 1742 /* set the new_slave to the bond mac address */
1723 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); 1743 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
1724 alb_send_learning_packets(new_slave, bond->dev->dev_addr); 1744 alb_send_learning_packets(new_slave, bond->dev->dev_addr,
1745 false);
1725 } 1746 }
1726 1747
1727 write_lock_bh(&bond->curr_slave_lock); 1748 write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1764 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); 1785 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
1765 1786
1766 read_lock(&bond->lock); 1787 read_lock(&bond->lock);
1767 alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); 1788 alb_send_learning_packets(bond->curr_active_slave,
1789 bond_dev->dev_addr, false);
1768 if (bond->alb_info.rlb_enabled) { 1790 if (bond->alb_info.rlb_enabled) {
1769 /* inform clients mac address has changed */ 1791 /* inform clients mac address has changed */
1770 rlb_req_update_slave_clients(bond, bond->curr_active_slave); 1792 rlb_req_update_slave_clients(bond, bond->curr_active_slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..d3a67896d435 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2126 */ 2126 */
2127static void bond_arp_send(struct net_device *slave_dev, int arp_op, 2127static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2128 __be32 dest_ip, __be32 src_ip, 2128 __be32 dest_ip, __be32 src_ip,
2129 struct bond_vlan_tag *inner, 2129 struct bond_vlan_tag *tags)
2130 struct bond_vlan_tag *outer)
2131{ 2130{
2132 struct sk_buff *skb; 2131 struct sk_buff *skb;
2132 int i;
2133 2133
2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", 2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
2135 arp_op, slave_dev->name, &dest_ip, &src_ip); 2135 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2141 net_err_ratelimited("ARP packet allocation failed\n"); 2141 net_err_ratelimited("ARP packet allocation failed\n");
2142 return; 2142 return;
2143 } 2143 }
2144 if (outer->vlan_id) {
2145 if (inner->vlan_id) {
2146 pr_debug("inner tag: proto %X vid %X\n",
2147 ntohs(inner->vlan_proto), inner->vlan_id);
2148 skb = __vlan_put_tag(skb, inner->vlan_proto,
2149 inner->vlan_id);
2150 if (!skb) {
2151 net_err_ratelimited("failed to insert inner VLAN tag\n");
2152 return;
2153 }
2154 }
2155 2144
2156 pr_debug("outer reg: proto %X vid %X\n", 2145 /* Go through all the tags backwards and add them to the packet */
2157 ntohs(outer->vlan_proto), outer->vlan_id); 2146 for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
2158 skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); 2147 if (!tags[i].vlan_id)
2148 continue;
2149
2150 pr_debug("inner tag: proto %X vid %X\n",
2151 ntohs(tags[i].vlan_proto), tags[i].vlan_id);
2152 skb = __vlan_put_tag(skb, tags[i].vlan_proto,
2153 tags[i].vlan_id);
2154 if (!skb) {
2155 net_err_ratelimited("failed to insert inner VLAN tag\n");
2156 return;
2157 }
2158 }
2159 /* Set the outer tag */
2160 if (tags[0].vlan_id) {
2161 pr_debug("outer tag: proto %X vid %X\n",
2162 ntohs(tags[0].vlan_proto), tags[0].vlan_id);
2163 skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
2159 if (!skb) { 2164 if (!skb) {
2160 net_err_ratelimited("failed to insert outer VLAN tag\n"); 2165 net_err_ratelimited("failed to insert outer VLAN tag\n");
2161 return; 2166 return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2164 arp_xmit(skb); 2169 arp_xmit(skb);
2165} 2170}
2166 2171
2172/* Validate the device path between the @start_dev and the @end_dev.
2173 * The path is valid if the @end_dev is reachable through device
2174 * stacking.
2175 * When the path is validated, collect any vlan information in the
2176 * path.
2177 */
2178static bool bond_verify_device_path(struct net_device *start_dev,
2179 struct net_device *end_dev,
2180 struct bond_vlan_tag *tags)
2181{
2182 struct net_device *upper;
2183 struct list_head *iter;
2184 int idx;
2185
2186 if (start_dev == end_dev)
2187 return true;
2188
2189 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2190 if (bond_verify_device_path(upper, end_dev, tags)) {
2191 if (is_vlan_dev(upper)) {
2192 idx = vlan_get_encap_level(upper);
2193 if (idx >= BOND_MAX_VLAN_ENCAP)
2194 return false;
2195
2196 tags[idx].vlan_proto =
2197 vlan_dev_vlan_proto(upper);
2198 tags[idx].vlan_id = vlan_dev_vlan_id(upper);
2199 }
2200 return true;
2201 }
2202 }
2203
2204 return false;
2205}
2167 2206
2168static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2207static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2169{ 2208{
2170 struct net_device *upper, *vlan_upper;
2171 struct list_head *iter, *vlan_iter;
2172 struct rtable *rt; 2209 struct rtable *rt;
2173 struct bond_vlan_tag inner, outer; 2210 struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
2174 __be32 *targets = bond->params.arp_targets, addr; 2211 __be32 *targets = bond->params.arp_targets, addr;
2175 int i; 2212 int i;
2213 bool ret;
2176 2214
2177 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { 2215 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2178 pr_debug("basa: target %pI4\n", &targets[i]); 2216 pr_debug("basa: target %pI4\n", &targets[i]);
2179 inner.vlan_proto = 0; 2217 memset(tags, 0, sizeof(tags));
2180 inner.vlan_id = 0;
2181 outer.vlan_proto = 0;
2182 outer.vlan_id = 0;
2183 2218
2184 /* Find out through which dev should the packet go */ 2219 /* Find out through which dev should the packet go */
2185 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2220 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2192 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", 2227 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2193 bond->dev->name, 2228 bond->dev->name,
2194 &targets[i]); 2229 &targets[i]);
2195 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); 2230 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2231 0, tags);
2196 continue; 2232 continue;
2197 } 2233 }
2198 2234
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2201 goto found; 2237 goto found;
2202 2238
2203 rcu_read_lock(); 2239 rcu_read_lock();
2204 /* first we search only for vlan devices. for every vlan 2240 ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
2205 * found we verify its upper dev list, searching for the
2206 * rt->dst.dev. If found we save the tag of the vlan and
2207 * proceed to send the packet.
2208 */
2209 netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
2210 vlan_iter) {
2211 if (!is_vlan_dev(vlan_upper))
2212 continue;
2213
2214 if (vlan_upper == rt->dst.dev) {
2215 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2216 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2217 rcu_read_unlock();
2218 goto found;
2219 }
2220 netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
2221 iter) {
2222 if (upper == rt->dst.dev) {
2223 /* If the upper dev is a vlan dev too,
2224 * set the vlan tag to inner tag.
2225 */
2226 if (is_vlan_dev(upper)) {
2227 inner.vlan_proto = vlan_dev_vlan_proto(upper);
2228 inner.vlan_id = vlan_dev_vlan_id(upper);
2229 }
2230 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2231 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2232 rcu_read_unlock();
2233 goto found;
2234 }
2235 }
2236 }
2237
2238 /* if the device we're looking for is not on top of any of
2239 * our upper vlans, then just search for any dev that
2240 * matches, and in case it's a vlan - save the id
2241 */
2242 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
2243 if (upper == rt->dst.dev) {
2244 rcu_read_unlock();
2245 goto found;
2246 }
2247 }
2248 rcu_read_unlock(); 2241 rcu_read_unlock();
2249 2242
2243 if (ret)
2244 goto found;
2245
2250 /* Not our device - skip */ 2246 /* Not our device - skip */
2251 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2247 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2252 bond->dev->name, &targets[i], 2248 bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
2259 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); 2255 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2260 ip_rt_put(rt); 2256 ip_rt_put(rt);
2261 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2257 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2262 addr, &inner, &outer); 2258 addr, tags);
2263 } 2259 }
2264} 2260}
2265 2261
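The rewrite above replaces two special-cased loops with a recursion that handles arbitrary, bounded VLAN stacking: tags are recorded as the recursion unwinds, indexed by encapsulation level, so tags[0] is always the outermost tag that bond_arp_send() applies last. A self-contained userspace model of the walk, with toy types and vlan_get_encap_level() replaced by a stored field:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_VLAN_ENCAP 2

    struct tag { unsigned short proto, vid; };

    struct dev {
            const char *name;
            bool is_vlan;
            int encap_level;        /* 0 = outermost vlan on the bond */
            unsigned short vid;
            struct dev *upper[4];   /* devices stacked on top of this one */
            int n_upper;
    };

    static bool verify_path(struct dev *start, struct dev *end,
                            struct tag *tags)
    {
            int i;

            if (start == end)
                    return true;

            for (i = 0; i < start->n_upper; i++) {
                    struct dev *upper = start->upper[i];

                    if (verify_path(upper, end, tags)) {
                            if (upper->is_vlan) {
                                    if (upper->encap_level >= MAX_VLAN_ENCAP)
                                            return false;
                                    tags[upper->encap_level].proto = 0x8100;
                                    tags[upper->encap_level].vid = upper->vid;
                            }
                            return true;
                    }
            }
            return false;
    }

    int main(void)
    {
            /* bond0 <- vlan10 (outer tag) <- vlan20 (inner tag, Q-in-Q) */
            struct dev vlan20 = { "vlan20", true,  1, 20, { NULL },    0 };
            struct dev vlan10 = { "vlan10", true,  0, 10, { &vlan20 }, 1 };
            struct dev bond0  = { "bond0",  false, 0, 0,  { &vlan10 }, 1 };
            struct tag tags[MAX_VLAN_ENCAP];

            memset(tags, 0, sizeof(tags));
            if (verify_path(&bond0, &vlan20, tags))
                    printf("outer vid %u, inner vid %u\n",
                           tags[0].vid, tags[1].vid);
            return 0;
    }

Running this prints "outer vid 10, inner vid 20" for the stacked topology, matching the BOND_MAX_VLAN_ENCAP = 2 bound added in bonding.h below.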
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..832070298446 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
125static const struct bond_opt_value bond_intmax_tbl[] = { 125static const struct bond_opt_value bond_intmax_tbl[] = {
126 { "off", 0, BOND_VALFLAG_DEFAULT}, 126 { "off", 0, BOND_VALFLAG_DEFAULT},
127 { "maxval", INT_MAX, BOND_VALFLAG_MAX}, 127 { "maxval", INT_MAX, BOND_VALFLAG_MAX},
128 { NULL, -1, 0}
128}; 129};
129 130
130static const struct bond_opt_value bond_lacp_rate_tbl[] = { 131static const struct bond_opt_value bond_lacp_rate_tbl[] = {
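The added { NULL, -1, 0} entry restores the convention the option tables appear to rely on: walks terminate at a NULL string, so a missing sentinel sends lookups off the end of the array. The idiom in isolation, with toy names:

    #include <stdio.h>
    #include <string.h>

    struct opt_value { const char *string; int value; };

    static const struct opt_value intmax_tbl[] = {
            { "off",    0 },
            { "maxval", 2147483647 },
            { NULL,     -1 }            /* sentinel terminates the walk */
    };

    static const struct opt_value *opt_find(const char *name)
    {
            const struct opt_value *v;

            for (v = intmax_tbl; v->string; v++)
                    if (!strcmp(v->string, name))
                            return v;
            return NULL;                /* not found, never out of bounds */
    }

    int main(void)
    {
            const struct opt_value *v = opt_find("maxval");

            printf("%s -> %d\n", v ? v->string : "?", v ? v->value : -1);
            return 0;
    }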
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..00bea320e3b5 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
36 36
37#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" 37#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
38 38
39#define BOND_MAX_VLAN_ENCAP 2
39#define BOND_MAX_ARP_TARGETS 16 40#define BOND_MAX_ARP_TARGETS 16
40 41
41#define BOND_DEFAULT_MIIMON 100 42#define BOND_DEFAULT_MIIMON 100
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 8ab7103d4f44..61ffc12d8fd8 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
14 SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com) 14 SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
15 boards like am335x, dm814x, dm813x and dm811x. 15 boards like am335x, dm814x, dm813x and dm811x.
16 16
17config CAN_C_CAN_STRICT_FRAME_ORDERING
18 bool "Force a strict RX CAN frame order (may cause frame loss)"
19 ---help---
20 The RX split buffer prevents packet reordering but can cause packet
21 loss. Only enable this option when you accept to lose CAN frames
22 in favour of getting the received CAN frames in the correct order.
23
24config CAN_C_CAN_PCI 17config CAN_C_CAN_PCI
25 tristate "Generic PCI Bus based C_CAN/D_CAN driver" 18 tristate "Generic PCI Bus based C_CAN/D_CAN driver"
26 depends on PCI 19 depends on PCI
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a2ca820b5373..95e04e2002da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
732static inline void c_can_rx_object_get(struct net_device *dev, 732static inline void c_can_rx_object_get(struct net_device *dev,
733 struct c_can_priv *priv, u32 obj) 733 struct c_can_priv *priv, u32 obj)
734{ 734{
735#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
736 if (obj < C_CAN_MSG_RX_LOW_LAST)
737 c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
738 else
739#endif
740 c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); 735 c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
741} 736}
742 737
743static inline void c_can_rx_finalize(struct net_device *dev, 738static inline void c_can_rx_finalize(struct net_device *dev,
744 struct c_can_priv *priv, u32 obj) 739 struct c_can_priv *priv, u32 obj)
745{ 740{
746#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
747 if (obj < C_CAN_MSG_RX_LOW_LAST)
748 priv->rxmasked |= BIT(obj - 1);
749 else if (obj == C_CAN_MSG_RX_LOW_LAST) {
750 priv->rxmasked = 0;
751 /* activate all lower message objects */
752 c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
753 }
754#endif
755 if (priv->type != BOSCH_D_CAN) 741 if (priv->type != BOSCH_D_CAN)
756 c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); 742 c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
757} 743}
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
799{ 785{
800 u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); 786 u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
801 787
802#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
803 pend &= ~priv->rxmasked;
804#endif
805 return pend; 788 return pend;
806} 789}
807 790
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
814 * has arrived. To work-around this issue, we keep two groups of message 797 * has arrived. To work-around this issue, we keep two groups of message
815 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. 798 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
816 * 799 *
817 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
818 *
819 * To ensure in-order frame reception we use the following
820 * approach while re-activating a message object to receive further
821 * frames:
822 * - if the current message object number is lower than
823 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
824 * the INTPND bit.
825 * - if the current message object number is equal to
826 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
827 * receive message objects.
828 * - if the current message object number is greater than
829 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
830 * only this message object.
831 *
832 * This can cause packet loss!
833 *
834 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
835 *
836 * We clear the newdat bit right away. 800 * We clear the newdat bit right away.
837 * 801 *
838 * This can result in packet reordering when the readout is slow. 802 * This can result in packet reordering when the readout is slow.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
551{ 551{
552 struct sja1000_priv *priv; 552 struct sja1000_priv *priv;
553 struct peak_pci_chan *chan; 553 struct peak_pci_chan *chan;
554 struct net_device *dev; 554 struct net_device *dev, *prev_dev;
555 void __iomem *cfg_base, *reg_base; 555 void __iomem *cfg_base, *reg_base;
556 u16 sub_sys_id, icr; 556 u16 sub_sys_id, icr;
557 int i, err, channels; 557 int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
688 writew(0x0, cfg_base + PITA_ICR + 2); 688 writew(0x0, cfg_base + PITA_ICR + 2);
689 689
690 chan = NULL; 690 chan = NULL;
691 for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { 691 for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
692 unregister_sja1000dev(dev);
693 free_sja1000dev(dev);
694 priv = netdev_priv(dev); 692 priv = netdev_priv(dev);
695 chan = priv->priv; 693 chan = priv->priv;
694 prev_dev = chan->prev_dev;
695
696 unregister_sja1000dev(dev);
697 free_sja1000dev(dev);
696 } 698 }
697 699
698 /* free any PCIeC resources too */ 700 /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
726 728
727 /* Loop over all registered devices */ 729 /* Loop over all registered devices */
728 while (1) { 730 while (1) {
731 struct net_device *prev_dev = chan->prev_dev;
732
729 dev_info(&pdev->dev, "removing device %s\n", dev->name); 733 dev_info(&pdev->dev, "removing device %s\n", dev->name);
730 unregister_sja1000dev(dev); 734 unregister_sja1000dev(dev);
731 free_sja1000dev(dev); 735 free_sja1000dev(dev);
732 dev = chan->prev_dev; 736 dev = prev_dev;
733 737
734 if (!dev) { 738 if (!dev) {
735 /* do that only for first channel */ 739 /* do that only for first channel */
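Both peak_pci hunks fix the same use-after-free shape: the loops used to free a device and then read chan->prev_dev through memory hanging off the freed device. The rule, capture the back-pointer before releasing the node, in isolation:

    #include <stdlib.h>

    struct node { struct node *prev; };

    static void destroy_chain(struct node *tail)
    {
            while (tail) {
                    struct node *prev = tail->prev; /* read before free */

                    free(tail);                     /* now safe to release */
                    tail = prev;                    /* never touch freed mem */
            }
    }

    int main(void)
    {
            struct node *a = calloc(1, sizeof(*a));
            struct node *b = calloc(1, sizeof(*b));

            if (!a || !b)
                    return 1;
            b->prev = a;            /* chain: b -> a -> NULL */
            destroy_chain(b);
            return 0;
    }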
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..d7401017a3f1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
35source "drivers/net/ethernet/chelsio/Kconfig" 35source "drivers/net/ethernet/chelsio/Kconfig"
36source "drivers/net/ethernet/cirrus/Kconfig" 36source "drivers/net/ethernet/cirrus/Kconfig"
37source "drivers/net/ethernet/cisco/Kconfig" 37source "drivers/net/ethernet/cisco/Kconfig"
38
39config CX_ECAT
40 tristate "Beckhoff CX5020 EtherCAT master support"
41 depends on PCI
42 ---help---
 43 Driver for the EtherCAT master module located on the CCAT FPGA,
 44 which can be found on the Beckhoff CX5020 and possibly other
 45 Beckhoff CX series industrial PCs.
46
47 To compile this driver as a module, choose M here. The module
48 will be called ec_bhf.
49
38source "drivers/net/ethernet/davicom/Kconfig" 50source "drivers/net/ethernet/davicom/Kconfig"
39 51
40config DNET 52config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ 21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ 22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ 23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
24obj-$(CONFIG_CX_ECAT) += ec_bhf.o
24obj-$(CONFIG_DM9000) += davicom/ 25obj-$(CONFIG_DM9000) += davicom/
25obj-$(CONFIG_DNET) += dnet.o 26obj-$(CONFIG_DNET) += dnet.o
26obj-$(CONFIG_NET_VENDOR_DEC) += dec/ 27obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o 5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ 6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
7altera_msgdma.o altera_sgdma.o altera_utils.o 7altera_msgdma.o altera_sgdma.o altera_utils.o
8ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 4d1f2fdd5c32..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
37void msgdma_reset(struct altera_tse_private *priv) 37void msgdma_reset(struct altera_tse_private *priv)
38{ 38{
39 int counter; 39 int counter;
40 struct msgdma_csr *txcsr =
41 (struct msgdma_csr *)priv->tx_dma_csr;
42 struct msgdma_csr *rxcsr =
43 (struct msgdma_csr *)priv->rx_dma_csr;
44 40
45 /* Reset Rx mSGDMA */ 41 /* Reset Rx mSGDMA */
46 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 42 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
47 iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); 43 msgdma_csroffs(status));
44 csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
45 msgdma_csroffs(control));
48 46
49 counter = 0; 47 counter = 0;
50 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 48 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
51 if (tse_bit_is_clear(&rxcsr->status, 49 if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
52 MSGDMA_CSR_STAT_RESETTING)) 50 MSGDMA_CSR_STAT_RESETTING))
53 break; 51 break;
54 udelay(1); 52 udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
59 "TSE Rx mSGDMA resetting bit never cleared!\n"); 57 "TSE Rx mSGDMA resetting bit never cleared!\n");
60 58
61 /* clear all status bits */ 59 /* clear all status bits */
62 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 60 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
63 61
64 /* Reset Tx mSGDMA */ 62 /* Reset Tx mSGDMA */
65 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 63 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
66 iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); 64 msgdma_csroffs(status));
65
66 csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
67 msgdma_csroffs(control));
67 68
68 counter = 0; 69 counter = 0;
69 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 70 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
70 if (tse_bit_is_clear(&txcsr->status, 71 if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
71 MSGDMA_CSR_STAT_RESETTING)) 72 MSGDMA_CSR_STAT_RESETTING))
72 break; 73 break;
73 udelay(1); 74 udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
78 "TSE Tx mSGDMA resetting bit never cleared!\n"); 79 "TSE Tx mSGDMA resetting bit never cleared!\n");
79 80
80 /* clear all status bits */ 81 /* clear all status bits */
81 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 82 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
82} 83}
83 84
84void msgdma_disable_rxirq(struct altera_tse_private *priv) 85void msgdma_disable_rxirq(struct altera_tse_private *priv)
85{ 86{
86 struct msgdma_csr *csr = priv->rx_dma_csr; 87 tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
87 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 88 MSGDMA_CSR_CTL_GLOBAL_INTR);
88} 89}
89 90
90void msgdma_enable_rxirq(struct altera_tse_private *priv) 91void msgdma_enable_rxirq(struct altera_tse_private *priv)
91{ 92{
92 struct msgdma_csr *csr = priv->rx_dma_csr; 93 tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
93 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 94 MSGDMA_CSR_CTL_GLOBAL_INTR);
94} 95}
95 96
96void msgdma_disable_txirq(struct altera_tse_private *priv) 97void msgdma_disable_txirq(struct altera_tse_private *priv)
97{ 98{
98 struct msgdma_csr *csr = priv->tx_dma_csr; 99 tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
99 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 100 MSGDMA_CSR_CTL_GLOBAL_INTR);
100} 101}
101 102
102void msgdma_enable_txirq(struct altera_tse_private *priv) 103void msgdma_enable_txirq(struct altera_tse_private *priv)
103{ 104{
104 struct msgdma_csr *csr = priv->tx_dma_csr; 105 tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
105 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 106 MSGDMA_CSR_CTL_GLOBAL_INTR);
106} 107}
107 108
108void msgdma_clear_rxirq(struct altera_tse_private *priv) 109void msgdma_clear_rxirq(struct altera_tse_private *priv)
109{ 110{
110 struct msgdma_csr *csr = priv->rx_dma_csr; 111 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
111 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
112} 112}
113 113
114void msgdma_clear_txirq(struct altera_tse_private *priv) 114void msgdma_clear_txirq(struct altera_tse_private *priv)
115{ 115{
116 struct msgdma_csr *csr = priv->tx_dma_csr; 116 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
117 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
118} 117}
119 118
120/* return 0 to indicate transmit is pending */ 119/* return 0 to indicate transmit is pending */
121int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 120int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
122{ 121{
123 struct msgdma_extended_desc *desc = priv->tx_dma_desc; 122 csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
124 123 msgdma_descroffs(read_addr_lo));
125 iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); 124 csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
126 iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); 125 msgdma_descroffs(read_addr_hi));
127 iowrite32(0, &desc->write_addr_lo); 126 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
128 iowrite32(0, &desc->write_addr_hi); 127 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
129 iowrite32(buffer->len, &desc->len); 128 csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
130 iowrite32(0, &desc->burst_seq_num); 129 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
131 iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); 130 csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
132 iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); 131 msgdma_descroffs(stride));
132 csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
133 msgdma_descroffs(control));
133 return 0; 134 return 0;
134} 135}
135 136
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
138 u32 ready = 0; 139 u32 ready = 0;
139 u32 inuse; 140 u32 inuse;
140 u32 status; 141 u32 status;
141 struct msgdma_csr *txcsr =
142 (struct msgdma_csr *)priv->tx_dma_csr;
143 142
144 /* Get number of sent descriptors */ 143 /* Get number of sent descriptors */
145 inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; 144 inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
145 & 0xffff;
146 146
147 if (inuse) { /* Tx FIFO is not empty */ 147 if (inuse) { /* Tx FIFO is not empty */
148 ready = priv->tx_prod - priv->tx_cons - inuse - 1; 148 ready = priv->tx_prod - priv->tx_cons - inuse - 1;
149 } else { 149 } else {
150 /* Check for buffered last packet */ 150 /* Check for buffered last packet */
151 status = ioread32(&txcsr->status); 151 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
152 if (status & MSGDMA_CSR_STAT_BUSY) 152 if (status & MSGDMA_CSR_STAT_BUSY)
153 ready = priv->tx_prod - priv->tx_cons - 1; 153 ready = priv->tx_prod - priv->tx_cons - 1;
154 else 154 else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
162void msgdma_add_rx_desc(struct altera_tse_private *priv, 162void msgdma_add_rx_desc(struct altera_tse_private *priv,
163 struct tse_buffer *rxbuffer) 163 struct tse_buffer *rxbuffer)
164{ 164{
165 struct msgdma_extended_desc *desc = priv->rx_dma_desc;
166 u32 len = priv->rx_dma_buf_sz; 165 u32 len = priv->rx_dma_buf_sz;
167 dma_addr_t dma_addr = rxbuffer->dma_addr; 166 dma_addr_t dma_addr = rxbuffer->dma_addr;
168 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP 167 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
172 | MSGDMA_DESC_CTL_TR_ERR_IRQ 171 | MSGDMA_DESC_CTL_TR_ERR_IRQ
173 | MSGDMA_DESC_CTL_GO); 172 | MSGDMA_DESC_CTL_GO);
174 173
175 iowrite32(0, &desc->read_addr_lo); 174 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
176 iowrite32(0, &desc->read_addr_hi); 175 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
177 iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); 176 csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
178 iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi); 177 msgdma_descroffs(write_addr_lo));
179 iowrite32(len, &desc->len); 178 csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
180 iowrite32(0, &desc->burst_seq_num); 179 msgdma_descroffs(write_addr_hi));
181 iowrite32(0x00010001, &desc->stride); 180 csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
182 iowrite32(control, &desc->control); 181 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
182 csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
183 csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
183} 184}
184 185
185/* status is returned on upper 16 bits, 186/* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
190 u32 rxstatus = 0; 191 u32 rxstatus = 0;
191 u32 pktlength; 192 u32 pktlength;
192 u32 pktstatus; 193 u32 pktstatus;
193 struct msgdma_csr *rxcsr = 194
194 (struct msgdma_csr *)priv->rx_dma_csr; 195 if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
195 struct msgdma_response *rxresp = 196 & 0xffff) {
196 (struct msgdma_response *)priv->rx_dma_resp; 197 pktlength = csrrd32(priv->rx_dma_resp,
197 198 msgdma_respoffs(bytes_transferred));
198 if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { 199 pktstatus = csrrd32(priv->rx_dma_resp,
199 pktlength = ioread32(&rxresp->bytes_transferred); 200 msgdma_respoffs(status));
200 pktstatus = ioread32(&rxresp->status);
201 rxstatus = pktstatus; 201 rxstatus = pktstatus;
202 rxstatus = rxstatus << 16; 202 rxstatus = rxstatus << 16;
203 rxstatus |= (pktlength & 0xffff); 203 rxstatus |= (pktlength & 0xffff);
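The msgdma conversion above is mechanical: every iowrite32(val, &desc->field) becomes csrwr32(val, base, msgdma_descroffs(field)), a base pointer plus a byte offset derived from the struct layout, apparently so the descriptor base can stay a void __iomem * end to end. A minimal userspace sketch of why the two forms address the same register, with plain memory standing in for MMIO; the fake_-prefixed names are illustrative stand-ins, not driver symbols:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fake_desc {
        uint32_t read_addr_lo;
        uint32_t read_addr_hi;
        uint32_t len;
    };

    /* stand-in for csrwr32(): 32-bit store at base + byte offset */
    static void fake_csrwr32(uint32_t val, void *base, size_t offs)
    {
        *(uint32_t *)((uintptr_t)base + offs) = val;
    }

    int main(void)
    {
        struct fake_desc d = { 0, 0, 0 };

        fake_csrwr32(0x600, &d, offsetof(struct fake_desc, len));
        assert(d.len == 0x600);  /* same slot &d.len would have hit */
        return 0;
    }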
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
17#ifndef __ALTERA_MSGDMAHW_H__ 17#ifndef __ALTERA_MSGDMAHW_H__
18#define __ALTERA_MSGDMAHW_H__ 18#define __ALTERA_MSGDMAHW_H__
19 19
20/* mSGDMA standard descriptor format
21 */
22struct msgdma_desc {
23 u32 read_addr; /* data buffer source address */
24 u32 write_addr; /* data buffer destination address */
25 u32 len; /* the number of bytes to transfer per descriptor */
26 u32 control; /* characteristics of the transfer */
27};
28
29/* mSGDMA extended descriptor format 20/* mSGDMA extended descriptor format
30 */ 21 */
31struct msgdma_extended_desc { 22struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
159 u32 status; 150 u32 status;
160}; 151};
161 152
153#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
154#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
155#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
156
162/* mSGDMA response register bit definitions 157/* mSGDMA response register bit definitions
163 */ 158 */
164#define MSGDMA_RESP_EARLY_TERM BIT(8) 159#define MSGDMA_RESP_EARLY_TERM BIT(8)
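The msgdma_respoffs/msgdma_csroffs/msgdma_descroffs macros added here depend on the C structs mirroring the hardware register map byte for byte. A C11 sketch (hypothetical fake_ names, not the driver's) showing how offsetof evaluates at compile time and can therefore be checked against the datasheet with static assertions:

    #include <stddef.h>
    #include <stdint.h>

    struct fake_response {
        uint32_t bytes_transferred;
        uint32_t status;
    };

    #define fake_respoffs(a) (offsetof(struct fake_response, a))

    _Static_assert(fake_respoffs(bytes_transferred) == 0, "resp layout");
    _Static_assert(fake_respoffs(status) == 4, "resp layout");

    int main(void) { return 0; }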
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 9ce8630692b6..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,8 +20,8 @@
20#include "altera_sgdmahw.h" 20#include "altera_sgdmahw.h"
21#include "altera_sgdma.h" 21#include "altera_sgdma.h"
22 22
23static void sgdma_setup_descrip(struct sgdma_descrip *desc, 23static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
24 struct sgdma_descrip *ndesc, 24 struct sgdma_descrip __iomem *ndesc,
25 dma_addr_t ndesc_phys, 25 dma_addr_t ndesc_phys,
26 dma_addr_t raddr, 26 dma_addr_t raddr,
27 dma_addr_t waddr, 27 dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
31 int wfixed); 31 int wfixed);
32 32
33static int sgdma_async_write(struct altera_tse_private *priv, 33static int sgdma_async_write(struct altera_tse_private *priv,
34 struct sgdma_descrip *desc); 34 struct sgdma_descrip __iomem *desc);
35 35
36static int sgdma_async_read(struct altera_tse_private *priv); 36static int sgdma_async_read(struct altera_tse_private *priv);
37 37
38static dma_addr_t 38static dma_addr_t
39sgdma_txphysaddr(struct altera_tse_private *priv, 39sgdma_txphysaddr(struct altera_tse_private *priv,
40 struct sgdma_descrip *desc); 40 struct sgdma_descrip __iomem *desc);
41 41
42static dma_addr_t 42static dma_addr_t
43sgdma_rxphysaddr(struct altera_tse_private *priv, 43sgdma_rxphysaddr(struct altera_tse_private *priv,
44 struct sgdma_descrip *desc); 44 struct sgdma_descrip __iomem *desc);
45 45
46static int sgdma_txbusy(struct altera_tse_private *priv); 46static int sgdma_txbusy(struct altera_tse_private *priv);
47 47
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
79 priv->rxdescphys = (dma_addr_t) 0; 79 priv->rxdescphys = (dma_addr_t) 0;
80 priv->txdescphys = (dma_addr_t) 0; 80 priv->txdescphys = (dma_addr_t) 0;
81 81
82 priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, 82 priv->rxdescphys = dma_map_single(priv->device,
83 (void __force *)priv->rx_dma_desc,
83 priv->rxdescmem, DMA_BIDIRECTIONAL); 84 priv->rxdescmem, DMA_BIDIRECTIONAL);
84 85
85 if (dma_mapping_error(priv->device, priv->rxdescphys)) { 86 if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
88 return -EINVAL; 89 return -EINVAL;
89 } 90 }
90 91
91 priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, 92 priv->txdescphys = dma_map_single(priv->device,
93 (void __force *)priv->tx_dma_desc,
92 priv->txdescmem, DMA_TO_DEVICE); 94 priv->txdescmem, DMA_TO_DEVICE);
93 95
94 if (dma_mapping_error(priv->device, priv->txdescphys)) { 96 if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
98 } 100 }
99 101
100 /* Initialize descriptor memory to all 0's, sync memory to cache */ 102 /* Initialize descriptor memory to all 0's, sync memory to cache */
101 memset(priv->tx_dma_desc, 0, priv->txdescmem); 103 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
102 memset(priv->rx_dma_desc, 0, priv->rxdescmem); 104 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
103 105
104 dma_sync_single_for_device(priv->device, priv->txdescphys, 106 dma_sync_single_for_device(priv->device, priv->txdescphys,
105 priv->txdescmem, DMA_TO_DEVICE); 107 priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
126 */ 128 */
127void sgdma_reset(struct altera_tse_private *priv) 129void sgdma_reset(struct altera_tse_private *priv)
128{ 130{
129 u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
130 u32 txdescriplen = priv->txdescmem;
131 u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
132 u32 rxdescriplen = priv->rxdescmem;
133 struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
134 struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
135
136 /* Initialize descriptor memory to 0 */ 131 /* Initialize descriptor memory to 0 */
137 memset(ptxdescripmem, 0, txdescriplen); 132 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
138 memset(prxdescripmem, 0, rxdescriplen); 133 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
139 134
140 iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); 135 csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
141 iowrite32(0, &ptxsgdma->control); 136 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
142 137
143 iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); 138 csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
144 iowrite32(0, &prxsgdma->control); 139 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
145} 140}
146 141
147/* For SGDMA, interrupts remain enabled after initially enabling, 142/* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
167 162
168void sgdma_clear_rxirq(struct altera_tse_private *priv) 163void sgdma_clear_rxirq(struct altera_tse_private *priv)
169{ 164{
170 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 165 tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
171 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 166 SGDMA_CTRLREG_CLRINT);
172} 167}
173 168
174void sgdma_clear_txirq(struct altera_tse_private *priv) 169void sgdma_clear_txirq(struct altera_tse_private *priv)
175{ 170{
176 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; 171 tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
177 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 172 SGDMA_CTRLREG_CLRINT);
178} 173}
179 174
180/* transmits buffer through SGDMA. Returns number of buffers 175/* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
184 */ 179 */
185int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 180int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
186{ 181{
187 int pktstx = 0; 182 struct sgdma_descrip __iomem *descbase =
188 struct sgdma_descrip *descbase = 183 (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
189 (struct sgdma_descrip *)priv->tx_dma_desc;
190 184
191 struct sgdma_descrip *cdesc = &descbase[0]; 185 struct sgdma_descrip __iomem *cdesc = &descbase[0];
192 struct sgdma_descrip *ndesc = &descbase[1]; 186 struct sgdma_descrip __iomem *ndesc = &descbase[1];
193 187
194 /* wait 'til the tx sgdma is ready for the next transmit request */ 188 /* wait 'til the tx sgdma is ready for the next transmit request */
195 if (sgdma_txbusy(priv)) 189 if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
205 0, /* read fixed */ 199 0, /* read fixed */
206 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ 200 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
207 201
208 pktstx = sgdma_async_write(priv, cdesc); 202 sgdma_async_write(priv, cdesc);
209 203
210 /* enqueue the request to the pending transmit queue */ 204 /* enqueue the request to the pending transmit queue */
211 queue_tx(priv, buffer); 205 queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
219u32 sgdma_tx_completions(struct altera_tse_private *priv) 213u32 sgdma_tx_completions(struct altera_tse_private *priv)
220{ 214{
221 u32 ready = 0; 215 u32 ready = 0;
222 struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
223 216
224 if (!sgdma_txbusy(priv) && 217 if (!sgdma_txbusy(priv) &&
225 ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && 218 ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
219 & SGDMA_CONTROL_HW_OWNED) == 0) &&
226 (dequeue_tx(priv))) { 220 (dequeue_tx(priv))) {
227 ready = 1; 221 ready = 1;
228 } 222 }
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
246 */ 240 */
247u32 sgdma_rx_status(struct altera_tse_private *priv) 241u32 sgdma_rx_status(struct altera_tse_private *priv)
248{ 242{
249 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 243 struct sgdma_descrip __iomem *base =
250 struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; 244 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
251 struct sgdma_descrip *desc = NULL; 245 struct sgdma_descrip __iomem *desc = NULL;
252 int pktsrx;
253 unsigned int rxstatus = 0;
254 unsigned int pktlength = 0;
255 unsigned int pktstatus = 0;
256 struct tse_buffer *rxbuffer = NULL; 246 struct tse_buffer *rxbuffer = NULL;
247 unsigned int rxstatus = 0;
257 248
258 u32 sts = ioread32(&csr->status); 249 u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
259 250
260 desc = &base[0]; 251 desc = &base[0];
261 if (sts & SGDMA_STSREG_EOP) { 252 if (sts & SGDMA_STSREG_EOP) {
253 unsigned int pktlength = 0;
254 unsigned int pktstatus = 0;
262 dma_sync_single_for_cpu(priv->device, 255 dma_sync_single_for_cpu(priv->device,
263 priv->rxdescphys, 256 priv->rxdescphys,
264 priv->sgdmadesclen, 257 priv->sgdmadesclen,
265 DMA_FROM_DEVICE); 258 DMA_FROM_DEVICE);
266 259
267 pktlength = desc->bytes_xferred; 260 pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
268 pktstatus = desc->status & 0x3f; 261 pktstatus = csrrd8(desc, sgdma_descroffs(status));
269 rxstatus = pktstatus; 262 rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
270 rxstatus = rxstatus << 16; 263 rxstatus = rxstatus << 16;
271 rxstatus |= (pktlength & 0xffff); 264 rxstatus |= (pktlength & 0xffff);
272 265
273 if (rxstatus) { 266 if (rxstatus) {
274 desc->status = 0; 267 csrwr8(0, desc, sgdma_descroffs(status));
275 268
276 rxbuffer = dequeue_rx(priv); 269 rxbuffer = dequeue_rx(priv);
277 if (rxbuffer == NULL) 270 if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
279 "sgdma rx and rx queue empty!\n"); 272 "sgdma rx and rx queue empty!\n");
280 273
281 /* Clear control */ 274 /* Clear control */
282 iowrite32(0, &csr->control); 275 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
283 /* clear status */ 276 /* clear status */
284 iowrite32(0xf, &csr->status); 277 csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
285 278
286 /* kick the rx sgdma after reaping this descriptor */ 279 /* kick the rx sgdma after reaping this descriptor */
287 pktsrx = sgdma_async_read(priv); 280 sgdma_async_read(priv);
288 281
289 } else { 282 } else {
290 /* If the SGDMA indicated an end of packet on recv, 283 /* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
298 */ 291 */
299 netdev_err(priv->dev, 292 netdev_err(priv->dev,
300 "SGDMA RX Error Info: %x, %x, %x\n", 293 "SGDMA RX Error Info: %x, %x, %x\n",
301 sts, desc->status, rxstatus); 294 sts, csrrd8(desc, sgdma_descroffs(status)),
295 rxstatus);
302 } 296 }
303 } else if (sts == 0) { 297 } else if (sts == 0) {
304 pktsrx = sgdma_async_read(priv); 298 sgdma_async_read(priv);
305 } 299 }
306 300
307 return rxstatus; 301 return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
309 303
310 304
311/* Private functions */ 305/* Private functions */
312static void sgdma_setup_descrip(struct sgdma_descrip *desc, 306static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
313 struct sgdma_descrip *ndesc, 307 struct sgdma_descrip __iomem *ndesc,
314 dma_addr_t ndesc_phys, 308 dma_addr_t ndesc_phys,
315 dma_addr_t raddr, 309 dma_addr_t raddr,
316 dma_addr_t waddr, 310 dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
320 int wfixed) 314 int wfixed)
321{ 315{
322 /* Clear the next descriptor as not owned by hardware */ 316 /* Clear the next descriptor as not owned by hardware */
323 u32 ctrl = ndesc->control; 317
318 u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
324 ctrl &= ~SGDMA_CONTROL_HW_OWNED; 319 ctrl &= ~SGDMA_CONTROL_HW_OWNED;
325 ndesc->control = ctrl; 320 csrwr8(ctrl, ndesc, sgdma_descroffs(control));
326 321
327 ctrl = 0;
328 ctrl = SGDMA_CONTROL_HW_OWNED; 322 ctrl = SGDMA_CONTROL_HW_OWNED;
329 ctrl |= generate_eop; 323 ctrl |= generate_eop;
330 ctrl |= rfixed; 324 ctrl |= rfixed;
331 ctrl |= wfixed; 325 ctrl |= wfixed;
332 326
333 /* Channel is implicitly zero, initialized to 0 by default */ 327 /* Channel is implicitly zero, initialized to 0 by default */
334 328 csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
335 desc->raddr = raddr; 329 csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
336 desc->waddr = waddr; 330
337 desc->next = lower_32_bits(ndesc_phys); 331 csrwr32(0, desc, sgdma_descroffs(pad1));
338 desc->control = ctrl; 332 csrwr32(0, desc, sgdma_descroffs(pad2));
339 desc->status = 0; 333 csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
340 desc->rburst = 0; 334
341 desc->wburst = 0; 335 csrwr8(ctrl, desc, sgdma_descroffs(control));
342 desc->bytes = length; 336 csrwr8(0, desc, sgdma_descroffs(status));
343 desc->bytes_xferred = 0; 337 csrwr8(0, desc, sgdma_descroffs(wburst));
338 csrwr8(0, desc, sgdma_descroffs(rburst));
339 csrwr16(length, desc, sgdma_descroffs(bytes));
340 csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
344} 341}
345 342
346/* If hardware is busy, don't restart async read. 343/* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
351 */ 348 */
352static int sgdma_async_read(struct altera_tse_private *priv) 349static int sgdma_async_read(struct altera_tse_private *priv)
353{ 350{
354 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 351 struct sgdma_descrip __iomem *descbase =
355 struct sgdma_descrip *descbase = 352 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
356 (struct sgdma_descrip *)priv->rx_dma_desc;
357 353
358 struct sgdma_descrip *cdesc = &descbase[0]; 354 struct sgdma_descrip __iomem *cdesc = &descbase[0];
359 struct sgdma_descrip *ndesc = &descbase[1]; 355 struct sgdma_descrip __iomem *ndesc = &descbase[1];
360 356
361 struct tse_buffer *rxbuffer = NULL; 357 struct tse_buffer *rxbuffer = NULL;
362 358
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
382 priv->sgdmadesclen, 378 priv->sgdmadesclen,
383 DMA_TO_DEVICE); 379 DMA_TO_DEVICE);
384 380
385 iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), 381 csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
386 &csr->next_descrip); 382 priv->rx_dma_csr,
383 sgdma_csroffs(next_descrip));
387 384
388 iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), 385 csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
389 &csr->control); 386 priv->rx_dma_csr,
387 sgdma_csroffs(control));
390 388
391 return 1; 389 return 1;
392 } 390 }
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
395} 393}
396 394
397static int sgdma_async_write(struct altera_tse_private *priv, 395static int sgdma_async_write(struct altera_tse_private *priv,
398 struct sgdma_descrip *desc) 396 struct sgdma_descrip __iomem *desc)
399{ 397{
400 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
401
402 if (sgdma_txbusy(priv)) 398 if (sgdma_txbusy(priv))
403 return 0; 399 return 0;
404 400
405 /* clear control and status */ 401 /* clear control and status */
406 iowrite32(0, &csr->control); 402 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
407 iowrite32(0x1f, &csr->status); 403 csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
408 404
409 dma_sync_single_for_device(priv->device, priv->txdescphys, 405 dma_sync_single_for_device(priv->device, priv->txdescphys,
410 priv->sgdmadesclen, DMA_TO_DEVICE); 406 priv->sgdmadesclen, DMA_TO_DEVICE);
411 407
412 iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), 408 csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
413 &csr->next_descrip); 409 priv->tx_dma_csr,
410 sgdma_csroffs(next_descrip));
414 411
415 iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), 412 csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
416 &csr->control); 413 priv->tx_dma_csr,
414 sgdma_csroffs(control));
417 415
418 return 1; 416 return 1;
419} 417}
420 418
421static dma_addr_t 419static dma_addr_t
422sgdma_txphysaddr(struct altera_tse_private *priv, 420sgdma_txphysaddr(struct altera_tse_private *priv,
423 struct sgdma_descrip *desc) 421 struct sgdma_descrip __iomem *desc)
424{ 422{
425 dma_addr_t paddr = priv->txdescmem_busaddr; 423 dma_addr_t paddr = priv->txdescmem_busaddr;
426 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; 424 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
429 427
430static dma_addr_t 428static dma_addr_t
431sgdma_rxphysaddr(struct altera_tse_private *priv, 429sgdma_rxphysaddr(struct altera_tse_private *priv,
432 struct sgdma_descrip *desc) 430 struct sgdma_descrip __iomem *desc)
433{ 431{
434 dma_addr_t paddr = priv->rxdescmem_busaddr; 432 dma_addr_t paddr = priv->rxdescmem_busaddr;
435 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; 433 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
518 */ 516 */
519static int sgdma_rxbusy(struct altera_tse_private *priv) 517static int sgdma_rxbusy(struct altera_tse_private *priv)
520{ 518{
521 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 519 return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
522 return ioread32(&csr->status) & SGDMA_STSREG_BUSY; 520 & SGDMA_STSREG_BUSY;
523} 521}
524 522
525/* waits for the tx sgdma to finish its current operation, returns 0 523/* waits for the tx sgdma to finish its current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
528static int sgdma_txbusy(struct altera_tse_private *priv) 526static int sgdma_txbusy(struct altera_tse_private *priv)
529{ 527{
530 int delay = 0; 528 int delay = 0;
531 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
532 529
533 /* if DMA is busy, wait for current transaction to finish */ 530 /* if DMA is busy, wait for current transaction to finish */
534 while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) 531 while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
532 & SGDMA_STSREG_BUSY) && (delay++ < 100))
535 udelay(1); 533 udelay(1);
536 534
537 if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) { 535 if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
536 & SGDMA_STSREG_BUSY) {
538 netdev_err(priv->dev, "timeout waiting for tx dma\n"); 537 netdev_err(priv->dev, "timeout waiting for tx dma\n");
539 return 1; 538 return 1;
540 } 539 }
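sgdma_rx_status() above returns the descriptor status in the upper 16 bits of the return value and the transferred byte count in the lower 16 (after masking SGDMA_STATUS_EOP out of the status, which this sketch omits). The packing, modeled standalone:

    #include <assert.h>
    #include <stdint.h>

    /* status in the upper 16 bits, length in the lower 16 */
    static uint32_t pack_rxstatus(uint8_t status, uint16_t pktlength)
    {
        uint32_t rxstatus = status;

        rxstatus <<= 16;
        rxstatus |= (pktlength & 0xffff);
        return rxstatus;
    }

    int main(void)
    {
        uint32_t v = pack_rxstatus(0x2 /* length error bit */, 1514);

        assert((v >> 16) == 0x2);
        assert((v & 0xffff) == 1514);
        return 0;
    }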
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
19 19
20/* SGDMA descriptor structure */ 20/* SGDMA descriptor structure */
21struct sgdma_descrip { 21struct sgdma_descrip {
22 unsigned int raddr; /* address of data to be read */ 22 u32 raddr; /* address of data to be read */
23 unsigned int pad1; 23 u32 pad1;
24 unsigned int waddr; 24 u32 waddr;
25 unsigned int pad2; 25 u32 pad2;
26 unsigned int next; 26 u32 next;
27 unsigned int pad3; 27 u32 pad3;
28 unsigned short bytes; 28 u16 bytes;
29 unsigned char rburst; 29 u8 rburst;
30 unsigned char wburst; 30 u8 wburst;
31 unsigned short bytes_xferred; /* 16 bits, bytes xferred */ 31 u16 bytes_xferred; /* 16 bits, bytes xferred */
32 32
33 /* bit 0: error 33 /* bit 0: error
34 * bit 1: length error 34 * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
39 * bit 6: reserved 39 * bit 6: reserved
40 * bit 7: status eop for recv case 40 * bit 7: status eop for recv case
41 */ 41 */
42 unsigned char status; 42 u8 status;
43 43
44 /* bit 0: eop 44 /* bit 0: eop
45 * bit 1: read_fixed 45 * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
47 * bits 3,4,5,6: Channel (always 0) 47 * bits 3,4,5,6: Channel (always 0)
48 * bit 7: hardware owned 48 * bit 7: hardware owned
49 */ 49 */
50 unsigned char control; 50 u8 control;
51} __packed; 51} __packed;
52 52
53 53
@@ -101,6 +101,8 @@ struct sgdma_csr {
101 u32 pad3[3]; 101 u32 pad3[3];
102}; 102};
103 103
104#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
105#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
104 106
105#define SGDMA_STSREG_ERR BIT(0) /* Error */ 107#define SGDMA_STSREG_ERR BIT(0) /* Error */
106#define SGDMA_STSREG_EOP BIT(1) /* EOP */ 108#define SGDMA_STSREG_EOP BIT(1) /* EOP */
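The header hunk swaps unsigned int/short/char for u32/u16/u8 inside a __packed struct, pinning every field to a fixed byte offset independent of compiler ABI. Roughly the same idea in portable C, using GCC/Clang attribute syntax and an illustrative struct rather than the full descriptor:

    #include <stdint.h>

    struct fake_descrip {
        uint32_t raddr;
        uint32_t pad1;
        uint16_t bytes;
        uint8_t  rburst;
        uint8_t  wburst;
    } __attribute__((packed));

    /* 4 + 4 + 2 + 1 + 1 bytes: packing forbids inserted padding */
    _Static_assert(sizeof(struct fake_descrip) == 12, "no padding allowed");

    int main(void) { return 0; }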
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 465c4aabebbd..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -357,6 +357,8 @@ struct altera_tse_mac {
357 u32 reserved5[42]; 357 u32 reserved5[42];
358}; 358};
359 359
360#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
361
360/* Transmit and Receive Command Registers Bit Definitions 362/* Transmit and Receive Command Registers Bit Definitions
361 */ 363 */
362#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) 364#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
487 */ 489 */
488void altera_tse_set_ethtool_ops(struct net_device *); 490void altera_tse_set_ethtool_ops(struct net_device *);
489 491
492static inline
493u32 csrrd32(void __iomem *mac, size_t offs)
494{
495 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
496 return readl(paddr);
497}
498
499static inline
500u16 csrrd16(void __iomem *mac, size_t offs)
501{
502 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
503 return readw(paddr);
504}
505
506static inline
507u8 csrrd8(void __iomem *mac, size_t offs)
508{
509 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
510 return readb(paddr);
511}
512
513static inline
514void csrwr32(u32 val, void __iomem *mac, size_t offs)
515{
516 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
517
518 writel(val, paddr);
519}
520
521static inline
522void csrwr16(u16 val, void __iomem *mac, size_t offs)
523{
524 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
525
526 writew(val, paddr);
527}
528
529static inline
530void csrwr8(u8 val, void __iomem *mac, size_t offs)
531{
532 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
533
534 writeb(val, paddr);
535}
536
490#endif /* __ALTERA_TSE_H__ */ 537#endif /* __ALTERA_TSE_H__ */
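The six inline helpers added above reduce every CSR access to base-plus-byte-offset arithmetic in three widths; the uintptr_t cast sidesteps pointer arithmetic on void __iomem *. A userspace sketch of the sub-word reads against a packed layout, with plain loads in place of readb()/readw() and fake_ names as stand-ins:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fake_descrip {
        uint32_t raddr;
        uint16_t bytes;
        uint8_t  status;
        uint8_t  control;
    } __attribute__((packed));

    static uint8_t fake_csrrd8(void *base, size_t offs)
    {
        return *(uint8_t *)((uintptr_t)base + offs);
    }

    static uint16_t fake_csrrd16(void *base, size_t offs)
    {
        return *(uint16_t *)((uintptr_t)base + offs);
    }

    int main(void)
    {
        struct fake_descrip d = { .bytes = 1514, .control = 0x80 };

        assert(fake_csrrd16(&d, offsetof(struct fake_descrip, bytes)) == 1514);
        assert(fake_csrrd8(&d, offsetof(struct fake_descrip, control)) == 0x80);
        return 0;
    }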
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 76133caffa78..54c25eff7952 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
96 u64 *buf) 96 u64 *buf)
97{ 97{
98 struct altera_tse_private *priv = netdev_priv(dev); 98 struct altera_tse_private *priv = netdev_priv(dev);
99 struct altera_tse_mac *mac = priv->mac_dev;
100 u64 ext; 99 u64 ext;
101 100
102 buf[0] = ioread32(&mac->frames_transmitted_ok); 101 buf[0] = csrrd32(priv->mac_dev,
103 buf[1] = ioread32(&mac->frames_received_ok); 102 tse_csroffs(frames_transmitted_ok));
104 buf[2] = ioread32(&mac->frames_check_sequence_errors); 103 buf[1] = csrrd32(priv->mac_dev,
105 buf[3] = ioread32(&mac->alignment_errors); 104 tse_csroffs(frames_received_ok));
105 buf[2] = csrrd32(priv->mac_dev,
106 tse_csroffs(frames_check_sequence_errors));
107 buf[3] = csrrd32(priv->mac_dev,
108 tse_csroffs(alignment_errors));
106 109
107 /* Extended aOctetsTransmittedOK counter */ 110 /* Extended aOctetsTransmittedOK counter */
108 ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; 111 ext = (u64) csrrd32(priv->mac_dev,
109 ext |= ioread32(&mac->octets_transmitted_ok); 112 tse_csroffs(msb_octets_transmitted_ok)) << 32;
113
114 ext |= csrrd32(priv->mac_dev,
115 tse_csroffs(octets_transmitted_ok));
110 buf[4] = ext; 116 buf[4] = ext;
111 117
112 /* Extended aOctetsReceivedOK counter */ 118 /* Extended aOctetsReceivedOK counter */
113 ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; 119 ext = (u64) csrrd32(priv->mac_dev,
114 ext |= ioread32(&mac->octets_received_ok); 120 tse_csroffs(msb_octets_received_ok)) << 32;
121
122 ext |= csrrd32(priv->mac_dev,
123 tse_csroffs(octets_received_ok));
115 buf[5] = ext; 124 buf[5] = ext;
116 125
117 buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); 126 buf[6] = csrrd32(priv->mac_dev,
118 buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); 127 tse_csroffs(tx_pause_mac_ctrl_frames));
119 buf[8] = ioread32(&mac->if_in_errors); 128 buf[7] = csrrd32(priv->mac_dev,
120 buf[9] = ioread32(&mac->if_out_errors); 129 tse_csroffs(rx_pause_mac_ctrl_frames));
121 buf[10] = ioread32(&mac->if_in_ucast_pkts); 130 buf[8] = csrrd32(priv->mac_dev,
122 buf[11] = ioread32(&mac->if_in_multicast_pkts); 131 tse_csroffs(if_in_errors));
123 buf[12] = ioread32(&mac->if_in_broadcast_pkts); 132 buf[9] = csrrd32(priv->mac_dev,
124 buf[13] = ioread32(&mac->if_out_discards); 133 tse_csroffs(if_out_errors));
125 buf[14] = ioread32(&mac->if_out_ucast_pkts); 134 buf[10] = csrrd32(priv->mac_dev,
126 buf[15] = ioread32(&mac->if_out_multicast_pkts); 135 tse_csroffs(if_in_ucast_pkts));
127 buf[16] = ioread32(&mac->if_out_broadcast_pkts); 136 buf[11] = csrrd32(priv->mac_dev,
128 buf[17] = ioread32(&mac->ether_stats_drop_events); 137 tse_csroffs(if_in_multicast_pkts));
138 buf[12] = csrrd32(priv->mac_dev,
139 tse_csroffs(if_in_broadcast_pkts));
140 buf[13] = csrrd32(priv->mac_dev,
141 tse_csroffs(if_out_discards));
142 buf[14] = csrrd32(priv->mac_dev,
143 tse_csroffs(if_out_ucast_pkts));
144 buf[15] = csrrd32(priv->mac_dev,
145 tse_csroffs(if_out_multicast_pkts));
146 buf[16] = csrrd32(priv->mac_dev,
147 tse_csroffs(if_out_broadcast_pkts));
148 buf[17] = csrrd32(priv->mac_dev,
149 tse_csroffs(ether_stats_drop_events));
129 150
130 /* Extended etherStatsOctets counter */ 151 /* Extended etherStatsOctets counter */
131 ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; 152 ext = (u64) csrrd32(priv->mac_dev,
132 ext |= ioread32(&mac->ether_stats_octets); 153 tse_csroffs(msb_ether_stats_octets)) << 32;
154 ext |= csrrd32(priv->mac_dev,
155 tse_csroffs(ether_stats_octets));
133 buf[18] = ext; 156 buf[18] = ext;
134 157
135 buf[19] = ioread32(&mac->ether_stats_pkts); 158 buf[19] = csrrd32(priv->mac_dev,
136 buf[20] = ioread32(&mac->ether_stats_undersize_pkts); 159 tse_csroffs(ether_stats_pkts));
137 buf[21] = ioread32(&mac->ether_stats_oversize_pkts); 160 buf[20] = csrrd32(priv->mac_dev,
138 buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); 161 tse_csroffs(ether_stats_undersize_pkts));
139 buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); 162 buf[21] = csrrd32(priv->mac_dev,
140 buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); 163 tse_csroffs(ether_stats_oversize_pkts));
141 buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); 164 buf[22] = csrrd32(priv->mac_dev,
142 buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); 165 tse_csroffs(ether_stats_pkts_64_octets));
143 buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); 166 buf[23] = csrrd32(priv->mac_dev,
144 buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets); 167 tse_csroffs(ether_stats_pkts_65to127_octets));
145 buf[29] = ioread32(&mac->ether_stats_jabbers); 168 buf[24] = csrrd32(priv->mac_dev,
146 buf[30] = ioread32(&mac->ether_stats_fragments); 169 tse_csroffs(ether_stats_pkts_128to255_octets));
170 buf[25] = csrrd32(priv->mac_dev,
171 tse_csroffs(ether_stats_pkts_256to511_octets));
172 buf[26] = csrrd32(priv->mac_dev,
173 tse_csroffs(ether_stats_pkts_512to1023_octets));
174 buf[27] = csrrd32(priv->mac_dev,
175 tse_csroffs(ether_stats_pkts_1024to1518_octets));
176 buf[28] = csrrd32(priv->mac_dev,
177 tse_csroffs(ether_stats_pkts_1519tox_octets));
178 buf[29] = csrrd32(priv->mac_dev,
179 tse_csroffs(ether_stats_jabbers));
180 buf[30] = csrrd32(priv->mac_dev,
181 tse_csroffs(ether_stats_fragments));
147} 182}
148 183
149static int tse_sset_count(struct net_device *dev, int sset) 184static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
178{ 213{
179 int i; 214 int i;
180 struct altera_tse_private *priv = netdev_priv(dev); 215 struct altera_tse_private *priv = netdev_priv(dev);
181 u32 *tse_mac_regs = (u32 *)priv->mac_dev;
182 u32 *buf = regbuf; 216 u32 *buf = regbuf;
183 217
184 /* Set version to a known value, so ethtool knows 218 /* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
196 regs->version = 1; 230 regs->version = 1;
197 231
198 for (i = 0; i < TSE_NUM_REGS; i++) 232 for (i = 0; i < TSE_NUM_REGS; i++)
199 buf[i] = ioread32(&tse_mac_regs[i]); 233 buf[i] = csrrd32(priv->mac_dev, i * 4);
200} 234}
201 235
202static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 236static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
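tse_fill_stats() builds the 64-bit extended counters from two 32-bit registers: the msb half shifted up 32 and or-ed with the lsb half. The arithmetic in isolation (the driver reads the halves back to back; a carry landing between the two reads is the usual hazard of split counters and is not handled here either):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t counter64(uint32_t msb, uint32_t lsb)
    {
        return ((uint64_t)msb << 32) | lsb;
    }

    int main(void)
    {
        assert(counter64(0x1, 0x2) == 0x100000002ULL);
        return 0;
    }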
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index e44a4aeb9701..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
100 */ 100 */
101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
102{ 102{
103 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; 103 struct net_device *ndev = bus->priv;
104 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; 104 struct altera_tse_private *priv = netdev_priv(ndev);
105 u32 data;
106 105
107 /* set MDIO address */ 106 /* set MDIO address */
108 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); 107 csrwr32((mii_id & 0x1f), priv->mac_dev,
108 tse_csroffs(mdio_phy0_addr));
109 109
110 /* get the data */ 110 /* get the data */
111 data = ioread32(&mdio_regs[regnum]) & 0xffff; 111 return csrrd32(priv->mac_dev,
112 return data; 112 tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
113} 113}
114 114
115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
116 u16 value) 116 u16 value)
117{ 117{
118 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; 118 struct net_device *ndev = bus->priv;
119 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; 119 struct altera_tse_private *priv = netdev_priv(ndev);
120 120
121 /* set MDIO address */ 121 /* set MDIO address */
122 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); 122 csrwr32((mii_id & 0x1f), priv->mac_dev,
123 tse_csroffs(mdio_phy0_addr));
123 124
124 /* write the data */ 125 /* write the data */
125 iowrite32((u32) value, &mdio_regs[regnum]); 126 csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
126 return 0; 127 return 0;
127} 128}
128 129
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
168 for (i = 0; i < PHY_MAX_ADDR; i++) 169 for (i = 0; i < PHY_MAX_ADDR; i++)
169 mdio->irq[i] = PHY_POLL; 170 mdio->irq[i] = PHY_POLL;
170 171
171 mdio->priv = priv->mac_dev; 172 mdio->priv = dev;
172 mdio->parent = priv->device; 173 mdio->parent = priv->device;
173 174
174 ret = of_mdiobus_register(mdio, mdio_node); 175 ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
563 unsigned int nopaged_len = skb_headlen(skb); 564 unsigned int nopaged_len = skb_headlen(skb);
564 enum netdev_tx ret = NETDEV_TX_OK; 565 enum netdev_tx ret = NETDEV_TX_OK;
565 dma_addr_t dma_addr; 566 dma_addr_t dma_addr;
566 int txcomplete = 0;
567 567
568 spin_lock_bh(&priv->tx_lock); 568 spin_lock_bh(&priv->tx_lock);
569 569
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
599 dma_sync_single_for_device(priv->device, buffer->dma_addr, 599 dma_sync_single_for_device(priv->device, buffer->dma_addr,
600 buffer->len, DMA_TO_DEVICE); 600 buffer->len, DMA_TO_DEVICE);
601 601
602 txcomplete = priv->dmaops->tx_buffer(priv, buffer); 602 priv->dmaops->tx_buffer(priv, buffer);
603 603
604 skb_tx_timestamp(skb); 604 skb_tx_timestamp(skb);
605 605
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
698 struct altera_tse_private *priv = netdev_priv(dev); 698 struct altera_tse_private *priv = netdev_priv(dev);
699 struct phy_device *phydev = NULL; 699 struct phy_device *phydev = NULL;
700 char phy_id_fmt[MII_BUS_ID_SIZE + 3]; 700 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
701 int ret;
702 701
703 if (priv->phy_addr != POLL_PHY) { 702 if (priv->phy_addr != POLL_PHY) {
704 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, 703 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
712 netdev_err(dev, "Could not attach to PHY\n"); 711 netdev_err(dev, "Could not attach to PHY\n");
713 712
714 } else { 713 } else {
714 int ret;
715 phydev = phy_find_first(priv->mdio); 715 phydev = phy_find_first(priv->mdio);
716 if (phydev == NULL) { 716 if (phydev == NULL) {
717 netdev_err(dev, "No PHY found\n"); 717 netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
791 791
792static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) 792static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
793{ 793{
794 struct altera_tse_mac *mac = priv->mac_dev;
795 u32 msb; 794 u32 msb;
796 u32 lsb; 795 u32 lsb;
797 796
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
799 lsb = ((addr[5] << 8) | addr[4]) & 0xffff; 798 lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
800 799
801 /* Set primary MAC address */ 800 /* Set primary MAC address */
802 iowrite32(msb, &mac->mac_addr_0); 801 csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
803 iowrite32(lsb, &mac->mac_addr_1); 802 csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
804} 803}
805 804
806/* MAC software reset. 805/* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
811 */ 810 */
812static int reset_mac(struct altera_tse_private *priv) 811static int reset_mac(struct altera_tse_private *priv)
813{ 812{
814 void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
815 int counter; 813 int counter;
816 u32 dat; 814 u32 dat;
817 815
818 dat = ioread32(cmd_cfg_reg); 816 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
819 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); 817 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
820 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; 818 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
821 iowrite32(dat, cmd_cfg_reg); 819 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
822 820
823 counter = 0; 821 counter = 0;
824 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 822 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
825 if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) 823 if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
824 MAC_CMDCFG_SW_RESET))
826 break; 825 break;
827 udelay(1); 826 udelay(1);
828 } 827 }
829 828
830 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 829 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
831 dat = ioread32(cmd_cfg_reg); 830 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
832 dat &= ~MAC_CMDCFG_SW_RESET; 831 dat &= ~MAC_CMDCFG_SW_RESET;
833 iowrite32(dat, cmd_cfg_reg); 832 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
834 return -1; 833 return -1;
835 } 834 }
836 return 0; 835 return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
840*/ 839*/
841static int init_mac(struct altera_tse_private *priv) 840static int init_mac(struct altera_tse_private *priv)
842{ 841{
843 struct altera_tse_mac *mac = priv->mac_dev;
844 unsigned int cmd = 0; 842 unsigned int cmd = 0;
845 u32 frm_length; 843 u32 frm_length;
846 844
847 /* Setup Rx FIFO */ 845 /* Setup Rx FIFO */
848 iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, 846 csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
849 &mac->rx_section_empty); 847 priv->mac_dev, tse_csroffs(rx_section_empty));
850 iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); 848
851 iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); 849 csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
852 iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); 850 tse_csroffs(rx_section_full));
851
852 csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
853 tse_csroffs(rx_almost_empty));
854
855 csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
856 tse_csroffs(rx_almost_full));
853 857
854 /* Setup Tx FIFO */ 858 /* Setup Tx FIFO */
855 iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, 859 csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
856 &mac->tx_section_empty); 860 priv->mac_dev, tse_csroffs(tx_section_empty));
857 iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); 861
858 iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); 862 csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
859 iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); 863 tse_csroffs(tx_section_full));
864
865 csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
866 tse_csroffs(tx_almost_empty));
867
868 csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
869 tse_csroffs(tx_almost_full));
860 870
861 /* MAC Address Configuration */ 871 /* MAC Address Configuration */
862 tse_update_mac_addr(priv, priv->dev->dev_addr); 872 tse_update_mac_addr(priv, priv->dev->dev_addr);
863 873
864 /* MAC Function Configuration */ 874 /* MAC Function Configuration */
865 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; 875 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
866 iowrite32(frm_length, &mac->frm_length); 876 csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
867 iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); 877
878 csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
879 tse_csroffs(tx_ipg_length));
868 880
869 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit 881 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
870 * start address 882 * start address
871 */ 883 */
872 tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); 884 tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
873 tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | 885 ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
874 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); 886
887 tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
888 ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
889 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
875 890
876 /* Set the MAC options */ 891 /* Set the MAC options */
877 cmd = ioread32(&mac->command_config); 892 cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
878 cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ 893 cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */
879 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ 894 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
880 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames 895 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
889 cmd &= ~MAC_CMDCFG_ETH_SPEED; 904 cmd &= ~MAC_CMDCFG_ETH_SPEED;
890 cmd &= ~MAC_CMDCFG_ENA_10; 905 cmd &= ~MAC_CMDCFG_ENA_10;
891 906
892 iowrite32(cmd, &mac->command_config); 907 csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
893 908
894 iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta); 909 csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
910 tse_csroffs(pause_quanta));
895 911
896 if (netif_msg_hw(priv)) 912 if (netif_msg_hw(priv))
897 dev_dbg(priv->device, 913 dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
904 */ 920 */
905static void tse_set_mac(struct altera_tse_private *priv, bool enable) 921static void tse_set_mac(struct altera_tse_private *priv, bool enable)
906{ 922{
907 struct altera_tse_mac *mac = priv->mac_dev; 923 u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
908 u32 value = ioread32(&mac->command_config);
909 924
910 if (enable) 925 if (enable)
911 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; 926 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
912 else 927 else
913 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); 928 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
914 929
915 iowrite32(value, &mac->command_config); 930 csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
916} 931}
917 932
918/* Change the MTU 933/* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
942static void altera_tse_set_mcfilter(struct net_device *dev) 957static void altera_tse_set_mcfilter(struct net_device *dev)
943{ 958{
944 struct altera_tse_private *priv = netdev_priv(dev); 959 struct altera_tse_private *priv = netdev_priv(dev);
945 struct altera_tse_mac *mac = priv->mac_dev;
946 int i; 960 int i;
947 struct netdev_hw_addr *ha; 961 struct netdev_hw_addr *ha;
948 962
949 /* clear the hash filter */ 963 /* clear the hash filter */
950 for (i = 0; i < 64; i++) 964 for (i = 0; i < 64; i++)
951 iowrite32(0, &(mac->hash_table[i])); 965 csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
952 966
953 netdev_for_each_mc_addr(ha, dev) { 967 netdev_for_each_mc_addr(ha, dev) {
954 unsigned int hash = 0; 968 unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
964 978
965 hash = (hash << 1) | xor_bit; 979 hash = (hash << 1) | xor_bit;
966 } 980 }
967 iowrite32(1, &(mac->hash_table[hash])); 981 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
968 } 982 }
969} 983}
970 984
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
972static void altera_tse_set_mcfilterall(struct net_device *dev) 986static void altera_tse_set_mcfilterall(struct net_device *dev)
973{ 987{
974 struct altera_tse_private *priv = netdev_priv(dev); 988 struct altera_tse_private *priv = netdev_priv(dev);
975 struct altera_tse_mac *mac = priv->mac_dev;
976 int i; 989 int i;
977 990
978 /* set the hash filter */ 991 /* set the hash filter */
979 for (i = 0; i < 64; i++) 992 for (i = 0; i < 64; i++)
980 iowrite32(1, &(mac->hash_table[i])); 993 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
981} 994}
982 995
983/* Set or clear the multicast filter for this adaptor 996/* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
985static void tse_set_rx_mode_hashfilter(struct net_device *dev) 998static void tse_set_rx_mode_hashfilter(struct net_device *dev)
986{ 999{
987 struct altera_tse_private *priv = netdev_priv(dev); 1000 struct altera_tse_private *priv = netdev_priv(dev);
988 struct altera_tse_mac *mac = priv->mac_dev;
989 1001
990 spin_lock(&priv->mac_cfg_lock); 1002 spin_lock(&priv->mac_cfg_lock);
991 1003
992 if (dev->flags & IFF_PROMISC) 1004 if (dev->flags & IFF_PROMISC)
993 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1005 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1006 MAC_CMDCFG_PROMIS_EN);
994 1007
995 if (dev->flags & IFF_ALLMULTI) 1008 if (dev->flags & IFF_ALLMULTI)
996 altera_tse_set_mcfilterall(dev); 1009 altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
1005static void tse_set_rx_mode(struct net_device *dev) 1018static void tse_set_rx_mode(struct net_device *dev)
1006{ 1019{
1007 struct altera_tse_private *priv = netdev_priv(dev); 1020 struct altera_tse_private *priv = netdev_priv(dev);
1008 struct altera_tse_mac *mac = priv->mac_dev;
1009 1021
1010 spin_lock(&priv->mac_cfg_lock); 1022 spin_lock(&priv->mac_cfg_lock);
1011 1023
1012 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || 1024 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1013 !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) 1025 !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1014 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1026 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1027 MAC_CMDCFG_PROMIS_EN);
1015 else 1028 else
1016 tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1029 tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1030 MAC_CMDCFG_PROMIS_EN);
1017 1031
1018 spin_unlock(&priv->mac_cfg_lock); 1032 spin_unlock(&priv->mac_cfg_lock);
1019} 1033}
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
1362 of_property_read_bool(pdev->dev.of_node, 1376 of_property_read_bool(pdev->dev.of_node,
1363 "altr,has-hash-multicast-filter"); 1377 "altr,has-hash-multicast-filter");
1364 1378
1379 /* Set hash filter to not set for now until the
1380 * multicast filter receive issue is debugged
1381 */
1382 priv->hash_filter = 0;
1383
1365 /* get supplemental address settings for this instance */ 1384 /* get supplemental address settings for this instance */
1366 priv->added_unicast = 1385 priv->added_unicast =
1367 of_property_read_bool(pdev->dev.of_node, 1386 of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
1493 return 0; 1512 return 0;
1494} 1513}
1495 1514
1496struct altera_dmaops altera_dtype_sgdma = { 1515static const struct altera_dmaops altera_dtype_sgdma = {
1497 .altera_dtype = ALTERA_DTYPE_SGDMA, 1516 .altera_dtype = ALTERA_DTYPE_SGDMA,
1498 .dmamask = 32, 1517 .dmamask = 32,
1499 .reset_dma = sgdma_reset, 1518 .reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
1512 .start_rxdma = sgdma_start_rxdma, 1531 .start_rxdma = sgdma_start_rxdma,
1513}; 1532};
1514 1533
1515struct altera_dmaops altera_dtype_msgdma = { 1534static const struct altera_dmaops altera_dtype_msgdma = {
1516 .altera_dtype = ALTERA_DTYPE_MSGDMA, 1535 .altera_dtype = ALTERA_DTYPE_MSGDMA,
1517 .dmamask = 64, 1536 .dmamask = 64,
1518 .reset_dma = msgdma_reset, 1537 .reset_dma = msgdma_reset,
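Two computations recur in the altera_tse_main.c hunks: register arrays are now indexed as a byte offset plus n * 4 (the mdio_phy0 and hash_table accesses), and tse_update_mac_addr() splits the six-byte station address across two 32-bit registers. A sketch of the address split; the msb byte order here is an assumption mirroring the lsb packing visible in the hunk:

    #include <assert.h>
    #include <stdint.h>

    static void pack_mac(const uint8_t *addr, uint32_t *msb, uint32_t *lsb)
    {
        *msb = ((uint32_t)addr[3] << 24) | ((uint32_t)addr[2] << 16) |
               ((uint32_t)addr[1] << 8) | addr[0];          /* bytes 0-3 */
        *lsb = (((uint32_t)addr[5] << 8) | addr[4]) & 0xffff; /* bytes 4-5 */
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x07, 0xed, 0x12, 0x34, 0x56 };
        uint32_t msb, lsb;

        pack_mac(mac, &msb, &lsb);
        assert(msb == 0x12ed0700);
        assert(lsb == 0x5634);
        return 0;
    }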
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
17#include "altera_tse.h" 17#include "altera_tse.h"
18#include "altera_utils.h" 18#include "altera_utils.h"
19 19
20void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) 20void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
21{ 21{
22 u32 value = ioread32(ioaddr); 22 u32 value = csrrd32(ioaddr, offs);
23 value |= bit_mask; 23 value |= bit_mask;
24 iowrite32(value, ioaddr); 24 csrwr32(value, ioaddr, offs);
25} 25}
26 26
27void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) 27void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
28{ 28{
29 u32 value = ioread32(ioaddr); 29 u32 value = csrrd32(ioaddr, offs);
30 value &= ~bit_mask; 30 value &= ~bit_mask;
31 iowrite32(value, ioaddr); 31 csrwr32(value, ioaddr, offs);
32} 32}
33 33
34int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) 34int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
35{ 35{
36 u32 value = ioread32(ioaddr); 36 u32 value = csrrd32(ioaddr, offs);
37 return (value & bit_mask) ? 1 : 0; 37 return (value & bit_mask) ? 1 : 0;
38} 38}
39 39
40int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) 40int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
41{ 41{
42 u32 value = ioread32(ioaddr); 42 u32 value = csrrd32(ioaddr, offs);
43 return (value & bit_mask) ? 0 : 1; 43 return (value & bit_mask) ? 0 : 1;
44} 44}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
19#ifndef __ALTERA_UTILS_H__ 19#ifndef __ALTERA_UTILS_H__
20#define __ALTERA_UTILS_H__ 20#define __ALTERA_UTILS_H__
21 21
22void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); 22void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
23void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); 23void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
24int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); 24int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
25int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); 25int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
26 26
27#endif /* __ALTERA_UTILS_H__*/ 27#endif /* __ALTERA_UTILS_H__*/
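The reworked utils helpers are plain read-modify-write sequences, now selecting the register by byte offset instead of a pre-computed pointer. Modeled on ordinary memory (set_bit32/clear_bit32 are illustrative names, not kernel API, and there is no locking here, just as the originals rely on the caller's):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static void set_bit32(uint32_t *regs, size_t offs, uint32_t mask)
    {
        regs[offs / 4] |= mask;   /* read, or in the mask, write back */
    }

    static void clear_bit32(uint32_t *regs, size_t offs, uint32_t mask)
    {
        regs[offs / 4] &= ~mask;
    }

    int main(void)
    {
        uint32_t regs[2] = { 0, 0 };

        set_bit32(regs, 4, 1u << 5);   /* command_config-style RMW */
        assert(regs[1] == (1u << 5));
        clear_bit32(regs, 4, 1u << 5);
        assert(regs[1] == 0);
        return 0;
    }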
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b260913db236..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07) 10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08) 10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10053#define BCM_5710_UNDI_FW_MF_VERS (0x05) 10053#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10054#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) 10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) 10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) 10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
10057{ 10057{
10058 u8 major, minor, version; 10058 u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10352 /* Reset should be performed after BRB is emptied */ 10352 /* Reset should be performed after BRB is emptied */
10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10354 u32 timer_count = 1000; 10354 u32 timer_count = 1000;
10355 bool need_write = true;
10355 10356
10356 /* Close the MAC Rx to prevent BRB from filling up */ 10357 /* Close the MAC Rx to prevent BRB from filling up */
10357 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10358 bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10398 * cleaning methods - might be redundant but harmless. 10399 * cleaning methods - might be redundant but harmless.
10399 */ 10400 */
10400 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { 10401 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
10401 bnx2x_prev_unload_undi_mf(bp); 10402 if (need_write) {
10403 bnx2x_prev_unload_undi_mf(bp);
10404 need_write = false;
10405 }
10402 } else if (prev_undi) { 10406 } else if (prev_undi) {
10403 /* If UNDI resides in memory, 10407 /* If UNDI resides in memory,
10404 * manually increment it 10408 * manually increment it
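The bnx2x_main.c hunk adds need_write so that the UNDI MF cleanup write is issued at most once even though the surrounding BRB-drain code keeps polling. The control-flow shape, reduced to a runnable toy (loop bounds and placement are simplified from the driver):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool need_write = true;
        int timer_count = 5;

        while (timer_count--) {
            if (need_write) {
                puts("clean UNDI MF sections");  /* runs exactly once */
                need_write = false;
            }
            /* ...poll BRB occupancy here... */
        }
        return 0;
    }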
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 81cc2d9831c2..b8078d50261b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2695,7 +2695,7 @@ out:
2695 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2695 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2696 } 2696 }
2697 2697
2698 return 0; 2698 return rc;
2699} 2699}
2700 2700
2701int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2701int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0c067e8564dd..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
747out: 747out:
748 bnx2x_vfpf_finalize(bp, &req->first_tlv); 748 bnx2x_vfpf_finalize(bp, &req->first_tlv);
749 749
750 return 0; 750 return rc;
751} 751}
752 752
753/* request pf to config rss table for vf queues*/ 753/* request pf to config rss table for vf queues*/
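Both bnx2x fixes above are the same one-liner: the function recorded a failure in rc, jumped to its out: cleanup path, and then returned a hard-coded 0, silently reporting success. The corrected shape:

    #include <stdio.h>

    static int do_config(int fail)
    {
        int rc = 0;

        if (fail) {
            rc = -5;          /* -EIO-style error recorded mid-function */
            goto out;
        }
        /* ...main work... */
    out:
        /* cleanup that must always run */
        return rc;            /* was "return 0", which swallowed rc */
    }

    int main(void)
    {
        printf("%d %d\n", do_config(0), do_config(1));
        return 0;
    }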
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
1 /*
2 * drivers/net/ethernet/beckhoff/ec_bhf.c
3 *
4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17/* This is a driver for EtherCAT master module present on CCAT FPGA.
18 * Those can be found on Beckhoff CX50xx industrial PCs.
19 */
20
21#if 0
22#define DEBUG
23#endif
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/ip.h>
33#include <linux/skbuff.h>
34#include <linux/hrtimer.h>
35#include <linux/interrupt.h>
36#include <linux/stat.h>
37
38#define TIMER_INTERVAL_NSEC 20000
39
40#define INFO_BLOCK_SIZE 0x10
41#define INFO_BLOCK_TYPE 0x0
42#define INFO_BLOCK_REV 0x2
43#define INFO_BLOCK_BLK_CNT 0x4
44#define INFO_BLOCK_TX_CHAN 0x4
45#define INFO_BLOCK_RX_CHAN 0x5
46#define INFO_BLOCK_OFFSET 0x8
47
48#define EC_MII_OFFSET 0x4
49#define EC_FIFO_OFFSET 0x8
50#define EC_MAC_OFFSET 0xc
51
52#define MAC_FRAME_ERR_CNT 0x0
53#define MAC_RX_ERR_CNT 0x1
54#define MAC_CRC_ERR_CNT 0x2
55#define MAC_LNK_LST_ERR_CNT 0x3
56#define MAC_TX_FRAME_CNT 0x10
57#define MAC_RX_FRAME_CNT 0x14
58#define MAC_TX_FIFO_LVL 0x20
59#define MAC_DROPPED_FRMS 0x28
60#define MAC_CONNECTED_CCAT_FLAG 0x78
61
62#define MII_MAC_ADDR 0x8
63#define MII_MAC_FILT_FLAG 0xe
64#define MII_LINK_STATUS 0xf
65
66#define FIFO_TX_REG 0x0
67#define FIFO_TX_RESET 0x8
68#define FIFO_RX_REG 0x10
69#define FIFO_RX_ADDR_VALID (1u << 31)
70#define FIFO_RX_RESET 0x18
71
72#define DMA_CHAN_OFFSET 0x1000
73#define DMA_CHAN_SIZE 0x8
74
75#define DMA_WINDOW_SIZE_MASK 0xfffffffc
76
77static struct pci_device_id ids[] = {
78 { PCI_DEVICE(0x15ec, 0x5000), },
79 { 0, }
80};
81MODULE_DEVICE_TABLE(pci, ids);
82
83struct rx_header {
84#define RXHDR_NEXT_ADDR_MASK 0xffffffu
85#define RXHDR_NEXT_VALID (1u << 31)
86 __le32 next;
87#define RXHDR_NEXT_RECV_FLAG 0x1
88 __le32 recv;
89#define RXHDR_LEN_MASK 0xfffu
90 __le16 len;
91 __le16 port;
92 __le32 reserved;
93 u8 timestamp[8];
94} __packed;
95
96#define PKT_PAYLOAD_SIZE 0x7e8
97struct rx_desc {
98 struct rx_header header;
99 u8 data[PKT_PAYLOAD_SIZE];
100} __packed;
101
102struct tx_header {
103 __le16 len;
104#define TX_HDR_PORT_0 0x1
105#define TX_HDR_PORT_1 0x2
106 u8 port;
107 u8 ts_enable;
108#define TX_HDR_SENT 0x1
109 __le32 sent;
110 u8 timestamp[8];
111} __packed;
112
113struct tx_desc {
114 struct tx_header header;
115 u8 data[PKT_PAYLOAD_SIZE];
116} __packed;
117
118#define FIFO_SIZE 64
119
120static long polling_frequency = TIMER_INTERVAL_NSEC;
121
122struct bhf_dma {
123 u8 *buf;
124 size_t len;
125 dma_addr_t buf_phys;
126
127 u8 *alloc;
128 size_t alloc_len;
129 dma_addr_t alloc_phys;
130};
131
132struct ec_bhf_priv {
133 struct net_device *net_dev;
134
135 struct pci_dev *dev;
136
 137 void __iomem *io;
 138 void __iomem *dma_io;
139
140 struct hrtimer hrtimer;
141
142 int tx_dma_chan;
143 int rx_dma_chan;
 144 void __iomem *ec_io;
 145 void __iomem *fifo_io;
 146 void __iomem *mii_io;
 147 void __iomem *mac_io;
148
149 struct bhf_dma rx_buf;
150 struct rx_desc *rx_descs;
151 int rx_dnext;
152 int rx_dcount;
153
154 struct bhf_dma tx_buf;
155 struct tx_desc *tx_descs;
156 int tx_dcount;
157 int tx_dnext;
158
159 u64 stat_rx_bytes;
160 u64 stat_tx_bytes;
161};
162
163#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
164
165#define ETHERCAT_MASTER_ID 0x14
166
167static void ec_bhf_print_status(struct ec_bhf_priv *priv)
168{
169 struct device *dev = PRIV_TO_DEV(priv);
170
171 dev_dbg(dev, "Frame error counter: %d\n",
172 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
173 dev_dbg(dev, "RX error counter: %d\n",
174 ioread8(priv->mac_io + MAC_RX_ERR_CNT));
175 dev_dbg(dev, "CRC error counter: %d\n",
176 ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
177 dev_dbg(dev, "TX frame counter: %d\n",
178 ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
179 dev_dbg(dev, "RX frame counter: %d\n",
180 ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
181 dev_dbg(dev, "TX fifo level: %d\n",
182 ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
183 dev_dbg(dev, "Dropped frames: %d\n",
184 ioread8(priv->mac_io + MAC_DROPPED_FRMS));
185 dev_dbg(dev, "Connected with CCAT slot: %d\n",
186 ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
187 dev_dbg(dev, "Link status: %d\n",
188 ioread8(priv->mii_io + MII_LINK_STATUS));
189}
190
191static void ec_bhf_reset(struct ec_bhf_priv *priv)
192{
193 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
194 iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
195 iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
196 iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
197 iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
198 iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
199 iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
200
201 iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
202 iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
203
204 iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
205}
206
207static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
208{
209 u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
210 u32 addr = (u8 *)desc - priv->tx_buf.buf;
211
212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
213
214 dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
215}
216
217static int ec_bhf_desc_sent(struct tx_desc *desc)
218{
219 return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
220}
221
222static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
223{
224 if (unlikely(netif_queue_stopped(priv->net_dev))) {
225 /* Make sure that we perceive changes to tx_dnext. */
226 smp_rmb();
227
228 if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
229 netif_wake_queue(priv->net_dev);
230 }
231}
232
233static int ec_bhf_pkt_received(struct rx_desc *desc)
234{
235 return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
236}
237
238static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
239{
240 iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
241 priv->fifo_io + FIFO_RX_REG);
242}
243
244static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
245{
246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
247 struct device *dev = PRIV_TO_DEV(priv);
248
249 while (ec_bhf_pkt_received(desc)) {
250 int pkt_size = (le16_to_cpu(desc->header.len) &
251 RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
252 u8 *data = desc->data;
253 struct sk_buff *skb;
254
255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
256 dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
257
258 if (skb) {
259 memcpy(skb_put(skb, pkt_size), data, pkt_size);
260 skb->protocol = eth_type_trans(skb, priv->net_dev);
261 dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
262
263 priv->stat_rx_bytes += pkt_size;
264
265 netif_rx(skb);
266 } else {
267 dev_err_ratelimited(dev,
268 "Couldn't allocate a skb_buff for a packet of size %u\n",
269 pkt_size);
270 }
271
272 desc->header.recv = 0;
273
274 ec_bhf_add_rx_desc(priv, desc);
275
276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
277 desc = &priv->rx_descs[priv->rx_dnext];
278 }
279
280}
281
282static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
283{
284 struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
285 hrtimer);
286 ec_bhf_process_rx(priv);
287 ec_bhf_process_tx(priv);
288
289 if (!netif_running(priv->net_dev))
290 return HRTIMER_NORESTART;
291
292 hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
293 return HRTIMER_RESTART;
294}
295
296static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
297{
298 struct device *dev = PRIV_TO_DEV(priv);
299 unsigned block_count, i;
 300 void __iomem *ec_info;
301
302 dev_dbg(dev, "Info block:\n");
303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
304 dev_dbg(dev, "Revision of function: %x\n",
305 (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
306
307 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
308 dev_dbg(dev, "Number of function blocks: %x\n", block_count);
309
310 for (i = 0; i < block_count; i++) {
311 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
312 INFO_BLOCK_TYPE);
313 if (type == ETHERCAT_MASTER_ID)
314 break;
315 }
316 if (i == block_count) {
317 dev_err(dev, "EtherCAT master with DMA block not found\n");
318 return -ENODEV;
319 }
320 dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
321
322 ec_info = priv->io + i * INFO_BLOCK_SIZE;
323 dev_dbg(dev, "EtherCAT master revision: %d\n",
324 ioread16(ec_info + INFO_BLOCK_REV));
325
326 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
327 dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
328 priv->tx_dma_chan);
329
330 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
331 dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
332 priv->rx_dma_chan);
333
334 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
335 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
336 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
337 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
338
339 dev_dbg(dev,
340 "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
341 priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
342
343 return 0;
344}
345
346static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
347 struct net_device *net_dev)
348{
349 struct ec_bhf_priv *priv = netdev_priv(net_dev);
350 struct tx_desc *desc;
351 unsigned len;
352
353 dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
354
355 desc = &priv->tx_descs[priv->tx_dnext];
356
357 skb_copy_and_csum_dev(skb, desc->data);
358 len = skb->len;
359
360 memset(&desc->header, 0, sizeof(desc->header));
361 desc->header.len = cpu_to_le16(len);
362 desc->header.port = TX_HDR_PORT_0;
363
364 ec_bhf_send_packet(priv, desc);
365
366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
367
368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
 369 /* Make sure that updates to tx_dnext are perceived
 370 * by the timer routine.
371 */
372 smp_wmb();
373
374 netif_stop_queue(net_dev);
375
376 dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
377 ec_bhf_print_status(priv);
378 }
379
380 priv->stat_tx_bytes += len;
381
382 dev_kfree_skb(skb);
383
384 return NETDEV_TX_OK;
385}
386
387static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
388 struct bhf_dma *buf,
389 int channel,
390 int size)
391{
392 int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
393 struct device *dev = PRIV_TO_DEV(priv);
394 u32 mask;
395
396 iowrite32(0xffffffff, priv->dma_io + offset);
397
398 mask = ioread32(priv->dma_io + offset);
399 mask &= DMA_WINDOW_SIZE_MASK;
400 dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
401
 402 /* We want a chunk of memory that is:
 403 * - aligned to the mask we just read
 404 * - at most one DMA window (~mask + 1 bytes) in size.
 405 * To guarantee that an aligned window exists inside the
 406 * allocation, we allocate twice that length and align within it.
 407 */
408 buf->len = min_t(int, ~mask + 1, size);
409 buf->alloc_len = 2 * buf->len;
410
411 dev_dbg(dev, "Allocating %d bytes for channel %d",
412 (int)buf->alloc_len, channel);
413 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
414 GFP_KERNEL);
415 if (buf->alloc == NULL) {
416 dev_info(dev, "Failed to allocate buffer\n");
417 return -ENOMEM;
418 }
419
420 buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
421 buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
422
423 iowrite32(0, priv->dma_io + offset + 4);
424 iowrite32(buf->buf_phys, priv->dma_io + offset);
 425 dev_dbg(dev, "Buffer: %x and read from dev: %x\n",
426 (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
427
428 return 0;
429}
430
431static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
432{
433 int i = 0;
434
435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
436 priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
437 priv->tx_dnext = 0;
438
439 for (i = 0; i < priv->tx_dcount; i++)
440 priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
441}
442
443static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
444{
445 int i;
446
447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
448 priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
449 priv->rx_dnext = 0;
450
451 for (i = 0; i < priv->rx_dcount; i++) {
452 struct rx_desc *desc = &priv->rx_descs[i];
453 u32 next;
454
455 if (i != priv->rx_dcount - 1)
456 next = (u8 *)(desc + 1) - priv->rx_buf.buf;
457 else
458 next = 0;
459 next |= RXHDR_NEXT_VALID;
460 desc->header.next = cpu_to_le32(next);
461 desc->header.recv = 0;
462 ec_bhf_add_rx_desc(priv, desc);
463 }
464}
465
466static int ec_bhf_open(struct net_device *net_dev)
467{
468 struct ec_bhf_priv *priv = netdev_priv(net_dev);
469 struct device *dev = PRIV_TO_DEV(priv);
470 int err = 0;
471
472 dev_info(dev, "Opening device\n");
473
474 ec_bhf_reset(priv);
475
476 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
477 FIFO_SIZE * sizeof(struct rx_desc));
478 if (err) {
479 dev_err(dev, "Failed to allocate rx buffer\n");
480 goto out;
481 }
482 ec_bhf_setup_rx_descs(priv);
483
484 dev_info(dev, "RX buffer allocated, address: %x\n",
485 (unsigned)priv->rx_buf.buf_phys);
486
487 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
488 FIFO_SIZE * sizeof(struct tx_desc));
489 if (err) {
490 dev_err(dev, "Failed to allocate tx buffer\n");
491 goto error_rx_free;
492 }
493 dev_dbg(dev, "TX buffer allocated, addres: %x\n",
494 (unsigned)priv->tx_buf.buf_phys);
495
496 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
497
498 ec_bhf_setup_tx_descs(priv);
499
500 netif_start_queue(net_dev);
501
502 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
503 priv->hrtimer.function = ec_bhf_timer_fun;
504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
505 HRTIMER_MODE_REL);
506
507 dev_info(PRIV_TO_DEV(priv), "Device open\n");
508
509 ec_bhf_print_status(priv);
510
511 return 0;
512
513error_rx_free:
514 dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
 515 priv->rx_buf.alloc_phys);
516out:
517 return err;
518}
519
520static int ec_bhf_stop(struct net_device *net_dev)
521{
522 struct ec_bhf_priv *priv = netdev_priv(net_dev);
523 struct device *dev = PRIV_TO_DEV(priv);
524
525 hrtimer_cancel(&priv->hrtimer);
526
527 ec_bhf_reset(priv);
528
529 netif_tx_disable(net_dev);
530
531 dma_free_coherent(dev, priv->tx_buf.alloc_len,
532 priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
533 dma_free_coherent(dev, priv->rx_buf.alloc_len,
534 priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
535
536 return 0;
537}
538
539static struct rtnl_link_stats64 *
540ec_bhf_get_stats(struct net_device *net_dev,
541 struct rtnl_link_stats64 *stats)
542{
543 struct ec_bhf_priv *priv = netdev_priv(net_dev);
544
545 stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
546 ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
547 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
548 stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
549 stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
550 stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
551
552 stats->tx_bytes = priv->stat_tx_bytes;
553 stats->rx_bytes = priv->stat_rx_bytes;
554
555 return stats;
556}
557
558static const struct net_device_ops ec_bhf_netdev_ops = {
559 .ndo_start_xmit = ec_bhf_start_xmit,
560 .ndo_open = ec_bhf_open,
561 .ndo_stop = ec_bhf_stop,
562 .ndo_get_stats64 = ec_bhf_get_stats,
563 .ndo_change_mtu = eth_change_mtu,
564 .ndo_validate_addr = eth_validate_addr,
565 .ndo_set_mac_address = eth_mac_addr
566};
567
568static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
569{
570 struct net_device *net_dev;
571 struct ec_bhf_priv *priv;
 572 void __iomem *dma_io;
 573 void __iomem *io;
574 int err = 0;
575
576 err = pci_enable_device(dev);
577 if (err)
578 return err;
579
580 pci_set_master(dev);
581
582 err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
583 if (err) {
584 dev_err(&dev->dev,
585 "Required dma mask not supported, failed to initialize device\n");
586 err = -EIO;
587 goto err_disable_dev;
588 }
589
590 err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
591 if (err) {
592 dev_err(&dev->dev,
593 "Required dma mask not supported, failed to initialize device\n");
594 goto err_disable_dev;
595 }
596
597 err = pci_request_regions(dev, "ec_bhf");
598 if (err) {
599 dev_err(&dev->dev, "Failed to request pci memory regions\n");
600 goto err_disable_dev;
601 }
602
603 io = pci_iomap(dev, 0, 0);
604 if (!io) {
605 dev_err(&dev->dev, "Failed to map pci card memory bar 0");
606 err = -EIO;
607 goto err_release_regions;
608 }
609
610 dma_io = pci_iomap(dev, 2, 0);
611 if (!dma_io) {
612 dev_err(&dev->dev, "Failed to map pci card memory bar 2");
613 err = -EIO;
614 goto err_unmap;
615 }
616
617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
 618 if (!net_dev) {
619 err = -ENOMEM;
620 goto err_unmap_dma_io;
621 }
622
623 pci_set_drvdata(dev, net_dev);
624 SET_NETDEV_DEV(net_dev, &dev->dev);
625
626 net_dev->features = 0;
627 net_dev->flags |= IFF_NOARP;
628
629 net_dev->netdev_ops = &ec_bhf_netdev_ops;
630
631 priv = netdev_priv(net_dev);
632 priv->net_dev = net_dev;
633 priv->io = io;
634 priv->dma_io = dma_io;
635 priv->dev = dev;
636
637 err = ec_bhf_setup_offsets(priv);
638 if (err < 0)
639 goto err_free_net_dev;
640
641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
642
643 dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
644 net_dev->dev_addr);
645
646 err = register_netdev(net_dev);
647 if (err < 0)
648 goto err_free_net_dev;
649
650 return 0;
651
652err_free_net_dev:
653 free_netdev(net_dev);
654err_unmap_dma_io:
655 pci_iounmap(dev, dma_io);
656err_unmap:
657 pci_iounmap(dev, io);
658err_release_regions:
659 pci_release_regions(dev);
660err_disable_dev:
661 pci_clear_master(dev);
662 pci_disable_device(dev);
663
664 return err;
665}
666
667static void ec_bhf_remove(struct pci_dev *dev)
668{
669 struct net_device *net_dev = pci_get_drvdata(dev);
670 struct ec_bhf_priv *priv = netdev_priv(net_dev);
671
672 unregister_netdev(net_dev);
673 free_netdev(net_dev);
674
675 pci_iounmap(dev, priv->dma_io);
676 pci_iounmap(dev, priv->io);
677 pci_release_regions(dev);
678 pci_clear_master(dev);
679 pci_disable_device(dev);
680}
681
682static struct pci_driver pci_driver = {
683 .name = "ec_bhf",
684 .id_table = ids,
685 .probe = ec_bhf_probe,
686 .remove = ec_bhf_remove,
687};
688
689static int __init ec_bhf_init(void)
690{
691 return pci_register_driver(&pci_driver);
692}
693
694static void __exit ec_bhf_exit(void)
695{
696 pci_unregister_driver(&pci_driver);
697}
698
699module_init(ec_bhf_init);
700module_exit(ec_bhf_exit);
701
702module_param(polling_frequency, long, S_IRUGO);
 703MODULE_PARM_DESC(polling_frequency, "Polling timer interval in ns");
704
705MODULE_LICENSE("GPL");
706MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
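
The double-size allocation in ec_bhf_alloc_dma_mem() above deserves a note: the device reports an alignment/window mask, and the driver allocates twice the window length so that an aligned window of the full length is guaranteed to fall somewhere inside the allocation. A minimal userspace sketch of the same arithmetic (the mask, size, and address values are illustrative, not read from hardware):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mask = 0xfffff000;        /* device window mask; window = ~mask + 1 = 4 KiB */
        uint32_t size = 0x3000;            /* bytes the caller asked for */
        uint32_t len = size < ~mask + 1 ? size : ~mask + 1;
        uint32_t alloc_len = 2 * len;      /* twice the window length... */

        /* ...so an aligned window of len bytes must fit inside the allocation */
        uint32_t alloc_phys = 0x12345678;  /* stand-in for the returned DMA handle */
        uint32_t buf_phys = (alloc_phys + len) & mask;

        printf("alloc %#x bytes at %#x, aligned window at %#x\n",
               alloc_len, alloc_phys, buf_phys);
        return 0;
    }
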
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a18645407d21..dc19bc5dec77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
4949 if (status) 4949 if (status)
4950 goto err; 4950 goto err;
4951 4951
4952 /* On some BE3 FW versions, after a HW reset,
4953 * interrupts will remain disabled for each function.
4954 * So, explicitly enable interrupts
4955 */
4956 be_intr_set(adapter, true);
4957
4952 /* tell fw we're ready to fire cmds */ 4958 /* tell fw we're ready to fire cmds */
4953 status = be_cmd_fw_init(adapter); 4959 status = be_cmd_fw_init(adapter);
4954 if (status) 4960 if (status)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
1988 return idx; 1988 return idx;
1989} 1989}
1990 1990
1991static void 1991static int
1992jme_fill_tx_map(struct pci_dev *pdev, 1992jme_fill_tx_map(struct pci_dev *pdev,
1993 struct txdesc *txdesc, 1993 struct txdesc *txdesc,
1994 struct jme_buffer_info *txbi, 1994 struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
2005 len, 2005 len,
2006 PCI_DMA_TODEVICE); 2006 PCI_DMA_TODEVICE);
2007 2007
2008 if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
2009 return -EINVAL;
2010
2008 pci_dma_sync_single_for_device(pdev, 2011 pci_dma_sync_single_for_device(pdev,
2009 dmaaddr, 2012 dmaaddr,
2010 len, 2013 len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
2021 2024
2022 txbi->mapping = dmaaddr; 2025 txbi->mapping = dmaaddr;
2023 txbi->len = len; 2026 txbi->len = len;
2027 return 0;
2024} 2028}
2025 2029
2026static void 2030static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
2031{
2032 struct jme_ring *txring = &(jme->txring[0]);
2033 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
2034 int mask = jme->tx_ring_mask;
2035 int j;
2036
 2037 for (j = 0; j < count; j++) {
2038 ctxbi = txbi + ((startidx + j + 2) & (mask));
2039 pci_unmap_page(jme->pdev,
2040 ctxbi->mapping,
2041 ctxbi->len,
2042 PCI_DMA_TODEVICE);
2043
2044 ctxbi->mapping = 0;
2045 ctxbi->len = 0;
2046 }
2047
2048}
2049
2050static int
2027jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) 2051jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2028{ 2052{
2029 struct jme_ring *txring = &(jme->txring[0]); 2053 struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2034 int mask = jme->tx_ring_mask; 2058 int mask = jme->tx_ring_mask;
2035 const struct skb_frag_struct *frag; 2059 const struct skb_frag_struct *frag;
2036 u32 len; 2060 u32 len;
2061 int ret = 0;
2037 2062
2038 for (i = 0 ; i < nr_frags ; ++i) { 2063 for (i = 0 ; i < nr_frags ; ++i) {
2039 frag = &skb_shinfo(skb)->frags[i]; 2064 frag = &skb_shinfo(skb)->frags[i];
2040 ctxdesc = txdesc + ((idx + i + 2) & (mask)); 2065 ctxdesc = txdesc + ((idx + i + 2) & (mask));
2041 ctxbi = txbi + ((idx + i + 2) & (mask)); 2066 ctxbi = txbi + ((idx + i + 2) & (mask));
2042 2067
2043 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, 2068 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
2044 skb_frag_page(frag), 2069 skb_frag_page(frag),
2045 frag->page_offset, skb_frag_size(frag), hidma); 2070 frag->page_offset, skb_frag_size(frag), hidma);
2071 if (ret) {
2072 jme_drop_tx_map(jme, idx, i);
2073 goto out;
2074 }
2075
2046 } 2076 }
2047 2077
2048 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 2078 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
2049 ctxdesc = txdesc + ((idx + 1) & (mask)); 2079 ctxdesc = txdesc + ((idx + 1) & (mask));
2050 ctxbi = txbi + ((idx + 1) & (mask)); 2080 ctxbi = txbi + ((idx + 1) & (mask));
2051 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), 2081 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
2052 offset_in_page(skb->data), len, hidma); 2082 offset_in_page(skb->data), len, hidma);
2083 if (ret)
2084 jme_drop_tx_map(jme, idx, i);
2085
2086out:
2087 return ret;
2053 2088
2054} 2089}
2055 2090
2091
2056static int 2092static int
2057jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) 2093jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2058{ 2094{
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2131 struct txdesc *txdesc; 2167 struct txdesc *txdesc;
2132 struct jme_buffer_info *txbi; 2168 struct jme_buffer_info *txbi;
2133 u8 flags; 2169 u8 flags;
2170 int ret = 0;
2134 2171
2135 txdesc = (struct txdesc *)txring->desc + idx; 2172 txdesc = (struct txdesc *)txring->desc + idx;
2136 txbi = txring->bufinf + idx; 2173 txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2155 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) 2192 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2156 jme_tx_csum(jme, skb, &flags); 2193 jme_tx_csum(jme, skb, &flags);
2157 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); 2194 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2158 jme_map_tx_skb(jme, skb, idx); 2195 ret = jme_map_tx_skb(jme, skb, idx);
2196 if (ret)
2197 return ret;
2198
2159 txdesc->desc1.flags = flags; 2199 txdesc->desc1.flags = flags;
2160 /* 2200 /*
2161 * Set tx buffer info after telling NIC to send 2201 * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2228 return NETDEV_TX_BUSY; 2268 return NETDEV_TX_BUSY;
2229 } 2269 }
2230 2270
2231 jme_fill_tx_desc(jme, skb, idx); 2271 if (jme_fill_tx_desc(jme, skb, idx))
2272 return NETDEV_TX_OK;
2232 2273
2233 jwrite32(jme, JME_TXCS, jme->reg_txcs | 2274 jwrite32(jme, JME_TXCS, jme->reg_txcs |
2234 TXCS_SELECT_QUEUE0 | 2275 TXCS_SELECT_QUEUE0 |
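
The jme change above follows the standard unwind pattern for scatter/gather TX mapping: if mapping fragment i fails, the i fragments mapped before it must be unmapped before the packet is dropped, or IOMMU entries leak. A condensed sketch of that shape; map_one() and unmap_one() are hypothetical stand-ins for pci_map_page()/pci_unmap_page(), not jme functions:

    struct frag_map { unsigned long long dma; unsigned int len; };

    int map_one(struct frag_map *f);      /* assumed: returns < 0 on mapping failure */
    void unmap_one(struct frag_map *f);

    static int map_all(struct frag_map *frags, int nr)
    {
        int i;

        for (i = 0; i < nr; i++) {
            if (map_one(&frags[i]) < 0)
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0)                  /* undo only what succeeded */
            unmap_one(&frags[i]);
        return -1;
    }
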
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..92d3249f63f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
1253 }, 1253 },
1254 { 1254 {
1255 .opcode = MLX4_CMD_UPDATE_QP, 1255 .opcode = MLX4_CMD_UPDATE_QP,
1256 .has_inbox = false, 1256 .has_inbox = true,
1257 .has_outbox = false, 1257 .has_outbox = false,
1258 .out_is_imm = false, 1258 .out_is_imm = false,
1259 .encode_slave_id = false, 1259 .encode_slave_id = false,
1260 .verify = NULL, 1260 .verify = NULL,
1261 .wrapper = mlx4_CMD_EPERM_wrapper 1261 .wrapper = mlx4_UPDATE_QP_wrapper
1262 }, 1262 },
1263 { 1263 {
1264 .opcode = MLX4_CMD_GET_OP_REQ, 1264 .opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f9c465101963..212cea440f90 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
1195 struct mlx4_cmd_mailbox *outbox, 1195 struct mlx4_cmd_mailbox *outbox,
1196 struct mlx4_cmd_info *cmd); 1196 struct mlx4_cmd_info *cmd);
1197 1197
1198int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
1199 struct mlx4_vhcr *vhcr,
1200 struct mlx4_cmd_mailbox *inbox,
1201 struct mlx4_cmd_mailbox *outbox,
1202 struct mlx4_cmd_info *cmd);
1203
1198int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1204int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1199 struct mlx4_vhcr *vhcr, 1205 struct mlx4_vhcr *vhcr,
1200 struct mlx4_cmd_mailbox *inbox, 1206 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..fbd32af89c7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
389 389
390EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 390EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
391 391
392#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
393int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
394 enum mlx4_update_qp_attr attr,
395 struct mlx4_update_qp_params *params)
396{
397 struct mlx4_cmd_mailbox *mailbox;
398 struct mlx4_update_qp_context *cmd;
399 u64 pri_addr_path_mask = 0;
400 int err = 0;
401
 402 if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
 403 return -EINVAL;
 404
 405 mailbox = mlx4_alloc_cmd_mailbox(dev);
 406 if (IS_ERR(mailbox))
 407 return PTR_ERR(mailbox);
 408
 409 cmd = (struct mlx4_update_qp_context *)mailbox->buf;
410
411 if (attr & MLX4_UPDATE_QP_SMAC) {
412 pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
413 cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
414 }
415
416 cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
417
418 err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
419 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
420 MLX4_CMD_NATIVE);
421
422 mlx4_free_cmd_mailbox(dev, mailbox);
423 return err;
424}
425EXPORT_SYMBOL_GPL(mlx4_update_qp);
426
392void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) 427void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
393{ 428{
394 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 429 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
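
For reference, the caller side of the new mlx4_update_qp() export is small. A hedged sketch of updating only a QP's source-MAC index (the dev/qp objects are assumed to come from the usual mlx4 setup paths):

    /* sketch: point an existing QP at a different registered SMAC index */
    static int set_qp_smac(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 smac_ix)
    {
        struct mlx4_update_qp_params params = {
            .smac_index = smac_ix,
        };

        return mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
    }
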
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1c3fdd4a1f7d..8f1254a79832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
3895 3895
3896} 3896}
3897 3897
3898#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3899int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3900 struct mlx4_vhcr *vhcr,
3901 struct mlx4_cmd_mailbox *inbox,
3902 struct mlx4_cmd_mailbox *outbox,
3903 struct mlx4_cmd_info *cmd_info)
3904{
3905 int err;
3906 u32 qpn = vhcr->in_modifier & 0xffffff;
3907 struct res_qp *rqp;
3908 u64 mac;
3909 unsigned port;
3910 u64 pri_addr_path_mask;
3911 struct mlx4_update_qp_context *cmd;
3912 int smac_index;
3913
3914 cmd = (struct mlx4_update_qp_context *)inbox->buf;
3915
3916 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
3917 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
3918 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
3919 return -EPERM;
3920
3921 /* Just change the smac for the QP */
3922 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3923 if (err) {
3924 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
3925 return err;
3926 }
3927
3928 port = (rqp->sched_queue >> 6 & 1) + 1;
3929 smac_index = cmd->qp_context.pri_path.grh_mylmc;
3930 err = mac_find_smac_ix_in_slave(dev, slave, port,
3931 smac_index, &mac);
3932 if (err) {
3933 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
3934 qpn, smac_index);
3935 goto err_mac;
3936 }
3937
3938 err = mlx4_cmd(dev, inbox->dma,
3939 vhcr->in_modifier, 0,
3940 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
3941 MLX4_CMD_NATIVE);
3942 if (err) {
3943 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
3944 goto err_mac;
3945 }
3946
3947err_mac:
3948 put_res(dev, slave, qpn, RES_QP);
3949 return err;
3950}
3951
3898int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3952int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3899 struct mlx4_vhcr *vhcr, 3953 struct mlx4_vhcr *vhcr,
3900 struct mlx4_cmd_mailbox *inbox, 3954 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1719 tx_ring->producer; 1719 tx_ring->producer;
1720} 1720}
1721 1721
1722static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
1723 struct net_device *netdev)
1724{
1725 int err;
1726
1727 netdev->num_tx_queues = adapter->drv_tx_rings;
1728 netdev->real_num_tx_queues = adapter->drv_tx_rings;
1729
1730 err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
1731 if (err)
1732 netdev_err(netdev, "failed to set %d Tx queues\n",
1733 adapter->drv_tx_rings);
1734
1735 return err;
1736}
1737
1738struct qlcnic_nic_template { 1722struct qlcnic_nic_template {
1739 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1723 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1740 int (*config_led) (struct qlcnic_adapter *, u32, u32); 1724 int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bc914859e38..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
2206 ahw->max_uc_count = count; 2206 ahw->max_uc_count = count;
2207} 2207}
2208 2208
2209static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
2210 u8 tx_queues, u8 rx_queues)
2211{
2212 struct net_device *netdev = adapter->netdev;
2213 int err = 0;
2214
2215 if (tx_queues) {
2216 err = netif_set_real_num_tx_queues(netdev, tx_queues);
2217 if (err) {
2218 netdev_err(netdev, "failed to set %d Tx queues\n",
2219 tx_queues);
2220 return err;
2221 }
2222 }
2223
2224 if (rx_queues) {
2225 err = netif_set_real_num_rx_queues(netdev, rx_queues);
2226 if (err)
2227 netdev_err(netdev, "failed to set %d Rx queues\n",
2228 rx_queues);
2229 }
2230
2231 return err;
2232}
2233
2209int 2234int
2210qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, 2235qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2211 int pci_using_dac) 2236 int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2269 netdev->priv_flags |= IFF_UNICAST_FLT; 2294 netdev->priv_flags |= IFF_UNICAST_FLT;
2270 netdev->irq = adapter->msix_entries[0].vector; 2295 netdev->irq = adapter->msix_entries[0].vector;
2271 2296
2272 err = qlcnic_set_real_num_queues(adapter, netdev); 2297 err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
2298 adapter->drv_sds_rings);
2273 if (err) 2299 if (err)
2274 return err; 2300 return err;
2275 2301
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
2943 tx_ring->tx_stats.xmit_called, 2969 tx_ring->tx_stats.xmit_called,
2944 tx_ring->tx_stats.xmit_on, 2970 tx_ring->tx_stats.xmit_on,
2945 tx_ring->tx_stats.xmit_off); 2971 tx_ring->tx_stats.xmit_off);
2972
2973 if (tx_ring->crb_intr_mask)
2974 netdev_info(netdev, "crb_intr_mask=%d\n",
2975 readl(tx_ring->crb_intr_mask));
2976
2946 netdev_info(netdev, 2977 netdev_info(netdev,
2947 "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", 2978 "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
2948 readl(tx_ring->crb_intr_mask),
2949 readl(tx_ring->crb_cmd_producer), 2979 readl(tx_ring->crb_cmd_producer),
2950 tx_ring->producer, tx_ring->sw_consumer, 2980 tx_ring->producer, tx_ring->sw_consumer,
2951 le32_to_cpu(*(tx_ring->hw_consumer))); 2981 le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
3978int qlcnic_setup_rings(struct qlcnic_adapter *adapter) 4008int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
3979{ 4009{
3980 struct net_device *netdev = adapter->netdev; 4010 struct net_device *netdev = adapter->netdev;
4011 u8 tx_rings, rx_rings;
3981 int err; 4012 int err;
3982 4013
3983 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) 4014 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3984 return -EBUSY; 4015 return -EBUSY;
3985 4016
4017 tx_rings = adapter->drv_tss_rings;
4018 rx_rings = adapter->drv_rss_rings;
4019
3986 netif_device_detach(netdev); 4020 netif_device_detach(netdev);
4021
4022 err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
4023 if (err)
4024 goto done;
4025
3987 if (netif_running(netdev)) 4026 if (netif_running(netdev))
3988 __qlcnic_down(adapter, netdev); 4027 __qlcnic_down(adapter, netdev);
3989 4028
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
4003 return err; 4042 return err;
4004 } 4043 }
4005 4044
4006 netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); 4045 /* Check if we need to update real_num_{tx|rx}_queues because
 4046 * qlcnic_setup_intr() may change the Tx/Rx ring sizes
4047 */
4048 if ((tx_rings != adapter->drv_tx_rings) ||
4049 (rx_rings != adapter->drv_sds_rings)) {
4050 err = qlcnic_set_real_num_queues(adapter,
4051 adapter->drv_tx_rings,
4052 adapter->drv_sds_rings);
4053 if (err)
4054 goto done;
4055 }
4007 4056
4008 if (qlcnic_83xx_check(adapter)) { 4057 if (qlcnic_83xx_check(adapter)) {
4009 qlcnic_83xx_initialize_nic(adapter, 1); 4058 qlcnic_83xx_initialize_nic(adapter, 1);
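
The qlcnic refactor funnels both queue-count updates through one helper so that the stack's real_num_{tx,rx}_queues always track the rings the driver actually configured, including after qlcnic_setup_intr() has resized them. Stripped of the qlcnic plumbing, the underlying pattern is roughly:

    /* sketch: keep the stack's queue counts in step with the driver's ring counts */
    static int sync_queue_counts(struct net_device *netdev, u8 tx, u8 rx)
    {
        int err;

        err = netif_set_real_num_tx_queues(netdev, tx);
        if (err)
            return err;

        return netif_set_real_num_rx_queues(netdev, rx);
    }
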
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
156 efx->net_dev->rx_cpu_rmap = NULL; 156 efx->net_dev->rx_cpu_rmap = NULL;
157#endif 157#endif
158 158
159 /* Disable MSI/MSI-X interrupts */ 159 if (EFX_INT_MODE_USE_MSI(efx)) {
160 efx_for_each_channel(channel, efx) 160 /* Disable MSI/MSI-X interrupts */
161 free_irq(channel->irq, &efx->msi_context[channel->channel]); 161 efx_for_each_channel(channel, efx)
162 162 free_irq(channel->irq,
163 /* Disable legacy interrupt */ 163 &efx->msi_context[channel->channel]);
164 if (efx->legacy_irq) 164 } else {
165 /* Disable legacy interrupt */
165 free_irq(efx->legacy_irq, efx); 166 free_irq(efx->legacy_irq, efx);
167 }
166} 168}
167 169
168/* Register dump */ 170/* Register dump */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
1704 if (ret) { 1704 if (ret) {
1705 pr_err("%s: Cannot attach to PHY (error: %d)\n", 1705 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1706 __func__, ret); 1706 __func__, ret);
1707 goto phy_error; 1707 return ret;
1708 } 1708 }
1709 } 1709 }
1710 1710
@@ -1779,8 +1779,6 @@ init_error:
1779dma_desc_error: 1779dma_desc_error:
1780 if (priv->phydev) 1780 if (priv->phydev)
1781 phy_disconnect(priv->phydev); 1781 phy_disconnect(priv->phydev);
1782phy_error:
1783 clk_disable_unprepare(priv->stmmac_clk);
1784 1782
1785 return ret; 1783 return ret;
1786} 1784}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
246 int i; 246 int i;
247 247
248 for (i = 0; i < N_TX_RINGS; i++) 248 for (i = 0; i < N_TX_RINGS; i++)
249 spin_lock(&cp->tx_lock[i]); 249 spin_lock_nested(&cp->tx_lock[i], i);
250} 250}
251 251
252static inline void cas_lock_all(struct cas *cp) 252static inline void cas_lock_all(struct cas *cp)
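
The cassini one-liner quiets a false lockdep report: acquiring N spinlocks of the same lock class in a loop looks like recursive locking unless each acquisition is annotated with its own subclass. The pattern in isolation (lockdep supports at most 8 subclasses, so this only works for small fixed arrays such as N_TX_RINGS):

    /* sketch: lockdep-safe acquisition of an array of same-class locks */
    static void lock_all(spinlock_t *locks, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            spin_lock_nested(&locks[i], i);   /* subclass = array index */
    }

    static void unlock_all(spinlock_t *locks, int n)
    {
        while (n--)
            spin_unlock(&locks[n]);           /* release in reverse order */
    }
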
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1871 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1871 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
1872 phyid = be32_to_cpup(parp+1); 1872 phyid = be32_to_cpup(parp+1);
1873 mdio = of_find_device_by_node(mdio_node); 1873 mdio = of_find_device_by_node(mdio_node);
1874 1874 of_node_put(mdio_node);
1875 if (strncmp(mdio->name, "gpio", 4) == 0) { 1875 if (!mdio) {
1876 /* GPIO bitbang MDIO driver attached */ 1876 pr_err("Missing mdio platform device\n");
1877 struct mii_bus *bus = dev_get_drvdata(&mdio->dev); 1877 return -EINVAL;
1878
1879 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1880 PHY_ID_FMT, bus->id, phyid);
1881 } else {
1882 /* davinci MDIO driver attached */
1883 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1884 PHY_ID_FMT, mdio->name, phyid);
1885 } 1878 }
1879 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1880 PHY_ID_FMT, mdio->name, phyid);
1886 1881
1887 mac_addr = of_get_mac_address(slave_node); 1882 mac_addr = of_get_mac_address(slave_node);
1888 if (mac_addr) 1883 if (mac_addr)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b0e2865a6810..d53e299ae1d9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
458 struct macvlan_dev *vlan = netdev_priv(dev); 458 struct macvlan_dev *vlan = netdev_priv(dev);
459 struct net_device *lowerdev = vlan->lowerdev; 459 struct net_device *lowerdev = vlan->lowerdev;
460 460
461 if (change & IFF_ALLMULTI) 461 if (dev->flags & IFF_UP) {
462 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); 462 if (change & IFF_ALLMULTI)
463 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
464 }
463} 465}
464 466
465static void macvlan_set_mac_lists(struct net_device *dev) 467static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
515#define MACVLAN_STATE_MASK \ 517#define MACVLAN_STATE_MASK \
516 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 518 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
517 519
520static int macvlan_get_nest_level(struct net_device *dev)
521{
522 return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
523}
524
518static void macvlan_set_lockdep_class_one(struct net_device *dev, 525static void macvlan_set_lockdep_class_one(struct net_device *dev,
519 struct netdev_queue *txq, 526 struct netdev_queue *txq,
520 void *_unused) 527 void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
525 532
526static void macvlan_set_lockdep_class(struct net_device *dev) 533static void macvlan_set_lockdep_class(struct net_device *dev)
527{ 534{
528 lockdep_set_class(&dev->addr_list_lock, 535 lockdep_set_class_and_subclass(&dev->addr_list_lock,
529 &macvlan_netdev_addr_lock_key); 536 &macvlan_netdev_addr_lock_key,
537 macvlan_get_nest_level(dev));
530 netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); 538 netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
531} 539}
532 540
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
721 .ndo_fdb_add = macvlan_fdb_add, 729 .ndo_fdb_add = macvlan_fdb_add,
722 .ndo_fdb_del = macvlan_fdb_del, 730 .ndo_fdb_del = macvlan_fdb_del,
723 .ndo_fdb_dump = ndo_dflt_fdb_dump, 731 .ndo_fdb_dump = ndo_dflt_fdb_dump,
732 .ndo_get_lock_subclass = macvlan_get_nest_level,
724}; 733};
725 734
726void macvlan_common_setup(struct net_device *dev) 735void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
849 vlan->dev = dev; 858 vlan->dev = dev;
850 vlan->port = port; 859 vlan->port = port;
851 vlan->set_features = MACVLAN_FEATURES; 860 vlan->set_features = MACVLAN_FEATURES;
861 vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
852 862
853 vlan->mode = MACVLAN_MODE_VEPA; 863 vlan->mode = MACVLAN_MODE_VEPA;
854 if (data && data[IFLA_MACVLAN_MODE]) 864 if (data && data[IFLA_MACVLAN_MODE])
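
The macvlan fix generalizes the same lockdep idea to stacked devices: every macvlan shares addr_list_lock's class, so each device records how deep it sits above the real device and hands that depth to lockdep as the subclass. A toy model of the depth bookkeeping (plain C, not the kernel's types):

    #include <stdio.h>

    /* toy: each stacked device sits one nest level above its lower device */
    struct toy_dev {
        struct toy_dev *lower;
        int nest_level;
    };

    static int get_nest_level(struct toy_dev *d)
    {
        return d->lower ? d->lower->nest_level + 1 : 0;
    }

    int main(void)
    {
        struct toy_dev eth = { 0 }, mv0, mv1;

        mv0.lower = &eth; mv0.nest_level = get_nest_level(&mv0);
        mv1.lower = &mv0; mv1.nest_level = get_nest_level(&mv1);

        /* these are the subclasses that would be passed to lockdep */
        printf("eth=%d mv0=%d mv1=%d\n",
               eth.nest_level, mv0.nest_level, mv1.nest_level);
        return 0;
    }
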
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 9c4defdec67b..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
215 if (pdev->dev.of_node) { 215 if (pdev->dev.of_node) {
216 pdata = mdio_gpio_of_get_data(pdev); 216 pdata = mdio_gpio_of_get_data(pdev);
217 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); 217 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
218 if (bus_id < 0) {
219 dev_warn(&pdev->dev, "failed to get alias id\n");
220 bus_id = 0;
221 }
218 } else { 222 } else {
219 pdata = dev_get_platdata(&pdev->dev); 223 pdata = dev_get_platdata(&pdev->dev);
220 bus_id = pdev->id; 224 bus_id = pdev->id;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a972056b2249..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
715 struct delayed_work *dwork = to_delayed_work(work); 715 struct delayed_work *dwork = to_delayed_work(work);
716 struct phy_device *phydev = 716 struct phy_device *phydev =
717 container_of(dwork, struct phy_device, state_queue); 717 container_of(dwork, struct phy_device, state_queue);
718 int needs_aneg = 0, do_suspend = 0; 718 bool needs_aneg = false, do_suspend = false, do_resume = false;
719 int err = 0; 719 int err = 0;
720 720
721 mutex_lock(&phydev->lock); 721 mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
727 case PHY_PENDING: 727 case PHY_PENDING:
728 break; 728 break;
729 case PHY_UP: 729 case PHY_UP:
730 needs_aneg = 1; 730 needs_aneg = true;
731 731
732 phydev->link_timeout = PHY_AN_TIMEOUT; 732 phydev->link_timeout = PHY_AN_TIMEOUT;
733 733
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
757 phydev->adjust_link(phydev->attached_dev); 757 phydev->adjust_link(phydev->attached_dev);
758 758
759 } else if (0 == phydev->link_timeout--) 759 } else if (0 == phydev->link_timeout--)
760 needs_aneg = 1; 760 needs_aneg = true;
761 break; 761 break;
762 case PHY_NOLINK: 762 case PHY_NOLINK:
763 err = phy_read_status(phydev); 763 err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
791 netif_carrier_on(phydev->attached_dev); 791 netif_carrier_on(phydev->attached_dev);
792 } else { 792 } else {
793 if (0 == phydev->link_timeout--) 793 if (0 == phydev->link_timeout--)
794 needs_aneg = 1; 794 needs_aneg = true;
795 } 795 }
796 796
797 phydev->adjust_link(phydev->attached_dev); 797 phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
827 phydev->link = 0; 827 phydev->link = 0;
828 netif_carrier_off(phydev->attached_dev); 828 netif_carrier_off(phydev->attached_dev);
829 phydev->adjust_link(phydev->attached_dev); 829 phydev->adjust_link(phydev->attached_dev);
830 do_suspend = 1; 830 do_suspend = true;
831 } 831 }
832 break; 832 break;
833 case PHY_RESUMING: 833 case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
876 } 876 }
877 phydev->adjust_link(phydev->attached_dev); 877 phydev->adjust_link(phydev->attached_dev);
878 } 878 }
879 do_resume = true;
879 break; 880 break;
880 } 881 }
881 882
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
883 884
884 if (needs_aneg) 885 if (needs_aneg)
885 err = phy_start_aneg(phydev); 886 err = phy_start_aneg(phydev);
886 887 else if (do_suspend)
887 if (do_suspend)
888 phy_suspend(phydev); 888 phy_suspend(phydev);
889 else if (do_resume)
890 phy_resume(phydev);
889 891
890 if (err < 0) 892 if (err < 0)
891 phy_error(phydev); 893 phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..4987a1c6dc52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
614 err = phy_init_hw(phydev); 614 err = phy_init_hw(phydev);
615 if (err) 615 if (err)
616 phy_detach(phydev); 616 phy_detach(phydev);
617 617 else
618 phy_resume(phydev); 618 phy_resume(phydev);
619 619
620 return err; 620 return err;
621} 621}
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
120 cdc_ncm_unbind(dev, intf); 120 cdc_ncm_unbind(dev, intf);
121} 121}
122 122
123/* verify that the ethernet protocol is IPv4 or IPv6 */
124static bool is_ip_proto(__be16 proto)
125{
126 switch (proto) {
127 case htons(ETH_P_IP):
128 case htons(ETH_P_IPV6):
129 return true;
130 }
131 return false;
132}
123 133
124static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) 134static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
125{ 135{
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
128 struct cdc_ncm_ctx *ctx = info->ctx; 138 struct cdc_ncm_ctx *ctx = info->ctx;
129 __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); 139 __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
130 u16 tci = 0; 140 u16 tci = 0;
141 bool is_ip;
131 u8 *c; 142 u8 *c;
132 143
133 if (!ctx) 144 if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
137 if (skb->len <= ETH_HLEN) 148 if (skb->len <= ETH_HLEN)
138 goto error; 149 goto error;
139 150
151 /* Some applications using e.g. packet sockets will
152 * bypass the VLAN acceleration and create tagged
153 * ethernet frames directly. We primarily look for
154 * the accelerated out-of-band tag, but fall back if
155 * required
156 */
157 skb_reset_mac_header(skb);
158 if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
159 __vlan_get_tag(skb, &tci) == 0) {
160 is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
161 skb_pull(skb, VLAN_ETH_HLEN);
162 } else {
163 is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
164 skb_pull(skb, ETH_HLEN);
165 }
166
140 /* mapping VLANs to MBIM sessions: 167 /* mapping VLANs to MBIM sessions:
141 * no tag => IPS session <0> 168 * no tag => IPS session <0>
142 * 1 - 255 => IPS session <vlanid> 169 * 1 - 255 => IPS session <vlanid>
143 * 256 - 511 => DSS session <vlanid - 256> 170 * 256 - 511 => DSS session <vlanid - 256>
144 * 512 - 4095 => unsupported, drop 171 * 512 - 4095 => unsupported, drop
145 */ 172 */
146 vlan_get_tag(skb, &tci);
147
148 switch (tci & 0x0f00) { 173 switch (tci & 0x0f00) {
149 case 0x0000: /* VLAN ID 0 - 255 */ 174 case 0x0000: /* VLAN ID 0 - 255 */
150 /* verify that datagram is IPv4 or IPv6 */ 175 if (!is_ip)
151 skb_reset_mac_header(skb);
152 switch (eth_hdr(skb)->h_proto) {
153 case htons(ETH_P_IP):
154 case htons(ETH_P_IPV6):
155 break;
156 default:
157 goto error; 176 goto error;
158 }
159 c = (u8 *)&sign; 177 c = (u8 *)&sign;
160 c[3] = tci; 178 c[3] = tci;
161 break; 179 break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
169 "unsupported tci=0x%04x\n", tci); 187 "unsupported tci=0x%04x\n", tci);
170 goto error; 188 goto error;
171 } 189 }
172 skb_pull(skb, ETH_HLEN);
173 } 190 }
174 191
175 spin_lock_bh(&ctx->mtx); 192 spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
204 return; 221 return;
205 222
206 /* need to send the NA on the VLAN dev, if any */ 223 /* need to send the NA on the VLAN dev, if any */
207 if (tci) 224 rcu_read_lock();
225 if (tci) {
208 netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), 226 netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
209 tci); 227 tci);
210 else 228 if (!netdev) {
229 rcu_read_unlock();
230 return;
231 }
232 } else {
211 netdev = dev->net; 233 netdev = dev->net;
212 if (!netdev) 234 }
213 return; 235 dev_hold(netdev);
236 rcu_read_unlock();
214 237
215 in6_dev = in6_dev_get(netdev); 238 in6_dev = in6_dev_get(netdev);
216 if (!in6_dev) 239 if (!in6_dev)
217 return; 240 goto out;
218 is_router = !!in6_dev->cnf.forwarding; 241 is_router = !!in6_dev->cnf.forwarding;
219 in6_dev_put(in6_dev); 242 in6_dev_put(in6_dev);
220 243
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
224 true /* solicited */, 247 true /* solicited */,
225 false /* override */, 248 false /* override */,
226 true /* inc_opt */); 249 true /* inc_opt */);
250out:
251 dev_put(netdev);
227} 252}
228 253
229static bool is_neigh_solicit(u8 *buf, size_t len) 254static bool is_neigh_solicit(u8 *buf, size_t len)
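
The VLAN-to-MBIM-session mapping in the tx_fixup above is a plain range split on the 12-bit VLAN ID, decided by the high nibble of its low 12 bits. A standalone sketch of just that decision, independent of the driver:

    #include <stdio.h>

    /* sketch of the driver's mapping:
     *   0-255    -> IPS session <vlanid>
     *   256-511  -> DSS session <vlanid - 256>
     *   512-4095 -> unsupported, drop
     */
    static const char *mbim_session(unsigned int vid)
    {
        switch (vid & 0x0f00) {
        case 0x0000:
            return "IPS";
        case 0x0100:
            return "DSS";
        default:
            return "unsupported";
        }
    }

    int main(void)
    {
        unsigned int ids[] = { 0, 42, 255, 256, 300, 1024 };
        unsigned int i;

        for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
            printf("vid %u -> %s\n", ids[i], mbim_session(ids[i]));
        return 0;
    }
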
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
95 95
96 if ((vif->type == NL80211_IFTYPE_AP || 96 if ((vif->type == NL80211_IFTYPE_AP ||
97 vif->type == NL80211_IFTYPE_MESH_POINT) && 97 vif->type == NL80211_IFTYPE_MESH_POINT) &&
98 bss_conf->enable_beacon) 98 bss_conf->enable_beacon) {
99 priv->reconfig_beacon = true; 99 priv->reconfig_beacon = true;
100 priv->rearm_ani = true;
101 }
100 102
101 if (bss_conf->assoc) { 103 if (bss_conf->assoc) {
102 priv->rearm_ani = true; 104 priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
257 259
258 ath9k_htc_ps_wakeup(priv); 260 ath9k_htc_ps_wakeup(priv);
259 261
262 ath9k_htc_stop_ani(priv);
260 del_timer_sync(&priv->tx.cleanup_timer); 263 del_timer_sync(&priv->tx.cleanup_timer);
261 ath9k_htc_tx_drain(priv); 264 ath9k_htc_tx_drain(priv);
262 265
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
4948 if (!err) { 4948 if (!err) {
4949 /* only set 2G bandwidth using bw_cap command */ 4949 /* only set 2G bandwidth using bw_cap command */
4950 band_bwcap.band = cpu_to_le32(WLC_BAND_2G); 4950 band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
4951 band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT); 4951 band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
4952 err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, 4952 err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
4953 sizeof(band_bwcap)); 4953 sizeof(band_bwcap));
4954 } else { 4954 } else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index fa858d548d13..0489314425cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
611 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); 611 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
612 612
613 if (IWL_MVM_BT_COEX_CORUNNING) { 613 if (IWL_MVM_BT_COEX_CORUNNING) {
614 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 | 614 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
615 BT_VALID_CORUN_LUT_40); 615 BT_VALID_CORUN_LUT_40);
616 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); 616 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
617 } 617 }
618 618
619 if (IWL_MVM_BT_COEX_MPLUT) { 619 if (IWL_MVM_BT_COEX_MPLUT) {
620 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); 620 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
621 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); 621 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
622 } 622 }
623 623
624 if (mvm->cfg->bt_shared_single_ant) 624 if (mvm->cfg->bt_shared_single_ant)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..d73a89ecd78a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -183,9 +183,9 @@ enum iwl_scan_type {
183 * this number of packets were received (typically 1) 183 * this number of packets were received (typically 1)
184 * @passive2active: is auto switching from passive to active during scan allowed 184 * @passive2active: is auto switching from passive to active during scan allowed
185 * @rxchain_sel_flags: RXON_RX_CHAIN_* 185 * @rxchain_sel_flags: RXON_RX_CHAIN_*
186 * @max_out_time: in usecs, max out of serving channel time 186 * @max_out_time: in TUs, max out of serving channel time
187 * @suspend_time: how long to pause scan when returning to service channel: 187 * @suspend_time: how long to pause scan when returning to service channel:
 188 * bits 0-19: beacon interval in usecs (suspend before executing) 188 * bits 0-19: beacon interval in TUs (suspend before executing)
189 * bits 20-23: reserved 189 * bits 20-23: reserved
190 * bits 24-31: number of beacons (suspend between channels) 190 * bits 24-31: number of beacons (suspend between channels)
191 * @rxon_flags: RXON_FLG_* 191 * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
383 * @quiet_plcp_th: quiet channel num of packets threshold 383 * @quiet_plcp_th: quiet channel num of packets threshold
384 * @good_CRC_th: passive to active promotion threshold 384 * @good_CRC_th: passive to active promotion threshold
385 * @rx_chain: RXON rx chain. 385 * @rx_chain: RXON rx chain.
 386 * @max_out_time: max uSec to be out of associated channel 386 * @max_out_time: max TUs to be out of associated channel
 387 * @suspend_time: pause scan this long when returning to service channel 387 * @suspend_time: pause scan this many TUs when returning to service channel
388 * @flags: RXON flags 388 * @flags: RXON flags
 389 * @filter_flags: RXON filter 389 * @filter_flags: RXON filter
390 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. 390 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f0cebf12c7b8..b41dc84e9431 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
 	if (ret)
 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
 		return;
 
-	ieee80211_iterate_active_interfaces(
+	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
 	mutex_lock(&mvm->mutex);
 
+	if (!iwl_mvm_is_idle(mvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	switch (mvm->scan_status) {
 	case IWL_MVM_SCAN_OS:
 		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
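The two mac80211.c changes above are coupled: the multicast iterator now uses the `_atomic` variant, whose callbacks run under RCU and therefore must not sleep, and the firmware command it issues becomes CMD_ASYNC, since a synchronous command would block waiting for the response. A hedged sketch of the constraint (callback body illustrative only):

	/* Callbacks passed to ieee80211_iterate_active_interfaces_atomic()
	 * execute in atomic context: no mutexes, no synchronous firmware
	 * I/O. Queue work or fire an async command; never wait here. */
	static void mc_iter_sketch(void *data, u8 *mac, struct ieee80211_vif *vif)
	{
		/* collect state only; defer anything that can sleep */
	}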
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..f1ec0986c3c9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 	return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 9f52c5b3f0ec..e1c838899363 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		return;
 	}
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
 	/* Disable last tx check if we are debugging with fixed rate */
 	if (lq_sta->dbg_fixed_rate) {
 		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..c28de54c75d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_bound);
-	/*
-	 * Under low latency traffic passive scan is fragmented meaning
-	 * that dwell on a particular channel will be fragmented. Each fragment
-	 * dwell time is 20ms and fragments period is 105ms. Skipping to next
-	 * channel will be delayed by the same period - 105ms. So suspend_time
-	 * parameter describing both fragments and channels skipping periods is
-	 * set to 105ms. This value is chosen so that overall passive scan
-	 * duration will not be too long. Max_out_time in this case is set to
-	 * 70ms, so for active scanning operating channel will be left for 70ms
-	 * while for passive still for 20ms (fragment dwell).
-	 */
-	if (global_bound) {
-		if (!iwl_mvm_low_latency(mvm)) {
-			params->suspend_time = ieee80211_tu_to_usec(100);
-			params->max_out_time = ieee80211_tu_to_usec(600);
-		} else {
-			params->suspend_time = ieee80211_tu_to_usec(105);
-			/* P2P doesn't support fragmented passive scan, so
-			 * configure max_out_time to be at least longest dwell
-			 * time for passive scan.
-			 */
-			if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-				params->max_out_time = ieee80211_tu_to_usec(70);
-				params->passive_fragmented = true;
-			} else {
-				u32 passive_dwell;
-
-				/*
-				 * Use band G so that passive channel dwell time
-				 * will be assigned with maximum value.
-				 */
-				band = IEEE80211_BAND_2GHZ;
-				passive_dwell = iwl_mvm_get_passive_dwell(band);
-				params->max_out_time =
-					ieee80211_tu_to_usec(passive_dwell);
-			}
-		}
-	}
+
+	if (!global_bound)
+		goto not_bound;
+
+	params->suspend_time = 100;
+	params->max_out_time = 600;
+
+	if (iwl_mvm_low_latency(mvm)) {
+		params->suspend_time = 250;
+		params->max_out_time = 250;
+	}
 
+not_bound:
+
 	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-		if (params->passive_fragmented)
-			params->dwell[band].passive = 20;
-		else
-			params->dwell[band].passive =
-				iwl_mvm_get_passive_dwell(band);
+		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
 		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
 								      n_ssids);
 	}
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
 	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
 	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
 	int head = 0;
-	int tail = band_2ghz + band_5ghz;
+	int tail = band_2ghz + band_5ghz - 1;
 	u32 ssid_bitmap;
 	int cmd_len;
 	int ret;
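The one-liner above is a classic off-by-one: with `n = band_2ghz + band_5ghz` channel entries, valid indices run 0..n-1, so a tail cursor that fills the array from the back must start at n-1, not n. A hypothetical two-ended fill showing the corrected indices:

	#include <stdio.h>

	int main(void)
	{
		int chan[6];
		int n = 6, head = 0, tail = n - 1;	/* starting at n was the bug */

		while (head <= tail) {
			chan[head++] = 24;	/* e.g. 2.4 GHz entries from the front */
			chan[tail--] = 5;	/* 5 GHz entries from the back */
		}
		for (int i = 0; i < n; i++)
			printf("%d ", chan[i]);
		printf("\n");
		return 0;
	}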
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..2180902266ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
 	return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	bool *idle = _data;
+
+	if (!vif->bss_conf.idle)
+		*idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+	bool idle = true;
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_idle_iter, &idle);
+
+	return idle;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..2365553f1ef7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+	trans->dev = &pdev->dev;
+	trans_pcie->pci_dev = pdev;
+	iwl_disable_interrupts(trans);
+
 	err = pci_enable_msi(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}
 
-	trans->dev = &pdev->dev;
-	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_pci_disable_msi;
 	}
 
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;
 
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_free_ict;
 	}
 
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
 	return trans;
 
 out_free_ict:
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 			       grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..20e9defa1060 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 	work_done = xenvif_tx_action(vif, budget);
 
 	if (work_done < budget) {
-		int more_to_do = 0;
-		unsigned long flags;
-
-		/* It is necessary to disable IRQ before calling
-		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-		 * lose event from the frontend.
-		 *
-		 * Consider:
-		 *   RING_HAS_UNCONSUMED_REQUESTS
-		 *   <frontend generates event to trigger napi_schedule>
-		 *   __napi_complete
-		 *
-		 * This handler is still in scheduled state so the
-		 * event has no effect at all. After __napi_complete
-		 * this handler is descheduled and cannot get
-		 * scheduled again. We lose event in this case and the ring
-		 * will be completely stalled.
-		 */
-
-		local_irq_save(flags);
-
-		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!more_to_do)
-			__napi_complete(napi);
-
-		local_irq_restore(flags);
+		napi_complete(napi);
+		xenvif_napi_schedule_or_enable_events(vif);
 	}
 
 	return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
 	enable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		enable_irq(vif->rx_irq);
-	xenvif_check_rx_xenvif(vif);
+	xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
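The open-coded final-check logic deleted from xenvif_poll() is not lost: it moves behind the renamed helper, which re-checks the ring after completion so an event raised by the frontend in the completion window cannot be missed. A simplified sketch of the complete-then-recheck pattern (the real helper is the renamed function later in this file's hunks):

	/* After napi_complete(), look at the ring one last time; if the
	 * frontend queued requests in the window, reschedule ourselves. */
	static void napi_done_or_reschedule(struct xenvif *vif,
					    struct napi_struct *napi)
	{
		int more_to_do;

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}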
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
 	u16 pending_idx = ubuf->desc;
 	struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 }
 
 /*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page were found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+						const int i,
+						const struct ubuf_info *ubuf)
+{
+	struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+	do {
+		u16 pending_idx = ubuf->desc;
+
+		if (skb_shinfo(skb)->frags[i].page.p ==
+		    foreign_vif->mmap_pages[pending_idx])
+			break;
+		ubuf = (struct ubuf_info *) ubuf->ctx;
+	} while (ubuf);
+
+	return ubuf;
+}
+
+/*
  * Prepare an SKB to be transmitted to the frontend.
  *
  * This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
-	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-	grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-	struct xenvif *foreign_vif = NULL;
+	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+	const struct ubuf_info *const head_ubuf = ubuf;
 
 	old_meta_prod = npo->meta_prod;
 
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	npo->copy_off = 0;
 	npo->copy_gref = req->gref;
 
-	if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-	    (ubuf->callback == &xenvif_zerocopy_callback)) {
-		int i = 0;
-		foreign_vif = ubuf_to_vif(ubuf);
-
-		do {
-			u16 pending_idx = ubuf->desc;
-			foreign_grefs[i++] =
-				foreign_vif->pending_tx_info[pending_idx].req.gref;
-			ubuf = (struct ubuf_info *) ubuf->ctx;
-		} while (ubuf);
-	}
-
 	data = skb->data;
 	while (data < skb_tail_pointer(skb)) {
 		unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	}
 
 	for (i = 0; i < nr_frags; i++) {
+		/* This variable also signals whether foreign_gref has a real
+		 * value or not.
+		 */
+		struct xenvif *foreign_vif = NULL;
+		grant_ref_t foreign_gref;
+
+		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+		    (ubuf->callback == &xenvif_zerocopy_callback)) {
+			const struct ubuf_info *const startpoint = ubuf;
+
+			/* Ideally ubuf points to the chain element which
+			 * belongs to this frag. Or if frags were removed from
+			 * the beginning, then shortly before it.
+			 */
+			ubuf = xenvif_find_gref(skb, i, ubuf);
+
+			/* Try again from the beginning of the list, if we
+			 * haven't tried from there. This only makes sense in
+			 * the unlikely event of reordering the original frags.
+			 * For injected local pages it's an unnecessary second
+			 * run.
+			 */
+			if (unlikely(!ubuf) && startpoint != head_ubuf)
+				ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+			if (likely(ubuf)) {
+				u16 pending_idx = ubuf->desc;
+
+				foreign_vif = ubuf_to_vif(ubuf);
+				foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+				/* Just a safety measure. If this was the last
+				 * element on the list, the for loop will
+				 * iterate again if a local page were added to
+				 * the end. Using head_ubuf here prevents the
+				 * second search on the chain. Or the original
+				 * frags changed order, but that's less likely.
+				 * In any way, ubuf shouldn't be NULL.
+				 */
+				ubuf = ubuf->ctx ?
+					(struct ubuf_info *) ubuf->ctx :
+					head_ubuf;
+			} else
+				/* This frag was a local page, added to the
+				 * array after the skb left netback.
+				 */
+				ubuf = head_ubuf;
+		}
 		xenvif_gop_frag_copy(vif, skb, npo,
 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				     skb_shinfo(skb)->frags[i].page_offset,
 				     &head,
 				     foreign_vif,
-				     foreign_grefs[i]);
+				     foreign_vif ? foreign_gref : UINT_MAX);
 	}
 
 	return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
 		notify_remote_via_irq(vif->rx_irq);
 }
 
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
 	int more_to_do;
 
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
 	struct xenvif *vif = (struct xenvif *)data;
 	tx_add_credit(vif);
-	xenvif_check_rx_xenvif(vif);
+	xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_tx_err(struct xenvif *vif,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 6d4ee22708c9..32e969d95319 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop)
 	if (!found)
 		return -ENODEV;
 
+	/* At early boot, bail out and defer setup to of_init() */
+	if (!of_kset)
+		return found ? 0 : -ENODEV;
+
 	/* Update the sysfs attribute */
 	sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
 	__of_add_property_sysfs(np, newprop);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 58499277903a..6efc2ec5e4db 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
 		return WRONG_BUS_FREQUENCY;
 	}
 
-	bsp = ctrl->pci_dev->bus->cur_bus_speed;
-	msp = ctrl->pci_dev->bus->max_bus_speed;
+	bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+	msp = ctrl->pci_dev->subordinate->max_bus_speed;
 
 	/* Check if there are other slots or devices on the same bus */
 	if (!list_empty(&ctrl->pci_dev->subordinate->devices))
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7325d43bf030..759475ef6ff3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3067,7 +3067,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
 	if (!pci_is_pcie(dev))
 		return 1;
 
-	return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
+	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
+				    PCI_EXP_DEVSTA_TRPND);
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
3073 3074
@@ -3109,7 +3110,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
3109 return 0; 3110 return 0;
3110 3111
3111 /* Wait for Transaction Pending bit clean */ 3112 /* Wait for Transaction Pending bit clean */
3112 if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP)) 3113 if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
3113 goto clear; 3114 goto clear;
3114 3115
3115 dev_err(&dev->dev, "transaction is not cleared; " 3116 dev_err(&dev->dev, "transaction is not cleared; "
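Both pci.c fixes share a cause: `pci_wait_for_pending()` polls an absolute offset in config space, while `PCI_EXP_DEVSTA` and `PCI_AF_STATUS` are offsets relative to their capability, so callers must add the capability base or they poll the wrong register. A sketch of the corrected call shape (error handling elided):

	int pos;

	/* PCIe device status: capability base + register offset */
	pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
			     PCI_EXP_DEVSTA_TRPND);

	/* Advanced Features status, after locating the AF capability */
	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (pos)
		pci_wait_for_pending(dev, pos + PCI_AF_STATUS,
				     PCI_AF_STATUS_TP);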
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 9802b67040cc..2c61281bebd7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
 	return GPIOF_DIR_IN;
 }
 
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-	return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
-static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-				     int value)
-{
-	return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
 static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
 {
 	struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
 		wmt_clearbits(data, reg_data_out, BIT(bit));
 }
 
+static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+				     int value)
+{
+	wmt_gpio_set_value(chip, offset, value);
+	return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
 static struct gpio_chip wmt_gpio_chip = {
 	.label = "gpio-wmt",
 	.owner = THIS_MODULE,
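The functions are moved below wmt_gpio_set_value() so that direction_output can drive the requested level before switching the pin to output: gpiolib expects the `value` argument to be honoured, and writing it first avoids briefly emitting whatever the data-out latch last held. With the fix, a consumer using the legacy API comes up at the requested level immediately (consumer label hypothetical):

	/* pin is driven high from the first moment it becomes an output */
	err = gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "enable");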
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6963bdf54175..6aea373547f6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,6 +6,7 @@ menu "PTP clock support"
 
 config PTP_1588_CLOCK
 	tristate "PTP clock support"
+	depends on NET
 	select PPS
 	select NET_PTP_CLASSIFY
 	help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
 	tristate "Intel PCH EG20T as PTP clock"
 	depends on X86 || COMPILE_TEST
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && NET
 	select PTP_1588_CLOCK
 	help
 	  This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
 	list_del(&rphy->list);
 	mutex_unlock(&sas_host->lock);
 
-	sas_bsg_remove(shost, rphy);
-
 	transport_destroy_device(dev);
 
 	put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
 	}
 
 	sas_rphy_unlink(rphy);
+	sas_bsg_remove(NULL, rphy);
 	transport_remove_device(dev);
 	device_del(dev);
 }
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index fc67f564f02c..788ed9b59b4e 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,10 +1,12 @@
 #
 # Makefile for the SuperH specific drivers.
 #
-obj-y				:= intc/
+obj-$(CONFIG_SUPERH)			+= intc/
+obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)	+= intc/
+ifneq ($(CONFIG_COMMON_CLK),y)
+obj-$(CONFIG_HAVE_CLK)			+= clk/
+endif
+obj-$(CONFIG_MAPLE)			+= maple/
+obj-$(CONFIG_SUPERHYWAY)		+= superhyway/
 
-obj-$(CONFIG_HAVE_CLK)		+= clk/
-obj-$(CONFIG_MAPLE)		+= maple/
-obj-$(CONFIG_SUPERHYWAY)	+= superhyway/
-
-obj-y				+= pm_runtime.o
+obj-y					+= pm_runtime.o
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 8afa5a4589f2..10c65eb51f85 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 	.con_ids = { NULL, },
 };
 
+static bool default_pm_on;
+
 static int __init sh_pm_runtime_init(void)
 {
+	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+		if (!of_machine_is_compatible("renesas,emev2") &&
+		    !of_machine_is_compatible("renesas,r7s72100") &&
+		    !of_machine_is_compatible("renesas,r8a73a4") &&
+		    !of_machine_is_compatible("renesas,r8a7740") &&
+		    !of_machine_is_compatible("renesas,r8a7778") &&
+		    !of_machine_is_compatible("renesas,r8a7779") &&
+		    !of_machine_is_compatible("renesas,r8a7790") &&
+		    !of_machine_is_compatible("renesas,r8a7791") &&
+		    !of_machine_is_compatible("renesas,sh7372") &&
+		    !of_machine_is_compatible("renesas,sh73a0"))
+			return 0;
+	}
+
+	default_pm_on = true;
 	pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
 	return 0;
 }
@@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init);
 
 static int __init sh_pm_runtime_late_init(void)
 {
-	pm_genpd_poweroff_unused();
+	if (default_pm_on)
+		pm_genpd_poweroff_unused();
 	return 0;
 }
 late_initcall(sh_pm_runtime_late_init);
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 713af4806f26..f6759dc0153b 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
 	struct sg_table *sgt;
 	void *buf, *pbuf;
 
-	/*
-	 * Some DMA controllers have problems transferring buffers that are
-	 * not multiple of 4 bytes. So we truncate the transfer so that it
-	 * is suitable for such controllers, and handle the trailing bytes
-	 * manually after the DMA completes.
-	 *
-	 * REVISIT: It would be better if this information could be
-	 * retrieved directly from the DMA device in a similar way than
-	 * ->copy_align etc. is done.
-	 */
-	len = ALIGN(drv_data->len, 4);
-
 	if (dir == DMA_TO_DEVICE) {
 		dmadev = drv_data->tx_chan->device->dev;
 		sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
 	if (!error) {
 		pxa2xx_spi_unmap_dma_buffers(drv_data);
 
-		/* Handle the last bytes of unaligned transfer */
 		drv_data->tx += drv_data->tx_map_len;
-		drv_data->write(drv_data);
-
 		drv_data->rx += drv_data->rx_map_len;
-		drv_data->read(drv_data);
 
 		msg->actual_length += drv_data->len;
 		msg->state = pxa2xx_spi_next_transfer(drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index b032e8885e24..78c66e3c53ed 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
 	int ret;
 
 	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret)
+	if (ret < 0)
 		return ret;
 
 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
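pm_runtime_get_sync() returns a negative errno on failure but can legitimately return 1 when the device was already active, so `if (ret)` made the remove path bail out on a healthy, already-resumed device. The convention, sketched:

	/* pm_runtime_get_sync() return values:
	 *   < 0  resume failed (the usage count is still incremented)
	 *   0    device was resumed
	 *   1    device was already active
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;	/* only negative values are errors */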
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4eb9bf02996c..939edf473235 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 		spi->master->set_cs(spi, !enable);
 }
 
+#ifdef CONFIG_HAS_DMA
 static int spi_map_buf(struct spi_master *master, struct device *dev,
 		       struct sg_table *sgt, void *buf, size_t len,
 		       enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
 	}
 }
 
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
 	struct device *tx_dev, *rx_dev;
 	struct spi_transfer *xfer;
-	void *tmp;
-	unsigned int max_tx, max_rx;
 	int ret;
 
-	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
-		max_tx = 0;
-		max_rx = 0;
-
-		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-			if ((master->flags & SPI_MASTER_MUST_TX) &&
-			    !xfer->tx_buf)
-				max_tx = max(xfer->len, max_tx);
-			if ((master->flags & SPI_MASTER_MUST_RX) &&
-			    !xfer->rx_buf)
-				max_rx = max(xfer->len, max_rx);
-		}
-
-		if (max_tx) {
-			tmp = krealloc(master->dummy_tx, max_tx,
-				       GFP_KERNEL | GFP_DMA);
-			if (!tmp)
-				return -ENOMEM;
-			master->dummy_tx = tmp;
-			memset(tmp, 0, max_tx);
-		}
-
-		if (max_rx) {
-			tmp = krealloc(master->dummy_rx, max_rx,
-				       GFP_KERNEL | GFP_DMA);
-			if (!tmp)
-				return -ENOMEM;
-			master->dummy_rx = tmp;
-		}
-
-		if (max_tx || max_rx) {
-			list_for_each_entry(xfer, &msg->transfers,
-					    transfer_list) {
-				if (!xfer->tx_buf)
-					xfer->tx_buf = master->dummy_tx;
-				if (!xfer->rx_buf)
-					xfer->rx_buf = master->dummy_rx;
-			}
-		}
-	}
-
 	if (!master->can_dma)
 		return 0;
 
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 
 	return 0;
 }
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+				struct spi_message *msg)
+{
+	return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+				struct spi_message *msg)
+{
+	return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	void *tmp;
+	unsigned int max_tx, max_rx;
+
+	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+		max_tx = 0;
+		max_rx = 0;
+
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			if ((master->flags & SPI_MASTER_MUST_TX) &&
+			    !xfer->tx_buf)
+				max_tx = max(xfer->len, max_tx);
+			if ((master->flags & SPI_MASTER_MUST_RX) &&
+			    !xfer->rx_buf)
+				max_rx = max(xfer->len, max_rx);
+		}
+
+		if (max_tx) {
+			tmp = krealloc(master->dummy_tx, max_tx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_tx = tmp;
+			memset(tmp, 0, max_tx);
+		}
+
+		if (max_rx) {
+			tmp = krealloc(master->dummy_rx, max_rx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_rx = tmp;
+		}
+
+		if (max_tx || max_rx) {
+			list_for_each_entry(xfer, &msg->transfers,
+					    transfer_list) {
+				if (!xfer->tx_buf)
+					xfer->tx_buf = master->dummy_tx;
+				if (!xfer->rx_buf)
+					xfer->rx_buf = master->dummy_rx;
+			}
+		}
+	}
+
+	return __spi_map_msg(master, msg);
+}
 
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
 {
 	int ret;
 
-	master->queued = true;
 	master->transfer = spi_queued_transfer;
 	if (!master->transfer_one_message)
 		master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
 		dev_err(&master->dev, "problem initializing queue\n");
 		goto err_init_queue;
 	}
+	master->queued = true;
 	ret = spi_start_queue(master);
 	if (ret) {
 		dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
 	return 0;
 
 err_start_queue:
-err_init_queue:
 	spi_destroy_queue(master);
+err_init_queue:
 	return ret;
 }
 
1178 1199
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
1756 */ 1777 */
1757int spi_setup(struct spi_device *spi) 1778int spi_setup(struct spi_device *spi)
1758{ 1779{
1759 unsigned bad_bits; 1780 unsigned bad_bits, ugly_bits;
1760 int status = 0; 1781 int status = 0;
1761 1782
1762 /* check mode to prevent that DUAL and QUAD set at the same time 1783 /* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
 	 * that aren't supported with their current master
 	 */
 	bad_bits = spi->mode & ~spi->master->mode_bits;
+	ugly_bits = bad_bits &
+		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+	if (ugly_bits) {
+		dev_warn(&spi->dev,
+			 "setup: ignoring unsupported mode bits %x\n",
+			 ugly_bits);
+		spi->mode &= ~ugly_bits;
+		bad_bits &= ~ugly_bits;
+	}
 	if (bad_bits) {
 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
 			bad_bits);
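The effect is a graceful fallback: dual/quad transfer bits the master cannot provide are stripped with a warning instead of failing the whole setup, while any other unsupported mode bit still yields -EINVAL. A usage-level illustration (flash client hypothetical):

	spi->mode |= SPI_TX_QUAD;	/* master only does single-bit TX */
	status = spi_setup(spi);
	/* previously: -EINVAL; now: 0, after logging "setup: ignoring
	 * unsupported mode bits", and the device runs in single-bit mode */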
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 4144a75e5f71..c270c9ae6d27 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
 		of_node_put(port);
 		if (port == imx_crtc->port) {
 			ret = of_graph_parse_endpoint(ep, &endpoint);
-			return ret ? ret : endpoint.id;
+			return ret ? ret : endpoint.port;
 		}
 	} while (ep);
 
@@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
 		if (!remote || !of_device_is_available(remote)) {
 			of_node_put(remote);
 			continue;
+		} else if (!of_device_is_available(remote->parent)) {
+			dev_warn(&pdev->dev, "parent device of %s is not available\n",
+				 remote->full_name);
+			of_node_put(remote);
+			continue;
 		}
 
 		ret = imx_drm_add_component(&pdev->dev, remote);
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 575533f4fd64..a23f4f773146 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 	tve->dev = dev;
 	spin_lock_init(&tve->lock);
 
-	ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
+	ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
 	if (ddc_node) {
 		tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
 		of_node_put(ddc_node);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 8c101cbbee97..acc8184c46cd 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq)
 	struct vpfe_fh *fh = vb2_get_drv_priv(vq);
 	struct vpfe_video_device *video = fh->video;
 
-	if (!vb2_is_streaming(vq))
-		return 0;
 	/* release all active buffers */
+	if (video->cur_frm == video->next_frm) {
+		vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
+	} else {
+		if (video->cur_frm != NULL)
+			vb2_buffer_done(&video->cur_frm->vb,
+					VB2_BUF_STATE_ERROR);
+		if (video->next_frm != NULL)
+			vb2_buffer_done(&video->next_frm->vb,
+					VB2_BUF_STATE_ERROR);
+	}
+
 	while (!list_empty(&video->dma_queue)) {
 		video->next_frm = list_entry(video->dma_queue.next,
 					     struct vpfe_cap_buffer, list);
diff --git a/drivers/staging/media/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h
index b3d2cc729657..4ba569258498 100644
--- a/drivers/staging/media/sn9c102/sn9c102_devtable.h
+++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h
@@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
 	{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
-#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index 57eca7a45672..4fe751f7c2bf 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev)
 #endif /* CONFIG_8723AU_P2P */
 
 	rtw_scan_abort23a(padapter);
-	/* set this at the end */
-	padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR;
 
 	RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
 	DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
index c49160e477d8..07e542e5d156 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr)
 	if (addr == RECV_BULK_IN_ADDR) {
 		pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
 	} else if (addr == RECV_INT_IN_ADDR) {
-		pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
+		pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]);
 	} else if (addr < HW_QUEUE_ENTRY) {
 		ep_num = pdvobj->Queue2Pipe[addr];
 		pipe = usb_sndbulkpipe(pusbd, ep_num);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 78cab13bbb1b..46588c85d39b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	 * Initiator is expecting a NopIN ping reply..
 	 */
 	if (hdr->itt != RESERVED_ITT) {
-		BUG_ON(!cmd);
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
 
 		spin_lock_bh(&conn->cmd_lock);
 		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 6960f22909ae..302eb3b78715 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -775,6 +775,7 @@ struct iscsi_np {
 	int			np_ip_proto;
 	int			np_sock_type;
 	enum np_thread_state_table np_thread_state;
+	bool			enabled;
 	enum iscsi_timer_flags_table np_login_timer_flags;
 	u32			np_exports;
 	enum np_flags_table	np_flags;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8739b98f6f93..ca31fa1b8a4b 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2(
 	}
 	off = mrdsl % PAGE_SIZE;
 	if (!off)
-		return 0;
+		goto check_prot;
 
 	if (mrdsl < PAGE_SIZE)
 		mrdsl = PAGE_SIZE;
@@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2(
 					    ISCSI_LOGIN_STATUS_NO_RESOURCES);
 			return -1;
 		}
+		/*
+		 * ISER currently requires that ImmediateData + Unsolicited
+		 * Data be disabled when protection / signature MRs are enabled.
+		 */
+check_prot:
+		if (sess->se_sess->sup_prot_ops &
+		    (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+		     TARGET_PROT_DOUT_INSERT)) {
+
+			sprintf(buf, "ImmediateData=No");
+			if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+						    ISCSI_LOGIN_STATUS_NO_RESOURCES);
+				return -1;
+			}
+
+			sprintf(buf, "InitialR2T=Yes");
+			if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+						    ISCSI_LOGIN_STATUS_NO_RESOURCES);
+				return -1;
+			}
+			pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+				 " T10-PI enabled ISER session\n");
+		}
 	}
 
 	return 0;
@@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket(
 	}
 
 	np->np_transport = t;
+	np->enabled = true;
 	return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index eb96b20dc09e..ca1811858afd 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread(
 		return;
 	}
 
+	tpg_np->tpg_np->enabled = false;
 	iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
 }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 65001e133670..26416c15d65c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 		pr_err("emulate_write_cache not supported for pSCSI\n");
 		return -EINVAL;
 	}
-	if (dev->transport->get_write_cache) {
-		pr_warn("emulate_write_cache cannot be changed when underlying"
-			" HW reports WriteCacheEnabled, ignoring request\n");
-		return 0;
+	if (flag &&
+	    dev->transport->get_write_cache) {
+		pr_err("emulate_write_cache not supported for this device\n");
+		return -EINVAL;
 	}
 
 	dev->dev_attrib.emulate_write_cache = flag;
@@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
 		return 0;
 	}
 	if (!dev->transport->init_prot || !dev->transport->free_prot) {
+		/* 0 is only allowed value for non-supporting backends */
+		if (flag == 0)
+			return 0;
+
 		pr_err("DIF protection not supported by backend: %s\n",
 		       dev->transport->name);
 		return -ENOSYS;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d4b98690a736..789aa9eb0a1e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1113,6 +1113,7 @@ void transport_init_se_cmd(
 	init_completion(&cmd->cmd_wait_comp);
 	init_completion(&cmd->task_stop_comp);
 	spin_lock_init(&cmd->t_state_lock);
+	kref_init(&cmd->cmd_kref);
 	cmd->transport_state = CMD_T_DEV_ACTIVE;
 
 	cmd->se_tfo = tfo;
@@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
 	unsigned long flags;
 	int ret = 0;
 
-	kref_init(&se_cmd->cmd_kref);
 	/*
 	 * Add a second kref if the fabric caller is expecting to handle
 	 * fabric acknowledgement that requires two target_put_sess_cmd()
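Moving kref_init() from target_get_sess_cmd() into transport_init_se_cmd() makes the refcount valid for the command's entire lifetime, so paths that run before (or instead of) the session-command setup can still do a regular kref_put(). Re-initializing a live kref is the hazard being avoided; the general rule, sketched with a hypothetical object:

	struct foo {
		struct kref kref;
		/* ... */
	};

	static void foo_init(struct foo *f)
	{
		kref_init(&f->kref);	/* count = 1, set exactly once at init */
	}

	/* Calling kref_init() again later resets the count and makes every
	 * outstanding kref_get() silently disappear. */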
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 01cf37f212c3..f5fd515b2bee 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
 {
 	struct fc_frame *fp;
 	struct fc_lport *lport;
-	struct se_session *se_sess;
+	struct ft_sess *sess;
 
 	if (!cmd)
 		return;
-	se_sess = cmd->sess->se_sess;
+	sess = cmd->sess;
 	fp = cmd->req_frame;
 	lport = fr_dev(fp);
 	if (fr_seq(fp))
 		lport->tt.seq_release(fr_seq(fp));
 	fc_frame_free(fp);
-	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
-	ft_sess_put(cmd->sess); /* undo get from lookup at recv */
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ft_sess_put(sess); /* undo get from lookup at recv */
 }
 
 void ft_release_cmd(struct se_cmd *se_cmd)
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 1c8c6cc6de30..4b0eff6da674 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
 {
 	_enter("");
 
+	/* Break the callbacks here so that we do it after the final ACK is
+	 * received.  The step number here must match the final number in
+	 * afs_deliver_cb_callback().
+	 */
+	if (call->unmarshall == 6) {
+		ASSERT(call->server && call->count && call->request);
+		afs_break_callbacks(call->server, call->count, call->request);
+	}
+
 	afs_put_server(call->server);
 	call->server = NULL;
 	kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
 		_debug("trailer");
 		if (skb->len != 0)
 			return -EBADMSG;
+
+		/* Record that the message was unmarshalled successfully so
+		 * that the call destructor can know do the callback breaking
+		 * work, even if the final ACK isn't received.
+		 *
+		 * If the step number changes, then afs_cm_destructor() must be
+		 * updated also.
+		 */
+		call->unmarshall++;
+	case 6:
 		break;
 	}
 
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index be75b500005d..590b55f46d61 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -75,7 +75,7 @@ struct afs_call {
 	const struct afs_call_type *type;	/* type of call */
 	const struct afs_wait_mode *wait_mode;	/* completion wait mode */
 	wait_queue_head_t	waitq;		/* processes awaiting completion */
-	work_func_t		async_workfn;
+	void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
 	struct work_struct	async_work;	/* asynchronous work processor */
 	struct work_struct	work;		/* actual work processor */
 	struct sk_buff_head	rx_queue;	/* received packets */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index ef943df73b8c..03a3beb17004 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
25static int afs_wait_for_call_to_complete(struct afs_call *); 25static int afs_wait_for_call_to_complete(struct afs_call *);
26static void afs_wake_up_async_call(struct afs_call *); 26static void afs_wake_up_async_call(struct afs_call *);
27static int afs_dont_wait_for_call_to_complete(struct afs_call *); 27static int afs_dont_wait_for_call_to_complete(struct afs_call *);
28static void afs_process_async_call(struct work_struct *); 28static void afs_process_async_call(struct afs_call *);
29static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *); 29static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
30static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool); 30static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
31 31
@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
 static struct sk_buff_head afs_incoming_calls;
 static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
 
+static void afs_async_workfn(struct work_struct *work)
+{
+	struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+	call->async_workfn(call);
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
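The trampoline above is the crux of this series of afs changes: `work_func_t` receives a `struct work_struct *`, and the old code stored typed handlers in a `work_func_t` field and redid `container_of()` in each of them. One trampoline now performs the conversion once and dispatches through the typed `async_workfn` pointer declared in internal.h. The generic shape of the pattern (names hypothetical):

	struct obj {
		void (*fn)(struct obj *);	/* typed handler */
		struct work_struct work;
	};

	static void obj_workfn(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, work);

		o->fn(o);	/* container_of() lives in exactly one place */
	}

	/* queueing side: o->fn = handler; INIT_WORK(&o->work, obj_workfn); */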
@@ -184,6 +191,28 @@ static void afs_free_call(struct afs_call *call)
 }
 
 /*
+ * End a call but do not free it
+ */
+static void afs_end_call_nofree(struct afs_call *call)
+{
+	if (call->rxcall) {
+		rxrpc_kernel_end_call(call->rxcall);
+		call->rxcall = NULL;
+	}
+	if (call->type->destructor)
+		call->type->destructor(call);
+}
+
+/*
+ * End a call and free it
+ */
+static void afs_end_call(struct afs_call *call)
+{
+	afs_end_call_nofree(call);
+	afs_free_call(call);
+}
+
+/*
  * allocate a call with flat request and reply buffers
  */
 struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	       atomic_read(&afs_outstanding_calls));
 
 	call->wait_mode = wait_mode;
-	INIT_WORK(&call->async_work, afs_process_async_call);
+	call->async_workfn = afs_process_async_call;
+	INIT_WORK(&call->async_work, afs_async_workfn);
 
 	memset(&srx, 0, sizeof(srx));
 	srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
 	rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
 	while ((skb = skb_dequeue(&call->rx_queue)))
 		afs_free_skb(skb);
-	rxrpc_kernel_end_call(rxcall);
-	call->rxcall = NULL;
 error_kill_call:
-	call->type->destructor(call);
-	afs_free_call(call);
+	afs_end_call(call);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
 	if (call->state >= AFS_CALL_COMPLETE) {
 		while ((skb = skb_dequeue(&call->rx_queue)))
 			afs_free_skb(skb);
-		if (call->incoming) {
-			rxrpc_kernel_end_call(call->rxcall);
-			call->rxcall = NULL;
-			call->type->destructor(call);
-			afs_free_call(call);
-		}
+		if (call->incoming)
+			afs_end_call(call);
 	}
 
 	_leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
 	}
 
 	_debug("call complete");
-	rxrpc_kernel_end_call(call->rxcall);
-	call->rxcall = NULL;
-	call->type->destructor(call);
-	afs_free_call(call);
+	afs_end_call(call);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
603/* 623/*
604 * delete an asynchronous call 624 * delete an asynchronous call
605 */ 625 */
606static void afs_delete_async_call(struct work_struct *work) 626static void afs_delete_async_call(struct afs_call *call)
607{ 627{
608 struct afs_call *call =
609 container_of(work, struct afs_call, async_work);
610
611 _enter(""); 628 _enter("");
612 629
613 afs_free_call(call); 630 afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
620 * - on a multiple-thread workqueue this work item may try to run on several 637 * - on a multiple-thread workqueue this work item may try to run on several
621 * CPUs at the same time 638 * CPUs at the same time
622 */ 639 */
623static void afs_process_async_call(struct work_struct *work) 640static void afs_process_async_call(struct afs_call *call)
624{ 641{
625 struct afs_call *call =
626 container_of(work, struct afs_call, async_work);
627
628 _enter(""); 642 _enter("");
629 643
630 if (!skb_queue_empty(&call->rx_queue)) 644 if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
637 call->reply = NULL; 651 call->reply = NULL;
638 652
639 /* kill the call */ 653 /* kill the call */
640 rxrpc_kernel_end_call(call->rxcall); 654 afs_end_call_nofree(call);
641 call->rxcall = NULL;
642 if (call->type->destructor)
643 call->type->destructor(call);
644 655
645 /* we can't just delete the call because the work item may be 656 /* we can't just delete the call because the work item may be
646 * queued */ 657 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
663 call->reply_size += len; 674 call->reply_size += len;
664} 675}
665 676
666static void afs_async_workfn(struct work_struct *work)
667{
668 struct afs_call *call = container_of(work, struct afs_call, async_work);
669
670 call->async_workfn(work);
671}
672
673/* 677/*
674 * accept the backlog of incoming calls 678 * accept the backlog of incoming calls
675 */ 679 */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
790 _debug("oom"); 794 _debug("oom");
791 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); 795 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
792 default: 796 default:
793 rxrpc_kernel_end_call(call->rxcall); 797 afs_end_call(call);
794 call->rxcall = NULL;
795 call->type->destructor(call);
796 afs_free_call(call);
797 _leave(" [error]"); 798 _leave(" [error]");
798 return; 799 return;
799 } 800 }
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
823 call->state = AFS_CALL_AWAIT_ACK; 824 call->state = AFS_CALL_AWAIT_ACK;
824 n = rxrpc_kernel_send_data(call->rxcall, &msg, len); 825 n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
825 if (n >= 0) { 826 if (n >= 0) {
827 /* Success */
826 _leave(" [replied]"); 828 _leave(" [replied]");
827 return; 829 return;
828 } 830 }
831
829 if (n == -ENOMEM) { 832 if (n == -ENOMEM) {
830 _debug("oom"); 833 _debug("oom");
831 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); 834 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
832 } 835 }
833 rxrpc_kernel_end_call(call->rxcall); 836 afs_end_call(call);
834 call->rxcall = NULL;
835 call->type->destructor(call);
836 afs_free_call(call);
837 _leave(" [error]"); 837 _leave(" [error]");
838} 838}
839 839
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2ad7de94efef..2f6d7b13b5bd 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3120,6 +3120,8 @@ process_slot:
3120 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 3120 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
3121 u64 skip = 0; 3121 u64 skip = 0;
3122 u64 trim = 0; 3122 u64 trim = 0;
3123 u64 aligned_end = 0;
3124
3123 if (off > key.offset) { 3125 if (off > key.offset) {
3124 skip = off - key.offset; 3126 skip = off - key.offset;
3125 new_key.offset += skip; 3127 new_key.offset += skip;
@@ -3136,9 +3138,11 @@ process_slot:
3136 size -= skip + trim; 3138 size -= skip + trim;
3137 datal -= skip + trim; 3139 datal -= skip + trim;
3138 3140
3141 aligned_end = ALIGN(new_key.offset + datal,
3142 root->sectorsize);
3139 ret = btrfs_drop_extents(trans, root, inode, 3143 ret = btrfs_drop_extents(trans, root, inode,
3140 new_key.offset, 3144 new_key.offset,
3141 new_key.offset + datal, 3145 aligned_end,
3142 1); 3146 1);
3143 if (ret) { 3147 if (ret) {
3144 if (ret != -EOPNOTSUPP) 3148 if (ret != -EOPNOTSUPP)
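
[Editor's note: the fs/btrfs/ioctl.c hunk drops extents up to a sector-aligned end rather than the raw inline-extent end. For a power-of-two alignment a, the kernel's ALIGN(x, a) rounds up as (x + a - 1) & ~(a - 1); a standalone check of that arithmetic, where 4096 is only an example sector size:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same rounding the kernel's ALIGN() performs for power-of-two a */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t sectorsize = 4096;             /* example value */

        assert(ALIGN_UP(0, sectorsize) == 0);
        assert(ALIGN_UP(1, sectorsize) == 4096);
        assert(ALIGN_UP(4096, sectorsize) == 4096);
        assert(ALIGN_UP(4097, sectorsize) == 8192);

        printf("ALIGN_UP(5000, 4096) = %llu\n",
               (unsigned long long)ALIGN_UP(5000, sectorsize));
        return 0;
}
]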
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index eb6537a08c1b..fd38b5053479 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1668,7 +1668,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
1668 goto out; 1668 goto out;
1669 } 1669 }
1670 1670
1671 if (key.type == BTRFS_INODE_REF_KEY) { 1671 if (found_key.type == BTRFS_INODE_REF_KEY) {
1672 struct btrfs_inode_ref *iref; 1672 struct btrfs_inode_ref *iref;
1673 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1673 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1674 struct btrfs_inode_ref); 1674 struct btrfs_inode_ref);
diff --git a/fs/dcache.c b/fs/dcache.c
index 42ae01eefc07..be2bea834bf4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -441,42 +441,12 @@ void d_drop(struct dentry *dentry)
441} 441}
442EXPORT_SYMBOL(d_drop); 442EXPORT_SYMBOL(d_drop);
443 443
444/* 444static void __dentry_kill(struct dentry *dentry)
445 * Finish off a dentry we've decided to kill.
446 * dentry->d_lock must be held, returns with it unlocked.
447 * If ref is non-zero, then decrement the refcount too.
448 * Returns dentry requiring refcount drop, or NULL if we're done.
449 */
450static struct dentry *
451dentry_kill(struct dentry *dentry, int unlock_on_failure)
452 __releases(dentry->d_lock)
453{ 445{
454 struct inode *inode;
455 struct dentry *parent = NULL; 446 struct dentry *parent = NULL;
456 bool can_free = true; 447 bool can_free = true;
457
458 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
459 can_free = dentry->d_flags & DCACHE_MAY_FREE;
460 spin_unlock(&dentry->d_lock);
461 goto out;
462 }
463
464 inode = dentry->d_inode;
465 if (inode && !spin_trylock(&inode->i_lock)) {
466relock:
467 if (unlock_on_failure) {
468 spin_unlock(&dentry->d_lock);
469 cpu_relax();
470 }
471 return dentry; /* try again with same dentry */
472 }
473 if (!IS_ROOT(dentry)) 448 if (!IS_ROOT(dentry))
474 parent = dentry->d_parent; 449 parent = dentry->d_parent;
475 if (parent && !spin_trylock(&parent->d_lock)) {
476 if (inode)
477 spin_unlock(&inode->i_lock);
478 goto relock;
479 }
480 450
481 /* 451 /*
482 * The dentry is now unrecoverably dead to the world. 452 * The dentry is now unrecoverably dead to the world.
@@ -520,9 +490,72 @@ relock:
520 can_free = false; 490 can_free = false;
521 } 491 }
522 spin_unlock(&dentry->d_lock); 492 spin_unlock(&dentry->d_lock);
523out:
524 if (likely(can_free)) 493 if (likely(can_free))
525 dentry_free(dentry); 494 dentry_free(dentry);
495}
496
497/*
498 * Finish off a dentry we've decided to kill.
499 * dentry->d_lock must be held, returns with it unlocked.
500 * If ref is non-zero, then decrement the refcount too.
501 * Returns dentry requiring refcount drop, or NULL if we're done.
502 */
503static struct dentry *dentry_kill(struct dentry *dentry)
504 __releases(dentry->d_lock)
505{
506 struct inode *inode = dentry->d_inode;
507 struct dentry *parent = NULL;
508
509 if (inode && unlikely(!spin_trylock(&inode->i_lock)))
510 goto failed;
511
512 if (!IS_ROOT(dentry)) {
513 parent = dentry->d_parent;
514 if (unlikely(!spin_trylock(&parent->d_lock))) {
515 if (inode)
516 spin_unlock(&inode->i_lock);
517 goto failed;
518 }
519 }
520
521 __dentry_kill(dentry);
522 return parent;
523
524failed:
525 spin_unlock(&dentry->d_lock);
526 cpu_relax();
527 return dentry; /* try again with same dentry */
528}
529
530static inline struct dentry *lock_parent(struct dentry *dentry)
531{
532 struct dentry *parent = dentry->d_parent;
533 if (IS_ROOT(dentry))
534 return NULL;
535 if (likely(spin_trylock(&parent->d_lock)))
536 return parent;
537 spin_unlock(&dentry->d_lock);
538 rcu_read_lock();
539again:
540 parent = ACCESS_ONCE(dentry->d_parent);
541 spin_lock(&parent->d_lock);
542 /*
543 * We can't blindly lock dentry until we are sure
544 * that we won't violate the locking order.
545 * Any changes of dentry->d_parent must have
546 * been done with parent->d_lock held, so
547 * spin_lock() above is enough of a barrier
548 * for checking if it's still our child.
549 */
550 if (unlikely(parent != dentry->d_parent)) {
551 spin_unlock(&parent->d_lock);
552 goto again;
553 }
554 rcu_read_unlock();
555 if (parent != dentry)
556 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
557 else
558 parent = NULL;
526 return parent; 559 return parent;
527} 560}
528 561
@@ -579,7 +612,7 @@ repeat:
579 return; 612 return;
580 613
581kill_it: 614kill_it:
582 dentry = dentry_kill(dentry, 1); 615 dentry = dentry_kill(dentry);
583 if (dentry) 616 if (dentry)
584 goto repeat; 617 goto repeat;
585} 618}
@@ -797,8 +830,11 @@ static void shrink_dentry_list(struct list_head *list)
797 struct dentry *dentry, *parent; 830 struct dentry *dentry, *parent;
798 831
799 while (!list_empty(list)) { 832 while (!list_empty(list)) {
833 struct inode *inode;
800 dentry = list_entry(list->prev, struct dentry, d_lru); 834 dentry = list_entry(list->prev, struct dentry, d_lru);
801 spin_lock(&dentry->d_lock); 835 spin_lock(&dentry->d_lock);
836 parent = lock_parent(dentry);
837
802 /* 838 /*
803 * The dispose list is isolated and dentries are not accounted 839 * The dispose list is isolated and dentries are not accounted
804 * to the LRU here, so we can simply remove it from the list 840 * to the LRU here, so we can simply remove it from the list
@@ -812,26 +848,33 @@ static void shrink_dentry_list(struct list_head *list)
812 */ 848 */
813 if ((int)dentry->d_lockref.count > 0) { 849 if ((int)dentry->d_lockref.count > 0) {
814 spin_unlock(&dentry->d_lock); 850 spin_unlock(&dentry->d_lock);
851 if (parent)
852 spin_unlock(&parent->d_lock);
815 continue; 853 continue;
816 } 854 }
817 855
818 parent = dentry_kill(dentry, 0); 856
819 /* 857 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
820 * If dentry_kill returns NULL, we have nothing more to do. 858 bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
821 */ 859 spin_unlock(&dentry->d_lock);
822 if (!parent) 860 if (parent)
861 spin_unlock(&parent->d_lock);
862 if (can_free)
863 dentry_free(dentry);
823 continue; 864 continue;
865 }
824 866
825 if (unlikely(parent == dentry)) { 867 inode = dentry->d_inode;
826 /* 868 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
827 * trylocks have failed and d_lock has been held the
828 * whole time, so it could not have been added to any
829 * other lists. Just add it back to the shrink list.
830 */
831 d_shrink_add(dentry, list); 869 d_shrink_add(dentry, list);
832 spin_unlock(&dentry->d_lock); 870 spin_unlock(&dentry->d_lock);
871 if (parent)
872 spin_unlock(&parent->d_lock);
833 continue; 873 continue;
834 } 874 }
875
876 __dentry_kill(dentry);
877
835 /* 878 /*
836 * We need to prune ancestors too. This is necessary to prevent 879 * We need to prune ancestors too. This is necessary to prevent
837 * quadratic behavior of shrink_dcache_parent(), but is also 880 * quadratic behavior of shrink_dcache_parent(), but is also
@@ -839,8 +882,26 @@ static void shrink_dentry_list(struct list_head *list)
839 * fragmentation. 882 * fragmentation.
840 */ 883 */
841 dentry = parent; 884 dentry = parent;
842 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) 885 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
843 dentry = dentry_kill(dentry, 1); 886 parent = lock_parent(dentry);
887 if (dentry->d_lockref.count != 1) {
888 dentry->d_lockref.count--;
889 spin_unlock(&dentry->d_lock);
890 if (parent)
891 spin_unlock(&parent->d_lock);
892 break;
893 }
894 inode = dentry->d_inode; /* can't be NULL */
895 if (unlikely(!spin_trylock(&inode->i_lock))) {
896 spin_unlock(&dentry->d_lock);
897 if (parent)
898 spin_unlock(&parent->d_lock);
899 cpu_relax();
900 continue;
901 }
902 __dentry_kill(dentry);
903 dentry = parent;
904 }
844 } 905 }
845} 906}
846 907
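
[Editor's note: lock_parent() is the interesting new helper in the fs/dcache.c diff above. Lock ordering requires parent->d_lock before child->d_lock, but callers already hold the child's lock, so it trylocks the parent and, on contention, drops the child lock, takes the parent lock first, and re-checks that d_parent did not change in between (the RCU read lock keeps the old parent pointer safe to dereference while unlocked). A compressed userspace sketch of the retry shape with pthread trylocks; it omits the RCU lifetime guarantee and the IS_ROOT/self-parent cases, and the two-field node struct is invented:

#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_mutex_t lock;
        struct node *parent;
};

/*
 * Caller holds n->lock. Return with parent->lock and n->lock both held,
 * falling back to parent-then-child ordering if the trylock fails.
 */
static struct node *lock_parent(struct node *n)
{
        struct node *parent = n->parent;

        if (pthread_mutex_trylock(&parent->lock) == 0)
                return parent;          /* fast path: no inversion */

        pthread_mutex_unlock(&n->lock); /* drop child, retry in order */
        for (;;) {
                /* racy reread; the kernel holds the RCU read lock here */
                parent = n->parent;
                pthread_mutex_lock(&parent->lock);
                if (parent == n->parent)
                        break;          /* still our parent: done */
                pthread_mutex_unlock(&parent->lock);
        }
        pthread_mutex_lock(&n->lock);
        return parent;
}

int main(void)
{
        struct node p = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct node c = { PTHREAD_MUTEX_INITIALIZER, &p };

        pthread_mutex_lock(&c.lock);
        struct node *parent = lock_parent(&c);
        printf("locked child and parent %p\n", (void *)parent);
        pthread_mutex_unlock(&parent->lock);
        pthread_mutex_unlock(&c.lock);
        return 0;
}
]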
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e01ea4a14a01..5e9a80cfc3d8 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -610,6 +610,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
610static int kernfs_fop_open(struct inode *inode, struct file *file) 610static int kernfs_fop_open(struct inode *inode, struct file *file)
611{ 611{
612 struct kernfs_node *kn = file->f_path.dentry->d_fsdata; 612 struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
613 struct kernfs_root *root = kernfs_root(kn);
613 const struct kernfs_ops *ops; 614 const struct kernfs_ops *ops;
614 struct kernfs_open_file *of; 615 struct kernfs_open_file *of;
615 bool has_read, has_write, has_mmap; 616 bool has_read, has_write, has_mmap;
@@ -624,14 +625,16 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
624 has_write = ops->write || ops->mmap; 625 has_write = ops->write || ops->mmap;
625 has_mmap = ops->mmap; 626 has_mmap = ops->mmap;
626 627
627 /* check perms and supported operations */ 628 /* see the flag definition for details */
628 if ((file->f_mode & FMODE_WRITE) && 629 if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
629 (!(inode->i_mode & S_IWUGO) || !has_write)) 630 if ((file->f_mode & FMODE_WRITE) &&
630 goto err_out; 631 (!(inode->i_mode & S_IWUGO) || !has_write))
632 goto err_out;
631 633
632 if ((file->f_mode & FMODE_READ) && 634 if ((file->f_mode & FMODE_READ) &&
633 (!(inode->i_mode & S_IRUGO) || !has_read)) 635 (!(inode->i_mode & S_IRUGO) || !has_read))
634 goto err_out; 636 goto err_out;
637 }
635 638
636 /* allocate a kernfs_open_file for the file */ 639 /* allocate a kernfs_open_file for the file */
637 error = -ENOMEM; 640 error = -ENOMEM;
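
[Editor's note: the kernfs_fop_open() hunk makes the strict open-time permission check conditional on a per-root opt-in flag, so sysfs keeps its historical -EACCES behavior while other kernfs users skip it (see the flag comment in include/linux/kernfs.h further down). A toy sketch of flag-gated checking; the constants mirror the kernel's values but the function is invented:

#include <stdbool.h>
#include <stdio.h>

#define FMODE_WRITE                0x2
#define S_IWUGO                    0222
#define ROOT_EXTRA_OPEN_PERM_CHECK 0x2

/* mirrors the hunk above: the stricter check only runs when the
 * root that owns the node opted in via the flag */
static bool open_allowed(unsigned root_flags, unsigned fmode,
                         unsigned imode, bool has_write)
{
        if (root_flags & ROOT_EXTRA_OPEN_PERM_CHECK) {
                if ((fmode & FMODE_WRITE) &&
                    (!(imode & S_IWUGO) || !has_write))
                        return false;
        }
        return true;
}

int main(void)
{
        printf("plain=%d sysfs-style=%d\n",
               open_allowed(0, FMODE_WRITE, 0444, false),
               open_allowed(ROOT_EXTRA_OPEN_PERM_CHECK,
                            FMODE_WRITE, 0444, false));
        return 0;
}
]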
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index b6f46013dddf..f66c66b9f182 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -590,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
590 add_to_mask(state, &state->groups->aces[i].perms); 590 add_to_mask(state, &state->groups->aces[i].perms);
591 } 591 }
592 592
593 if (!state->users->n && !state->groups->n) { 593 if (state->users->n || state->groups->n) {
594 pace++; 594 pace++;
595 pace->e_tag = ACL_MASK; 595 pace->e_tag = ACL_MASK;
596 low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags); 596 low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 32b699bebb9c..9a77a5a21557 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3717,9 +3717,16 @@ out:
3717static __be32 3717static __be32
3718nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) 3718nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3719{ 3719{
3720 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner))) 3720 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
3721
3722 if (check_for_locks(stp->st_file, lo))
3721 return nfserr_locks_held; 3723 return nfserr_locks_held;
3722 release_lock_stateid(stp); 3724 /*
3725 * Currently there's a 1-1 lock stateid<->lockowner
3726 * correspondence, and we have to delete the lockowner when we 3726 * correspondence, and we have to delete the lockowner when we
3727 * delete the lock stateid:
3728 */
3729 unhash_lockowner(lo);
3723 return nfs_ok; 3730 return nfs_ok;
3724} 3731}
3725 3732
@@ -4159,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
4159 4166
4160 if (!same_owner_str(&lo->lo_owner, owner, clid)) 4167 if (!same_owner_str(&lo->lo_owner, owner, clid))
4161 return false; 4168 return false;
4169 if (list_empty(&lo->lo_owner.so_stateids)) {
4170 WARN_ON_ONCE(1);
4171 return false;
4172 }
4162 lst = list_first_entry(&lo->lo_owner.so_stateids, 4173 lst = list_first_entry(&lo->lo_owner.so_stateids,
4163 struct nfs4_ol_stateid, st_perstateowner); 4174 struct nfs4_ol_stateid, st_perstateowner);
4164 return lst->st_file->fi_inode == inode; 4175 return lst->st_file->fi_inode == inode;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index af3f7aa73e13..ee1f88419cb0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -472,11 +472,15 @@ bail:
472 472
473void dlm_destroy_master_caches(void) 473void dlm_destroy_master_caches(void)
474{ 474{
475 if (dlm_lockname_cache) 475 if (dlm_lockname_cache) {
476 kmem_cache_destroy(dlm_lockname_cache); 476 kmem_cache_destroy(dlm_lockname_cache);
477 dlm_lockname_cache = NULL;
478 }
477 479
478 if (dlm_lockres_cache) 480 if (dlm_lockres_cache) {
479 kmem_cache_destroy(dlm_lockres_cache); 481 kmem_cache_destroy(dlm_lockres_cache);
482 dlm_lockres_cache = NULL;
483 }
480} 484}
481 485
482static void dlm_lockres_release(struct kref *kref) 486static void dlm_lockres_release(struct kref *kref)
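
[Editor's note: the ocfs2/dlm hunk clears each cache pointer after kmem_cache_destroy() so the destroy path is idempotent and a stale pointer can't be reused. The guard, reduced to plain C with free() standing in for the slab call:

#include <stdlib.h>

struct cache { void *mem; };

static struct cache *lockname_cache;

static void destroy_caches(void)
{
        if (lockname_cache) {
                free(lockname_cache);
                lockname_cache = NULL;  /* make a second call a no-op */
        }
}

int main(void)
{
        lockname_cache = calloc(1, sizeof(*lockname_cache));
        destroy_caches();
        destroy_caches();               /* safe: pointer was cleared */
        return 0;
}
]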
diff --git a/fs/splice.c b/fs/splice.c
index 9bc07d2b53cf..e246954ea48c 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1537,7 +1537,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
1537 struct iovec iovstack[UIO_FASTIOV]; 1537 struct iovec iovstack[UIO_FASTIOV];
1538 struct iovec *iov = iovstack; 1538 struct iovec *iov = iovstack;
1539 struct iov_iter iter; 1539 struct iov_iter iter;
1540 ssize_t count = 0; 1540 ssize_t count;
1541 1541
1542 pipe = get_pipe_info(file); 1542 pipe = get_pipe_info(file);
1543 if (!pipe) 1543 if (!pipe)
@@ -1546,8 +1546,9 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
1546 ret = rw_copy_check_uvector(READ, uiov, nr_segs, 1546 ret = rw_copy_check_uvector(READ, uiov, nr_segs,
1547 ARRAY_SIZE(iovstack), iovstack, &iov); 1547 ARRAY_SIZE(iovstack), iovstack, &iov);
1548 if (ret <= 0) 1548 if (ret <= 0)
1549 return ret; 1549 goto out;
1550 1550
1551 count = ret;
1551 iov_iter_init(&iter, iov, nr_segs, count, 0); 1552 iov_iter_init(&iter, iov, nr_segs, count, 0);
1552 1553
1553 sd.len = 0; 1554 sd.len = 0;
@@ -1560,6 +1561,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
1560 ret = __splice_from_pipe(pipe, &sd, pipe_to_user); 1561 ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
1561 pipe_unlock(pipe); 1562 pipe_unlock(pipe);
1562 1563
1564out:
1563 if (iov != iovstack) 1565 if (iov != iovstack)
1564 kfree(iov); 1566 kfree(iov);
1565 1567
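
[Editor's note: the fs/splice.c fix routes the early error return through the existing cleanup tail. Once rw_copy_check_uvector() may have replaced the on-stack iovec array with a heap allocation, every exit has to pass the iov != iovstack check or the heap copy leaks. A small userspace analogue of the stack-or-heap buffer plus single-exit idiom, with invented names:

#include <stdlib.h>
#include <string.h>

#define FASTBUF 8

/* copy nbytes through a buffer that may need to be heap-allocated */
static long process(const char *src, size_t nbytes)
{
        char stackbuf[FASTBUF];
        char *buf = stackbuf;
        long ret = -1;

        if (nbytes > FASTBUF) {
                buf = malloc(nbytes);
                if (!buf)
                        return -1;      /* nothing allocated yet */
        }

        memcpy(buf, src, nbytes);
        if (buf[0] == '\0')
                goto out;               /* error path still frees */

        ret = (long)nbytes;
out:
        if (buf != stackbuf)            /* only free what we allocated */
                free(buf);
        return ret;
}

int main(void)
{
        return process("hello world, this is long", 25) > 0 ? 0 : 1;
}
]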
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 28cc1acd5439..e9ef59b3abb1 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -47,12 +47,13 @@ static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
47 ssize_t count; 47 ssize_t count;
48 char *buf; 48 char *buf;
49 49
50 /* acquire buffer and ensure that it's >= PAGE_SIZE */ 50 /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */
51 count = seq_get_buf(sf, &buf); 51 count = seq_get_buf(sf, &buf);
52 if (count < PAGE_SIZE) { 52 if (count < PAGE_SIZE) {
53 seq_commit(sf, -1); 53 seq_commit(sf, -1);
54 return 0; 54 return 0;
55 } 55 }
56 memset(buf, 0, PAGE_SIZE);
56 57
57 /* 58 /*
58 * Invoke show(). Control may reach here via seq file lseek even 59 * Invoke show(). Control may reach here via seq file lseek even
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index a66ad6196f59..8794423f7efb 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -63,7 +63,8 @@ int __init sysfs_init(void)
63{ 63{
64 int err; 64 int err;
65 65
66 sysfs_root = kernfs_create_root(NULL, 0, NULL); 66 sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
67 NULL);
67 if (IS_ERR(sysfs_root)) 68 if (IS_ERR(sysfs_root))
68 return PTR_ERR(sysfs_root); 69 return PTR_ERR(sysfs_root);
69 70
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 1399e187d425..753e467aa1a5 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -237,7 +237,7 @@ xfs_fs_nfs_commit_metadata(
237 237
238 if (!lsn) 238 if (!lsn)
239 return 0; 239 return 0;
240 return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 240 return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
241} 241}
242 242
243const struct export_operations xfs_export_operations = { 243const struct export_operations xfs_export_operations = {
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 951a2321ee01..830c1c937b88 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -155,7 +155,7 @@ xfs_dir_fsync(
155 155
156 if (!lsn) 156 if (!lsn)
157 return 0; 157 return 0;
158 return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 158 return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
159} 159}
160 160
161STATIC int 161STATIC int
@@ -295,7 +295,7 @@ xfs_file_aio_read(
295 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); 295 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
296 296
297 if (inode->i_mapping->nrpages) { 297 if (inode->i_mapping->nrpages) {
298 ret = -filemap_write_and_wait_range( 298 ret = filemap_write_and_wait_range(
299 VFS_I(ip)->i_mapping, 299 VFS_I(ip)->i_mapping,
300 pos, -1); 300 pos, -1);
301 if (ret) { 301 if (ret) {
@@ -837,7 +837,7 @@ xfs_file_fallocate(
837 unsigned blksize_mask = (1 << inode->i_blkbits) - 1; 837 unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
838 838
839 if (offset & blksize_mask || len & blksize_mask) { 839 if (offset & blksize_mask || len & blksize_mask) {
840 error = -EINVAL; 840 error = EINVAL;
841 goto out_unlock; 841 goto out_unlock;
842 } 842 }
843 843
@@ -846,7 +846,7 @@ xfs_file_fallocate(
846 * in which case it is effectively a truncate operation 846 * in which case it is effectively a truncate operation
847 */ 847 */
848 if (offset + len >= i_size_read(inode)) { 848 if (offset + len >= i_size_read(inode)) {
849 error = -EINVAL; 849 error = EINVAL;
850 goto out_unlock; 850 goto out_unlock;
851 } 851 }
852 852
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 301ecbfcc0be..36d630319a27 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -72,8 +72,8 @@ xfs_initxattrs(
72 int error = 0; 72 int error = 0;
73 73
74 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 74 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
75 error = xfs_attr_set(ip, xattr->name, xattr->value, 75 error = -xfs_attr_set(ip, xattr->name, xattr->value,
76 xattr->value_len, ATTR_SECURE); 76 xattr->value_len, ATTR_SECURE);
77 if (error < 0) 77 if (error < 0)
78 break; 78 break;
79 } 79 }
@@ -93,8 +93,8 @@ xfs_init_security(
93 struct inode *dir, 93 struct inode *dir,
94 const struct qstr *qstr) 94 const struct qstr *qstr)
95{ 95{
96 return security_inode_init_security(inode, dir, qstr, 96 return -security_inode_init_security(inode, dir, qstr,
97 &xfs_initxattrs, NULL); 97 &xfs_initxattrs, NULL);
98} 98}
99 99
100static void 100static void
@@ -173,12 +173,12 @@ xfs_generic_create(
173 173
174#ifdef CONFIG_XFS_POSIX_ACL 174#ifdef CONFIG_XFS_POSIX_ACL
175 if (default_acl) { 175 if (default_acl) {
176 error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); 176 error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
177 if (error) 177 if (error)
178 goto out_cleanup_inode; 178 goto out_cleanup_inode;
179 } 179 }
180 if (acl) { 180 if (acl) {
181 error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); 181 error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
182 if (error) 182 if (error)
183 goto out_cleanup_inode; 183 goto out_cleanup_inode;
184 } 184 }
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 348e4d2ed6e6..dc977b6e6a36 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -843,22 +843,17 @@ xfs_qm_init_quotainfo(
843 843
844 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); 844 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
845 845
846 if ((error = list_lru_init(&qinf->qi_lru))) { 846 error = -list_lru_init(&qinf->qi_lru);
847 kmem_free(qinf); 847 if (error)
848 mp->m_quotainfo = NULL; 848 goto out_free_qinf;
849 return error;
850 }
851 849
852 /* 850 /*
853 * See if quotainodes are setup, and if not, allocate them, 851 * See if quotainodes are setup, and if not, allocate them,
854 * and change the superblock accordingly. 852 * and change the superblock accordingly.
855 */ 853 */
856 if ((error = xfs_qm_init_quotainos(mp))) { 854 error = xfs_qm_init_quotainos(mp);
857 list_lru_destroy(&qinf->qi_lru); 855 if (error)
858 kmem_free(qinf); 856 goto out_free_lru;
859 mp->m_quotainfo = NULL;
860 return error;
861 }
862 857
863 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS); 858 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
864 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS); 859 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
@@ -918,7 +913,7 @@ xfs_qm_init_quotainfo(
918 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); 913 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
919 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); 914 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
920 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); 915 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
921 916
922 xfs_qm_dqdestroy(dqp); 917 xfs_qm_dqdestroy(dqp);
923 } else { 918 } else {
924 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; 919 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -935,6 +930,13 @@ xfs_qm_init_quotainfo(
935 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; 930 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
936 register_shrinker(&qinf->qi_shrinker); 931 register_shrinker(&qinf->qi_shrinker);
937 return 0; 932 return 0;
933
934out_free_lru:
935 list_lru_destroy(&qinf->qi_lru);
936out_free_qinf:
937 kmem_free(qinf);
938 mp->m_quotainfo = NULL;
939 return error;
938} 940}
939 941
940 942
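
[Editor's note: xfs_qm_init_quotainfo() above is rewritten into the kernel's stacked-unwind style: each setup step that can fail jumps to a label that releases exactly the resources acquired so far, in reverse order, instead of duplicating cleanup at every failure site. A self-contained sketch of the same structure, with malloc/free standing in for list_lru_init() and friends:

#include <stdlib.h>

struct quotainfo { void *lru; void *inos; };

static int init_quotainfo(struct quotainfo **out)
{
        struct quotainfo *qinf;
        int error = -1;

        qinf = calloc(1, sizeof(*qinf));
        if (!qinf)
                return -1;

        qinf->lru = malloc(64);         /* step 1 */
        if (!qinf->lru)
                goto out_free_qinf;

        qinf->inos = malloc(64);        /* step 2 */
        if (!qinf->inos)
                goto out_free_lru;

        *out = qinf;
        return 0;

out_free_lru:                           /* unwind in reverse order */
        free(qinf->lru);
out_free_qinf:
        free(qinf);
        return error;
}

int main(void)
{
        struct quotainfo *qi = NULL;
        return init_quotainfo(&qi);
}
]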
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 205376776377..3494eff8e4eb 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1433,11 +1433,11 @@ xfs_fs_fill_super(
1433 if (error) 1433 if (error)
1434 goto out_free_fsname; 1434 goto out_free_fsname;
1435 1435
1436 error = xfs_init_mount_workqueues(mp); 1436 error = -xfs_init_mount_workqueues(mp);
1437 if (error) 1437 if (error)
1438 goto out_close_devices; 1438 goto out_close_devices;
1439 1439
1440 error = xfs_icsb_init_counters(mp); 1440 error = -xfs_icsb_init_counters(mp);
1441 if (error) 1441 if (error)
1442 goto out_destroy_workqueues; 1442 goto out_destroy_workqueues;
1443 1443
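
[Editor's note: the sign flips across the XFS hunks (-_xfs_log_force_lsn, -xfs_attr_set, -list_lru_init, -xfs_init_mount_workqueues, and the EINVAL changes) all enforce one boundary rule of that era: XFS carried positive errno values internally while the VFS expects negative ones, so values are negated exactly once at the crossing. Sketched:

#include <errno.h>
#include <stdio.h>

/* internal layer: positive errno, XFS-style at the time */
static int xfs_internal_op(int fail)
{
        return fail ? EINVAL : 0;
}

/* boundary: negate so callers see the usual negative convention */
static int vfs_facing_op(int fail)
{
        return -xfs_internal_op(fail);
}

int main(void)
{
        printf("ok=%d err=%d\n", vfs_facing_op(0), vfs_facing_op(1));
        return 0;
}
]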
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 63b5eff0a80f..fdd7e1b61f60 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -47,6 +47,7 @@ struct amba_driver {
47enum amba_vendor { 47enum amba_vendor {
48 AMBA_VENDOR_ARM = 0x41, 48 AMBA_VENDOR_ARM = 0x41,
49 AMBA_VENDOR_ST = 0x80, 49 AMBA_VENDOR_ST = 0x80,
50 AMBA_VENDOR_QCOM = 0x51,
50}; 51};
51 52
52extern struct bus_type amba_bustype; 53extern struct bus_type amba_bustype;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c2515851c1aa..d60904b9e505 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -473,6 +473,7 @@ struct cftype {
473}; 473};
474 474
475extern struct cgroup_root cgrp_dfl_root; 475extern struct cgroup_root cgrp_dfl_root;
476extern struct css_set init_css_set;
476 477
477static inline bool cgroup_on_dfl(const struct cgroup *cgrp) 478static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
478{ 479{
@@ -700,6 +701,20 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
700 return task_css_check(task, subsys_id, false); 701 return task_css_check(task, subsys_id, false);
701} 702}
702 703
704/**
705 * task_css_is_root - test whether a task belongs to the root css
706 * @task: the target task
707 * @subsys_id: the target subsystem ID
708 *
709 * Test whether @task belongs to the root css on the specified subsystem.
710 * May be invoked in any context.
711 */
712static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
713{
714 return task_css_check(task, subsys_id, true) ==
715 init_css_set.subsys[subsys_id];
716}
717
703static inline struct cgroup *task_cgroup(struct task_struct *task, 718static inline struct cgroup *task_cgroup(struct task_struct *task,
704 int subsys_id) 719 int subsys_id)
705{ 720{
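
[Editor's note: task_css_is_root() added above reduces the root-membership test to a pointer comparison against the statically allocated init_css_set (exported in kernel/cgroup.c further down), which is what lets it run in any context without locking. The comparison pattern in toy form; the two-subsystem arrays are invented:

#include <stdbool.h>
#include <stdio.h>

struct css { int id; };

static struct css init_css_set[2];      /* static root state per subsystem */

struct task_like {
        struct css *css[2];             /* task's current state per subsystem */
};

/* root test is a pointer comparison against the static initial set */
static bool task_css_is_root_like(struct task_like *t, int subsys_id)
{
        return t->css[subsys_id] == &init_css_set[subsys_id];
}

int main(void)
{
        struct css child = { 1 };
        struct task_like a = { { &init_css_set[0], &init_css_set[1] } };
        struct task_like b = { { &init_css_set[0], &child } };

        printf("a root=%d b root=%d\n",
               task_css_is_root_like(&a, 1), task_css_is_root_like(&b, 1));
        return 0;
}
]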
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8300fb87b84a..72cb0ddb9678 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
429typedef void (*dma_async_tx_callback)(void *dma_async_param); 429typedef void (*dma_async_tx_callback)(void *dma_async_param);
430 430
431struct dmaengine_unmap_data { 431struct dmaengine_unmap_data {
432 u8 map_cnt;
432 u8 to_cnt; 433 u8 to_cnt;
433 u8 from_cnt; 434 u8 from_cnt;
434 u8 bidi_cnt; 435 u8 bidi_cnt;
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 7c8b20b120ea..a9a53b12397b 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -56,6 +56,7 @@ struct macvlan_dev {
56 int numqueues; 56 int numqueues;
57 netdev_features_t tap_features; 57 netdev_features_t tap_features;
58 int minor; 58 int minor;
59 int nest_level;
59}; 60};
60 61
61static inline void macvlan_count_rx(const struct macvlan_dev *vlan, 62static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 13bbbde00e68..b2acc4a1b13c 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
73/* found in socket.c */ 73/* found in socket.c */
74extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); 74extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
75 75
76static inline int is_vlan_dev(struct net_device *dev) 76static inline bool is_vlan_dev(struct net_device *dev)
77{ 77{
78 return dev->priv_flags & IFF_802_1Q_VLAN; 78 return dev->priv_flags & IFF_802_1Q_VLAN;
79} 79}
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
159#ifdef CONFIG_NET_POLL_CONTROLLER 159#ifdef CONFIG_NET_POLL_CONTROLLER
160 struct netpoll *netpoll; 160 struct netpoll *netpoll;
161#endif 161#endif
162 unsigned int nest_level;
162}; 163};
163 164
164static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) 165static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -197,6 +198,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
197 const struct net_device *by_dev); 198 const struct net_device *by_dev);
198 199
199extern bool vlan_uses_dev(const struct net_device *dev); 200extern bool vlan_uses_dev(const struct net_device *dev);
201
202static inline int vlan_get_encap_level(struct net_device *dev)
203{
204 BUG_ON(!is_vlan_dev(dev));
205 return vlan_dev_priv(dev)->nest_level;
206}
200#else 207#else
201static inline struct net_device * 208static inline struct net_device *
202__vlan_find_dev_deep(struct net_device *real_dev, 209__vlan_find_dev_deep(struct net_device *real_dev,
@@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
263{ 270{
264 return false; 271 return false;
265} 272}
273static inline int vlan_get_encap_level(struct net_device *dev)
274{
275 BUG();
276 return 0;
277}
266#endif 278#endif
267 279
268static inline bool vlan_hw_offload_capable(netdev_features_t features, 280static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
483 */ 495 */
484 skb->protocol = htons(ETH_P_802_2); 496 skb->protocol = htons(ETH_P_802_2);
485} 497}
498
486#endif /* !(_LINUX_IF_VLAN_H_) */ 499#endif /* !(_LINUX_IF_VLAN_H_) */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index b0122dc6f96a..ca1be5c9136c 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -50,7 +50,24 @@ enum kernfs_node_flag {
50 50
51/* @flags for kernfs_create_root() */ 51/* @flags for kernfs_create_root() */
52enum kernfs_root_flag { 52enum kernfs_root_flag {
53 KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001, 53 /*
54 * kernfs_nodes are created in the deactivated state and invisible.
55 * They require explicit kernfs_activate() to become visible. This
56 * can be used to make related nodes become visible atomically
57 * after all nodes are created successfully.
58 */
59 KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
60
61 /*
62 * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
63 * succeeds regardless of the RW permissions. sysfs had an extra
64 * layer of enforcement where open(2) fails with -EACCES regardless
65 * of CAP_DAC_OVERRIDE if the permission doesn't have the
66 * respective read or write access at all (none of S_IRUGO or
67 * S_IWUGO) or the respective operation isn't implemented. The
68 * following flag enables that behavior.
69 */
70 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,
54}; 71};
55 72
56/* type-specific structures for kernfs_node union members */ 73/* type-specific structures for kernfs_node union members */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b66e7610d4ee..7040dc98ff8b 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg {
421 __be32 byte_count; 421 __be32 byte_count;
422}; 422};
423 423
424enum mlx4_update_qp_attr {
425 MLX4_UPDATE_QP_SMAC = 1 << 0,
426};
427
428struct mlx4_update_qp_params {
429 u8 smac_index;
430};
431
432int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
433 enum mlx4_update_qp_attr attr,
434 struct mlx4_update_qp_params *params);
424int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 435int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
425 enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, 436 enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
426 struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, 437 struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
diff --git a/include/linux/net.h b/include/linux/net.h
index 94734a6259a4..17d83393afcc 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -248,24 +248,17 @@ do { \
248bool __net_get_random_once(void *buf, int nbytes, bool *done, 248bool __net_get_random_once(void *buf, int nbytes, bool *done,
249 struct static_key *done_key); 249 struct static_key *done_key);
250 250
251#ifdef HAVE_JUMP_LABEL
252#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
253 { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
254#else /* !HAVE_JUMP_LABEL */
255#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
256#endif /* HAVE_JUMP_LABEL */
257
258#define net_get_random_once(buf, nbytes) \ 251#define net_get_random_once(buf, nbytes) \
259 ({ \ 252 ({ \
260 bool ___ret = false; \ 253 bool ___ret = false; \
261 static bool ___done = false; \ 254 static bool ___done = false; \
262 static struct static_key ___done_key = \ 255 static struct static_key ___once_key = \
263 ___NET_RANDOM_STATIC_KEY_INIT; \ 256 STATIC_KEY_INIT_TRUE; \
264 if (!static_key_true(&___done_key)) \ 257 if (static_key_true(&___once_key)) \
265 ___ret = __net_get_random_once(buf, \ 258 ___ret = __net_get_random_once(buf, \
266 nbytes, \ 259 nbytes, \
267 &___done, \ 260 &___done, \
268 &___done_key); \ 261 &___once_key); \
269 ___ret; \ 262 ___ret; \
270 }) 263 })
271 264
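
[Editor's note: the net_get_random_once() rewrite drops the hand-rolled HAVE_JUMP_LABEL initializer and inverts the key: it now starts true and is flipped by __net_get_random_once() after the first initialization, so the steady-state path is a patched-out branch. The semantics, minus the code patching, are those of a per-callsite once; a userspace equivalent with pthread_once():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* per-callsite one-time initialization, sketched with pthread_once();
 * the kernel's static-key version patches the branch away after the
 * first call instead of testing a flag forever */
static pthread_once_t seed_once = PTHREAD_ONCE_INIT;
static unsigned int secret;

static void init_secret(void)
{
        secret = (unsigned int)random();        /* example entropy source */
}

static unsigned int get_secret(void)
{
        pthread_once(&seed_once, init_secret);  /* runs init exactly once */
        return secret;
}

int main(void)
{
        printf("%u %u\n", get_secret(), get_secret()); /* same value twice */
        return 0;
}
]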
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7ed3a3aa6604..b42d07b0390b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1144,6 +1144,7 @@ struct net_device_ops {
1144 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, 1144 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1145 struct net_device *dev, 1145 struct net_device *dev,
1146 void *priv); 1146 void *priv);
1147 int (*ndo_get_lock_subclass)(struct net_device *dev);
1147}; 1148};
1148 1149
1149/** 1150/**
@@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
2950 2951
2951static inline void netif_addr_lock_nested(struct net_device *dev) 2952static inline void netif_addr_lock_nested(struct net_device *dev)
2952{ 2953{
2953 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); 2954 int subclass = SINGLE_DEPTH_NESTING;
2955
2956 if (dev->netdev_ops->ndo_get_lock_subclass)
2957 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
2958
2959 spin_lock_nested(&dev->addr_list_lock, subclass);
2954} 2960}
2955 2961
2956static inline void netif_addr_lock_bh(struct net_device *dev) 2962static inline void netif_addr_lock_bh(struct net_device *dev)
@@ -3050,10 +3056,19 @@ extern int weight_p;
3050extern int bpf_jit_enable; 3056extern int bpf_jit_enable;
3051 3057
3052bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 3058bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3059struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3060 struct list_head **iter);
3053struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, 3061struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3054 struct list_head **iter); 3062 struct list_head **iter);
3055 3063
3056/* iterate through upper list, must be called under RCU read lock */ 3064/* iterate through upper list, must be called under RCU read lock */
3065#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3066 for (iter = &(dev)->adj_list.upper, \
3067 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3068 updev; \
3069 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3070
3071/* iterate through upper list, must be called under RCU read lock */
3057#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ 3072#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
3058 for (iter = &(dev)->all_adj_list.upper, \ 3073 for (iter = &(dev)->all_adj_list.upper, \
3059 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ 3074 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
@@ -3077,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3077 priv; \ 3092 priv; \
3078 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 3093 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3079 3094
3095void *netdev_lower_get_next(struct net_device *dev,
3096 struct list_head **iter);
3097#define netdev_for_each_lower_dev(dev, ldev, iter) \
3098 for (iter = &(dev)->adj_list.lower, \
3099 ldev = netdev_lower_get_next(dev, &(iter)); \
3100 ldev; \
3101 ldev = netdev_lower_get_next(dev, &(iter)))
3102
3080void *netdev_adjacent_get_private(struct list_head *adj_list); 3103void *netdev_adjacent_get_private(struct list_head *adj_list);
3081void *netdev_lower_get_first_private_rcu(struct net_device *dev); 3104void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3082struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 3105struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3092,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
3092void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 3115void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3093void *netdev_lower_dev_get_private(struct net_device *dev, 3116void *netdev_lower_dev_get_private(struct net_device *dev,
3094 struct net_device *lower_dev); 3117 struct net_device *lower_dev);
3118int dev_get_nest_level(struct net_device *dev,
3119 bool (*type_check)(struct net_device *dev));
3095int skb_checksum_help(struct sk_buff *skb); 3120int skb_checksum_help(struct sk_buff *skb);
3096struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3121struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3097 netdev_features_t features, bool tx_path); 3122 netdev_features_t features, bool tx_path);
@@ -3180,12 +3205,7 @@ void netdev_change_features(struct net_device *dev);
3180void netif_stacked_transfer_operstate(const struct net_device *rootdev, 3205void netif_stacked_transfer_operstate(const struct net_device *rootdev,
3181 struct net_device *dev); 3206 struct net_device *dev);
3182 3207
3183netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 3208netdev_features_t netif_skb_features(struct sk_buff *skb);
3184 const struct net_device *dev);
3185static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
3186{
3187 return netif_skb_dev_features(skb, skb->dev);
3188}
3189 3209
3190static inline bool net_gso_ok(netdev_features_t features, int gso_type) 3210static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3191{ 3211{
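
[Editor's note: the ndo_get_lock_subclass hook ties together several hunks above (nest_level in macvlan_dev and vlan_dev_priv, vlan_get_encap_level(), dev_get_nest_level()): a stacked device reports its depth so netif_addr_lock_nested() can hand lockdep a distinct subclass per level instead of the fixed SINGLE_DEPTH_NESTING. The dispatch shape, sketched without lockdep and with an invented device struct:

#include <stdio.h>

#define SINGLE_DEPTH_NESTING 1

struct net_device_like {
        int (*get_lock_subclass)(struct net_device_like *dev);
        int nest_level;
};

static int vlan_get_lock_subclass(struct net_device_like *dev)
{
        return dev->nest_level;         /* depth in the device stack */
}

/* mirror of the hunk above: prefer the driver-reported depth */
static int addr_lock_subclass(struct net_device_like *dev)
{
        if (dev->get_lock_subclass)
                return dev->get_lock_subclass(dev);
        return SINGLE_DEPTH_NESTING;
}

int main(void)
{
        struct net_device_like eth  = { NULL, 0 };
        struct net_device_like vlan = { vlan_get_lock_subclass, 2 };

        printf("eth subclass=%d vlan subclass=%d\n",
               addr_lock_subclass(&eth), addr_lock_subclass(&vlan));
        return 0;
}
]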
diff --git a/include/linux/of.h b/include/linux/of.h
index 3bad8d106e0e..e6f0988c1c68 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -349,7 +349,7 @@ int of_device_is_stdout_path(struct device_node *dn);
349 349
350#else /* CONFIG_OF */ 350#else /* CONFIG_OF */
351 351
352static inline const char* of_node_full_name(struct device_node *np) 352static inline const char* of_node_full_name(const struct device_node *np)
353{ 353{
354 return "<no-node>"; 354 return "<no-node>";
355} 355}
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 6fe8464ed767..881a7c3571f4 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -31,7 +31,12 @@ extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
31#else /* CONFIG_OF */ 31#else /* CONFIG_OF */
32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) 32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
33{ 33{
34 return -ENOSYS; 34 /*
35 * Fall back to the non-DT function to register a bus.
36 * This way, we don't have to keep compat bits around in drivers.
37 */
38
39 return mdiobus_register(mdio);
35} 40}
36 41
37static inline struct phy_device *of_phy_find_device(struct device_node *phy_np) 42static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 41a13e70f41f..7944cdc27bed 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -10,7 +10,7 @@
10 10
11struct dma_chan; 11struct dma_chan;
12 12
13#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE) 13#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE))
14bool omap_dma_filter_fn(struct dma_chan *, void *); 14bool omap_dma_filter_fn(struct dma_chan *, void *);
15#else 15#else
16static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) 16static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3356abcfff18..3ef6ea12806a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -402,6 +402,8 @@ struct perf_event {
402 402
403 struct ring_buffer *rb; 403 struct ring_buffer *rb;
404 struct list_head rb_entry; 404 struct list_head rb_entry;
405 unsigned long rcu_batches;
406 int rcu_pending;
405 407
406 /* poll related */ 408 /* poll related */
407 wait_queue_head_t waitq; 409 wait_queue_head_t waitq;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 8e3e66ac0a52..953937ea5233 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/mutex.h> 5#include <linux/mutex.h>
6#include <linux/netdevice.h> 6#include <linux/netdevice.h>
7#include <linux/wait.h>
7#include <uapi/linux/rtnetlink.h> 8#include <uapi/linux/rtnetlink.h>
8 9
9extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); 10extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
22extern void rtnl_unlock(void); 23extern void rtnl_unlock(void);
23extern int rtnl_trylock(void); 24extern int rtnl_trylock(void);
24extern int rtnl_is_locked(void); 25extern int rtnl_is_locked(void);
26
27extern wait_queue_head_t netdev_unregistering_wq;
28extern struct mutex net_mutex;
29
25#ifdef CONFIG_PROVE_LOCKING 30#ifdef CONFIG_PROVE_LOCKING
26extern int lockdep_rtnl_is_held(void); 31extern int lockdep_rtnl_is_held(void);
27#else 32#else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25f54c79f757..221b2bde3723 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
220#define TASK_PARKED 512 220#define TASK_PARKED 512
221#define TASK_STATE_MAX 1024 221#define TASK_STATE_MAX 1024
222 222
223#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" 223#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
224 224
225extern char ___assert_task_state[1 - 2*!!( 225extern char ___assert_task_state[1 - 2*!!(
226 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; 226 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
1153 * 1153 *
1154 * @dl_boosted tells if we are boosted due to DI. If so we are 1154 * @dl_boosted tells if we are boosted due to DI. If so we are
1155 * outside bandwidth enforcement mechanism (but only until we 1155 * outside bandwidth enforcement mechanism (but only until we
1156 * exit the critical section). 1156 * exit the critical section);
1157 *
1158 * @dl_yielded tells if task gave up the cpu before consuming
1159 * all its available runtime during the last job.
1157 */ 1160 */
1158 int dl_throttled, dl_new, dl_boosted; 1161 int dl_throttled, dl_new, dl_boosted, dl_yielded;
1159 1162
1160 /* 1163 /*
1161 * Bandwidth enforcement timer. Each -deadline task has its 1164 * Bandwidth enforcement timer. Each -deadline task has its
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f3539a15c411..f856e5a746fa 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3669,6 +3669,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
3669void cfg80211_sched_scan_stopped(struct wiphy *wiphy); 3669void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
3670 3670
3671/** 3671/**
3672 * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
3673 *
3674 * @wiphy: the wiphy on which the scheduled scan stopped
3675 *
3676 * The driver can call this function to inform cfg80211 that the
3677 * scheduled scan had to be stopped, for whatever reason. The driver
3678 * is then called back via the sched_scan_stop operation when done.
3679 * This function should be called with rtnl locked.
3680 */
3681void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
3682
3683/**
3672 * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame 3684 * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
3673 * 3685 *
3674 * @wiphy: the wiphy reporting the BSS 3686 * @wiphy: the wiphy reporting the BSS
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 6c4f5eac98e7..216cecce65e9 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg);
127void rt6_ifdown(struct net *net, struct net_device *dev); 127void rt6_ifdown(struct net *net, struct net_device *dev);
128void rt6_mtu_change(struct net_device *dev, unsigned int mtu); 128void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
129void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); 129void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
130void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
130 131
131 132
132/* 133/*
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 80f500a29498..b2704fd0ec80 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -20,6 +20,11 @@ struct local_ports {
20 int range[2]; 20 int range[2];
21}; 21};
22 22
23struct ping_group_range {
24 seqlock_t lock;
25 kgid_t range[2];
26};
27
23struct netns_ipv4 { 28struct netns_ipv4 {
24#ifdef CONFIG_SYSCTL 29#ifdef CONFIG_SYSCTL
25 struct ctl_table_header *forw_hdr; 30 struct ctl_table_header *forw_hdr;
@@ -66,13 +71,13 @@ struct netns_ipv4 {
66 int sysctl_icmp_ratemask; 71 int sysctl_icmp_ratemask;
67 int sysctl_icmp_errors_use_inbound_ifaddr; 72 int sysctl_icmp_errors_use_inbound_ifaddr;
68 73
69 struct local_ports sysctl_local_ports; 74 struct local_ports ip_local_ports;
70 75
71 int sysctl_tcp_ecn; 76 int sysctl_tcp_ecn;
72 int sysctl_ip_no_pmtu_disc; 77 int sysctl_ip_no_pmtu_disc;
73 int sysctl_ip_fwd_use_pmtu; 78 int sysctl_ip_fwd_use_pmtu;
74 79
75 kgid_t sysctl_ping_group_range[2]; 80 struct ping_group_range ping_group_range;
76 81
77 atomic_t dev_addr_genid; 82 atomic_t dev_addr_genid;
78 83
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 6db66783d268..333640608087 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -697,9 +697,11 @@ __SYSCALL(__NR_finit_module, sys_finit_module)
697__SYSCALL(__NR_sched_setattr, sys_sched_setattr) 697__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
698#define __NR_sched_getattr 275 698#define __NR_sched_getattr 275
699__SYSCALL(__NR_sched_getattr, sys_sched_getattr) 699__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
700#define __NR_renameat2 276
701__SYSCALL(__NR_renameat2, sys_renameat2)
700 702
701#undef __NR_syscalls 703#undef __NR_syscalls
702#define __NR_syscalls 276 704#define __NR_syscalls 277
703 705
704/* 706/*
705 * All syscalls below here should go away really, 707 * All syscalls below here should go away really,
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 1ba9d626aa83..194c1eab04d8 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3856,6 +3856,8 @@ enum nl80211_ap_sme_features {
3856 * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested 3856 * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
3857 * to work properly to support receiving regulatory hints from 3857 * to work properly to support receiving regulatory hints from
3858 * cellular base stations. 3858 * cellular base stations.
3859 * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
3860 * here to reserve the value for API/ABI compatibility)
3859 * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of 3861 * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
3860 * equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station 3862 * equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
3861 * mode 3863 * mode
@@ -3897,7 +3899,7 @@ enum nl80211_feature_flags {
3897 NL80211_FEATURE_HT_IBSS = 1 << 1, 3899 NL80211_FEATURE_HT_IBSS = 1 << 1,
3898 NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2, 3900 NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
3899 NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3, 3901 NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
3900 /* bit 4 is reserved - don't use */ 3902 NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 1 << 4,
3901 NL80211_FEATURE_SAE = 1 << 5, 3903 NL80211_FEATURE_SAE = 1 << 5,
3902 NL80211_FEATURE_LOW_PRIORITY_SCAN = 1 << 6, 3904 NL80211_FEATURE_LOW_PRIORITY_SCAN = 1 << 6,
3903 NL80211_FEATURE_SCAN_FLUSH = 1 << 7, 3905 NL80211_FEATURE_SCAN_FLUSH = 1 << 7,
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 11a03d67635a..3f1ca934a237 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -348,7 +348,7 @@ struct cgrp_cset_link {
348 * reference-counted, to improve performance when child cgroups 348 * reference-counted, to improve performance when child cgroups
349 * haven't been created. 349 * haven't been created.
350 */ 350 */
351static struct css_set init_css_set = { 351struct css_set init_css_set = {
352 .refcount = ATOMIC_INIT(1), 352 .refcount = ATOMIC_INIT(1),
353 .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), 353 .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
354 .tasks = LIST_HEAD_INIT(init_css_set.tasks), 354 .tasks = LIST_HEAD_INIT(init_css_set.tasks),
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 2bc4a2256444..345628c78b5b 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -21,6 +21,7 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/freezer.h> 22#include <linux/freezer.h>
23#include <linux/seq_file.h> 23#include <linux/seq_file.h>
24#include <linux/mutex.h>
24 25
25/* 26/*
26 * A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is 27 * A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is
@@ -42,9 +43,10 @@ enum freezer_state_flags {
42struct freezer { 43struct freezer {
43 struct cgroup_subsys_state css; 44 struct cgroup_subsys_state css;
44 unsigned int state; 45 unsigned int state;
45 spinlock_t lock;
46}; 46};
47 47
48static DEFINE_MUTEX(freezer_mutex);
49
48static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) 50static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
49{ 51{
50 return css ? container_of(css, struct freezer, css) : NULL; 52 return css ? container_of(css, struct freezer, css) : NULL;
@@ -93,7 +95,6 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css)
93 if (!freezer) 95 if (!freezer)
94 return ERR_PTR(-ENOMEM); 96 return ERR_PTR(-ENOMEM);
95 97
96 spin_lock_init(&freezer->lock);
97 return &freezer->css; 98 return &freezer->css;
98} 99}
99 100
@@ -110,14 +111,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
110 struct freezer *freezer = css_freezer(css); 111 struct freezer *freezer = css_freezer(css);
111 struct freezer *parent = parent_freezer(freezer); 112 struct freezer *parent = parent_freezer(freezer);
112 113
113 /* 114 mutex_lock(&freezer_mutex);
114 * The following double locking and freezing state inheritance
115 * guarantee that @cgroup can never escape ancestors' freezing
116 * states. See css_for_each_descendant_pre() for details.
117 */
118 if (parent)
119 spin_lock_irq(&parent->lock);
120 spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING);
121 115
122 freezer->state |= CGROUP_FREEZER_ONLINE; 116 freezer->state |= CGROUP_FREEZER_ONLINE;
123 117
@@ -126,10 +120,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
126 atomic_inc(&system_freezing_cnt); 120 atomic_inc(&system_freezing_cnt);
127 } 121 }
128 122
129 spin_unlock(&freezer->lock); 123 mutex_unlock(&freezer_mutex);
130 if (parent)
131 spin_unlock_irq(&parent->lock);
132
133 return 0; 124 return 0;
134} 125}
135 126
@@ -144,14 +135,14 @@ static void freezer_css_offline(struct cgroup_subsys_state *css)
144{ 135{
145 struct freezer *freezer = css_freezer(css); 136 struct freezer *freezer = css_freezer(css);
146 137
147 spin_lock_irq(&freezer->lock); 138 mutex_lock(&freezer_mutex);
148 139
149 if (freezer->state & CGROUP_FREEZING) 140 if (freezer->state & CGROUP_FREEZING)
150 atomic_dec(&system_freezing_cnt); 141 atomic_dec(&system_freezing_cnt);
151 142
152 freezer->state = 0; 143 freezer->state = 0;
153 144
154 spin_unlock_irq(&freezer->lock); 145 mutex_unlock(&freezer_mutex);
155} 146}
156 147
157static void freezer_css_free(struct cgroup_subsys_state *css) 148static void freezer_css_free(struct cgroup_subsys_state *css)
@@ -175,7 +166,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
175 struct task_struct *task; 166 struct task_struct *task;
176 bool clear_frozen = false; 167 bool clear_frozen = false;
177 168
178 spin_lock_irq(&freezer->lock); 169 mutex_lock(&freezer_mutex);
179 170
180 /* 171 /*
181 * Make the new tasks conform to the current state of @new_css. 172 * Make the new tasks conform to the current state of @new_css.
@@ -197,21 +188,13 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
197 } 188 }
198 } 189 }
199 190
200 spin_unlock_irq(&freezer->lock); 191 /* propagate FROZEN clearing upwards */
201
202 /*
203 * Propagate FROZEN clearing upwards. We may race with
204 * update_if_frozen(), but as long as both work bottom-up, either
205 * update_if_frozen() sees child's FROZEN cleared or we clear the
206 * parent's FROZEN later. No parent w/ !FROZEN children can be
207 * left FROZEN.
208 */
209 while (clear_frozen && (freezer = parent_freezer(freezer))) { 192 while (clear_frozen && (freezer = parent_freezer(freezer))) {
210 spin_lock_irq(&freezer->lock);
211 freezer->state &= ~CGROUP_FROZEN; 193 freezer->state &= ~CGROUP_FROZEN;
212 clear_frozen = freezer->state & CGROUP_FREEZING; 194 clear_frozen = freezer->state & CGROUP_FREEZING;
213 spin_unlock_irq(&freezer->lock);
214 } 195 }
196
197 mutex_unlock(&freezer_mutex);
215} 198}
216 199
217/** 200/**
@@ -228,9 +211,6 @@ static void freezer_fork(struct task_struct *task)
228{ 211{
229 struct freezer *freezer; 212 struct freezer *freezer;
230 213
231 rcu_read_lock();
232 freezer = task_freezer(task);
233
234 /* 214 /*
235 * The root cgroup is non-freezable, so we can skip locking the 215 * The root cgroup is non-freezable, so we can skip locking the
236 * freezer. This is safe regardless of race with task migration. 216 * freezer. This is safe regardless of race with task migration.
@@ -238,24 +218,18 @@ static void freezer_fork(struct task_struct *task)
238 * to do. If we lost and root is the new cgroup, noop is still the 218 * to do. If we lost and root is the new cgroup, noop is still the
239 * right thing to do. 219 * right thing to do.
240 */ 220 */
241 if (!parent_freezer(freezer)) 221 if (task_css_is_root(task, freezer_cgrp_id))
242 goto out; 222 return;
243 223
244 /* 224 mutex_lock(&freezer_mutex);
245 * Grab @freezer->lock and freeze @task after verifying @task still 225 rcu_read_lock();
246 * belongs to @freezer and it's freezing. The former is for the 226
247 * case where we have raced against task migration and lost and 227 freezer = task_freezer(task);
248 * @task is already in a different cgroup which may not be frozen. 228 if (freezer->state & CGROUP_FREEZING)
249 * This isn't strictly necessary as freeze_task() is allowed to be
250 * called spuriously but let's do it anyway for, if nothing else,
251 * documentation.
252 */
253 spin_lock_irq(&freezer->lock);
254 if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
255 freeze_task(task); 229 freeze_task(task);
256 spin_unlock_irq(&freezer->lock); 230
257out:
258 rcu_read_unlock(); 231 rcu_read_unlock();
232 mutex_unlock(&freezer_mutex);
259} 233}
260 234
261/** 235/**
@@ -281,22 +255,24 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
281 struct css_task_iter it; 255 struct css_task_iter it;
282 struct task_struct *task; 256 struct task_struct *task;
283 257
284 WARN_ON_ONCE(!rcu_read_lock_held()); 258 lockdep_assert_held(&freezer_mutex);
285
286 spin_lock_irq(&freezer->lock);
287 259
288 if (!(freezer->state & CGROUP_FREEZING) || 260 if (!(freezer->state & CGROUP_FREEZING) ||
289 (freezer->state & CGROUP_FROZEN)) 261 (freezer->state & CGROUP_FROZEN))
290 goto out_unlock; 262 return;
291 263
292 /* are all (live) children frozen? */ 264 /* are all (live) children frozen? */
265 rcu_read_lock();
293 css_for_each_child(pos, css) { 266 css_for_each_child(pos, css) {
294 struct freezer *child = css_freezer(pos); 267 struct freezer *child = css_freezer(pos);
295 268
296 if ((child->state & CGROUP_FREEZER_ONLINE) && 269 if ((child->state & CGROUP_FREEZER_ONLINE) &&
297 !(child->state & CGROUP_FROZEN)) 270 !(child->state & CGROUP_FROZEN)) {
298 goto out_unlock; 271 rcu_read_unlock();
272 return;
273 }
299 } 274 }
275 rcu_read_unlock();
300 276
301 /* are all tasks frozen? */ 277 /* are all tasks frozen? */
302 css_task_iter_start(css, &it); 278 css_task_iter_start(css, &it);
@@ -317,21 +293,29 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
317 freezer->state |= CGROUP_FROZEN; 293 freezer->state |= CGROUP_FROZEN;
318out_iter_end: 294out_iter_end:
319 css_task_iter_end(&it); 295 css_task_iter_end(&it);
320out_unlock:
321 spin_unlock_irq(&freezer->lock);
322} 296}
323 297
324static int freezer_read(struct seq_file *m, void *v) 298static int freezer_read(struct seq_file *m, void *v)
325{ 299{
326 struct cgroup_subsys_state *css = seq_css(m), *pos; 300 struct cgroup_subsys_state *css = seq_css(m), *pos;
327 301
302 mutex_lock(&freezer_mutex);
328 rcu_read_lock(); 303 rcu_read_lock();
329 304
330 /* update states bottom-up */ 305 /* update states bottom-up */
331 css_for_each_descendant_post(pos, css) 306 css_for_each_descendant_post(pos, css) {
307 if (!css_tryget(pos))
308 continue;
309 rcu_read_unlock();
310
332 update_if_frozen(pos); 311 update_if_frozen(pos);
333 312
313 rcu_read_lock();
314 css_put(pos);
315 }
316
334 rcu_read_unlock(); 317 rcu_read_unlock();
318 mutex_unlock(&freezer_mutex);
335 319
336 seq_puts(m, freezer_state_strs(css_freezer(css)->state)); 320 seq_puts(m, freezer_state_strs(css_freezer(css)->state));
337 seq_putc(m, '\n'); 321 seq_putc(m, '\n');
@@ -373,7 +357,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
373 unsigned int state) 357 unsigned int state)
374{ 358{
375 /* also synchronizes against task migration, see freezer_attach() */ 359 /* also synchronizes against task migration, see freezer_attach() */
376 lockdep_assert_held(&freezer->lock); 360 lockdep_assert_held(&freezer_mutex);
377 361
378 if (!(freezer->state & CGROUP_FREEZER_ONLINE)) 362 if (!(freezer->state & CGROUP_FREEZER_ONLINE))
379 return; 363 return;
@@ -414,31 +398,29 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
414 * descendant will try to inherit its parent's FREEZING state as 398 * descendant will try to inherit its parent's FREEZING state as
415 * CGROUP_FREEZING_PARENT. 399 * CGROUP_FREEZING_PARENT.
416 */ 400 */
401 mutex_lock(&freezer_mutex);
417 rcu_read_lock(); 402 rcu_read_lock();
418 css_for_each_descendant_pre(pos, &freezer->css) { 403 css_for_each_descendant_pre(pos, &freezer->css) {
419 struct freezer *pos_f = css_freezer(pos); 404 struct freezer *pos_f = css_freezer(pos);
420 struct freezer *parent = parent_freezer(pos_f); 405 struct freezer *parent = parent_freezer(pos_f);
421 406
422 spin_lock_irq(&pos_f->lock); 407 if (!css_tryget(pos))
408 continue;
409 rcu_read_unlock();
423 410
424 if (pos_f == freezer) { 411 if (pos_f == freezer)
425 freezer_apply_state(pos_f, freeze, 412 freezer_apply_state(pos_f, freeze,
426 CGROUP_FREEZING_SELF); 413 CGROUP_FREEZING_SELF);
427 } else { 414 else
428 /*
429 * Our update to @parent->state is already visible
430 * which is all we need. No need to lock @parent.
431 * For more info on synchronization, see
432 * freezer_post_create().
433 */
434 freezer_apply_state(pos_f, 415 freezer_apply_state(pos_f,
435 parent->state & CGROUP_FREEZING, 416 parent->state & CGROUP_FREEZING,
436 CGROUP_FREEZING_PARENT); 417 CGROUP_FREEZING_PARENT);
437 }
438 418
439 spin_unlock_irq(&pos_f->lock); 419 rcu_read_lock();
420 css_put(pos);
440 } 421 }
441 rcu_read_unlock(); 422 rcu_read_unlock();
423 mutex_unlock(&freezer_mutex);
442} 424}
443 425
444static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft, 426static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
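
The freezer hunks above collapse the old per-cgroup spin_lock_irq(&freezer->lock) pairs into one global freezer_mutex, so hierarchy walks such as the one in freezer_attach() no longer take a lock per level. A minimal userspace sketch of that bottom-up FROZEN clearing under a single mutex; the struct layout and names are hypothetical, with a pthread mutex standing in for the kernel one (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    #define CGROUP_FREEZING 0x1
    #define CGROUP_FROZEN   0x2

    struct freezer {
            unsigned int state;
            struct freezer *parent;
    };

    /* one lock for the whole hierarchy, as in the patch */
    static pthread_mutex_t freezer_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* clear FROZEN upwards while ancestors are still FREEZING,
     * mirroring the propagation loop in freezer_attach() */
    static void clear_frozen_upwards(struct freezer *freezer)
    {
            int clear_frozen = 1;

            pthread_mutex_lock(&freezer_mutex);
            while (clear_frozen && (freezer = freezer->parent)) {
                    freezer->state &= ~CGROUP_FROZEN;
                    clear_frozen = freezer->state & CGROUP_FREEZING;
            }
            pthread_mutex_unlock(&freezer_mutex);
    }

    int main(void)
    {
            struct freezer root  = { CGROUP_FREEZING | CGROUP_FROZEN, NULL };
            struct freezer child = { CGROUP_FREEZING | CGROUP_FROZEN, &root };
            struct freezer leaf  = { 0, &child };

            clear_frozen_upwards(&leaf);
            printf("child FROZEN=%d root FROZEN=%d\n",
                   !!(child.state & CGROUP_FROZEN),
                   !!(root.state & CGROUP_FROZEN));
            return 0;
    }

The trade-off is coarser locking in exchange for not having to hold parent and child locks together, which is what the deleted double-locking comment above was guarding.
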
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a9e710eef0e2..247979a1b815 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -726,10 +726,12 @@ void set_cpu_present(unsigned int cpu, bool present)
726 726
727void set_cpu_online(unsigned int cpu, bool online) 727void set_cpu_online(unsigned int cpu, bool online)
728{ 728{
729 if (online) 729 if (online) {
730 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 730 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
731 else 731 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
732 } else {
732 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 733 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
734 }
733} 735}
734 736
735void set_cpu_active(unsigned int cpu, bool active) 737void set_cpu_active(unsigned int cpu, bool active)
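
The set_cpu_online() change makes a CPU that comes online also set its active bit in the same step. A toy bitmask sketch of that invariant, with plain words standing in for the kernel's cpumasks (names hypothetical):

    #include <stdio.h>

    static unsigned long cpu_online_bits, cpu_active_bits;

    static void set_cpu_online(unsigned int cpu, int online)
    {
            if (online) {
                    /* coming online now implies becoming active, too */
                    cpu_online_bits |= 1UL << cpu;
                    cpu_active_bits |= 1UL << cpu;
            } else {
                    cpu_online_bits &= ~(1UL << cpu);
            }
    }

    int main(void)
    {
            set_cpu_online(2, 1);
            printf("online=%lx active=%lx\n", cpu_online_bits, cpu_active_bits);
            return 0;
    }
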
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f83a71a3e46d..440eefc67397 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
1443 cpuctx->exclusive = 0; 1443 cpuctx->exclusive = 0;
1444} 1444}
1445 1445
1446struct remove_event {
1447 struct perf_event *event;
1448 bool detach_group;
1449};
1450
1446/* 1451/*
1447 * Cross CPU call to remove a performance event 1452 * Cross CPU call to remove a performance event
1448 * 1453 *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
1451 */ 1456 */
1452static int __perf_remove_from_context(void *info) 1457static int __perf_remove_from_context(void *info)
1453{ 1458{
1454 struct perf_event *event = info; 1459 struct remove_event *re = info;
1460 struct perf_event *event = re->event;
1455 struct perf_event_context *ctx = event->ctx; 1461 struct perf_event_context *ctx = event->ctx;
1456 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1462 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1457 1463
1458 raw_spin_lock(&ctx->lock); 1464 raw_spin_lock(&ctx->lock);
1459 event_sched_out(event, cpuctx, ctx); 1465 event_sched_out(event, cpuctx, ctx);
1466 if (re->detach_group)
1467 perf_group_detach(event);
1460 list_del_event(event, ctx); 1468 list_del_event(event, ctx);
1461 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { 1469 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1462 ctx->is_active = 0; 1470 ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
1481 * When called from perf_event_exit_task, it's OK because the 1489 * When called from perf_event_exit_task, it's OK because the
1482 * context has been detached from its task. 1490 * context has been detached from its task.
1483 */ 1491 */
1484static void perf_remove_from_context(struct perf_event *event) 1492static void perf_remove_from_context(struct perf_event *event, bool detach_group)
1485{ 1493{
1486 struct perf_event_context *ctx = event->ctx; 1494 struct perf_event_context *ctx = event->ctx;
1487 struct task_struct *task = ctx->task; 1495 struct task_struct *task = ctx->task;
1496 struct remove_event re = {
1497 .event = event,
1498 .detach_group = detach_group,
1499 };
1488 1500
1489 lockdep_assert_held(&ctx->mutex); 1501 lockdep_assert_held(&ctx->mutex);
1490 1502
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
1493 * Per cpu events are removed via an smp call and 1505 * Per cpu events are removed via an smp call and
1494 * the removal is always successful. 1506 * the removal is always successful.
1495 */ 1507 */
1496 cpu_function_call(event->cpu, __perf_remove_from_context, event); 1508 cpu_function_call(event->cpu, __perf_remove_from_context, &re);
1497 return; 1509 return;
1498 } 1510 }
1499 1511
1500retry: 1512retry:
1501 if (!task_function_call(task, __perf_remove_from_context, event)) 1513 if (!task_function_call(task, __perf_remove_from_context, &re))
1502 return; 1514 return;
1503 1515
1504 raw_spin_lock_irq(&ctx->lock); 1516 raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
1515 * Since the task isn't running, it's safe to remove the event, us 1527
1516 * holding the ctx->lock ensures the task won't get scheduled in. 1528 * holding the ctx->lock ensures the task won't get scheduled in.
1517 */ 1529 */
1530 if (detach_group)
1531 perf_group_detach(event);
1518 list_del_event(event, ctx); 1532 list_del_event(event, ctx);
1519 raw_spin_unlock_irq(&ctx->lock); 1533 raw_spin_unlock_irq(&ctx->lock);
1520} 1534}
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
3178} 3192}
3179 3193
3180static void ring_buffer_put(struct ring_buffer *rb); 3194static void ring_buffer_put(struct ring_buffer *rb);
3181static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); 3195static void ring_buffer_attach(struct perf_event *event,
3196 struct ring_buffer *rb);
3182 3197
3183static void unaccount_event_cpu(struct perf_event *event, int cpu) 3198static void unaccount_event_cpu(struct perf_event *event, int cpu)
3184{ 3199{
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
3238 unaccount_event(event); 3253 unaccount_event(event);
3239 3254
3240 if (event->rb) { 3255 if (event->rb) {
3241 struct ring_buffer *rb;
3242
3243 /* 3256 /*
3244 * Can happen when we close an event with re-directed output. 3257 * Can happen when we close an event with re-directed output.
3245 * 3258 *
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
3247 * over us; possibly making our ring_buffer_put() the last. 3260 * over us; possibly making our ring_buffer_put() the last.
3248 */ 3261 */
3249 mutex_lock(&event->mmap_mutex); 3262 mutex_lock(&event->mmap_mutex);
3250 rb = event->rb; 3263 ring_buffer_attach(event, NULL);
3251 if (rb) {
3252 rcu_assign_pointer(event->rb, NULL);
3253 ring_buffer_detach(event, rb);
3254 ring_buffer_put(rb); /* could be last */
3255 }
3256 mutex_unlock(&event->mmap_mutex); 3264 mutex_unlock(&event->mmap_mutex);
3257 } 3265 }
3258 3266
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
3281 * to trigger the AB-BA case. 3289 * to trigger the AB-BA case.
3282 */ 3290 */
3283 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); 3291 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
3284 raw_spin_lock_irq(&ctx->lock); 3292 perf_remove_from_context(event, true);
3285 perf_group_detach(event);
3286 raw_spin_unlock_irq(&ctx->lock);
3287 perf_remove_from_context(event);
3288 mutex_unlock(&ctx->mutex); 3293 mutex_unlock(&ctx->mutex);
3289 3294
3290 free_event(event); 3295 free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
3839static void ring_buffer_attach(struct perf_event *event, 3844static void ring_buffer_attach(struct perf_event *event,
3840 struct ring_buffer *rb) 3845 struct ring_buffer *rb)
3841{ 3846{
3847 struct ring_buffer *old_rb = NULL;
3842 unsigned long flags; 3848 unsigned long flags;
3843 3849
3844 if (!list_empty(&event->rb_entry)) 3850 if (event->rb) {
3845 return; 3851 /*
3852 * Should be impossible, we set this when removing
3853 * event->rb_entry and wait/clear when adding event->rb_entry.
3854 */
3855 WARN_ON_ONCE(event->rcu_pending);
3846 3856
3847 spin_lock_irqsave(&rb->event_lock, flags); 3857 old_rb = event->rb;
3848 if (list_empty(&event->rb_entry)) 3858 event->rcu_batches = get_state_synchronize_rcu();
3849 list_add(&event->rb_entry, &rb->event_list); 3859 event->rcu_pending = 1;
3850 spin_unlock_irqrestore(&rb->event_lock, flags);
3851}
3852 3860
3853static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb) 3861 spin_lock_irqsave(&old_rb->event_lock, flags);
3854{ 3862 list_del_rcu(&event->rb_entry);
3855 unsigned long flags; 3863 spin_unlock_irqrestore(&old_rb->event_lock, flags);
3864 }
3856 3865
3857 if (list_empty(&event->rb_entry)) 3866 if (event->rcu_pending && rb) {
3858 return; 3867 cond_synchronize_rcu(event->rcu_batches);
3868 event->rcu_pending = 0;
3869 }
3870
3871 if (rb) {
3872 spin_lock_irqsave(&rb->event_lock, flags);
3873 list_add_rcu(&event->rb_entry, &rb->event_list);
3874 spin_unlock_irqrestore(&rb->event_lock, flags);
3875 }
3876
3877 rcu_assign_pointer(event->rb, rb);
3859 3878
3860 spin_lock_irqsave(&rb->event_lock, flags); 3879 if (old_rb) {
3861 list_del_init(&event->rb_entry); 3880 ring_buffer_put(old_rb);
3862 wake_up_all(&event->waitq); 3881 /*
3863 spin_unlock_irqrestore(&rb->event_lock, flags); 3882 * Since we detached before setting the new rb so that we
3883 * could attach it, we could have missed a wakeup.
3884 * Provide it now.
3885 */
3886 wake_up_all(&event->waitq);
3887 }
3864} 3888}
3865 3889
3866static void ring_buffer_wakeup(struct perf_event *event) 3890static void ring_buffer_wakeup(struct perf_event *event)
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
3929{ 3953{
3930 struct perf_event *event = vma->vm_file->private_data; 3954 struct perf_event *event = vma->vm_file->private_data;
3931 3955
3932 struct ring_buffer *rb = event->rb; 3956 struct ring_buffer *rb = ring_buffer_get(event);
3933 struct user_struct *mmap_user = rb->mmap_user; 3957 struct user_struct *mmap_user = rb->mmap_user;
3934 int mmap_locked = rb->mmap_locked; 3958 int mmap_locked = rb->mmap_locked;
3935 unsigned long size = perf_data_size(rb); 3959 unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
3937 atomic_dec(&rb->mmap_count); 3961 atomic_dec(&rb->mmap_count);
3938 3962
3939 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 3963 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3940 return; 3964 goto out_put;
3941 3965
3942 /* Detach current event from the buffer. */ 3966 ring_buffer_attach(event, NULL);
3943 rcu_assign_pointer(event->rb, NULL);
3944 ring_buffer_detach(event, rb);
3945 mutex_unlock(&event->mmap_mutex); 3967 mutex_unlock(&event->mmap_mutex);
3946 3968
3947 /* If there's still other mmap()s of this buffer, we're done. */ 3969 /* If there's still other mmap()s of this buffer, we're done. */
3948 if (atomic_read(&rb->mmap_count)) { 3970 if (atomic_read(&rb->mmap_count))
3949 ring_buffer_put(rb); /* can't be last */ 3971 goto out_put;
3950 return;
3951 }
3952 3972
3953 /* 3973 /*
3954 * No other mmap()s, detach from all other events that might redirect 3974 * No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
3978 * still restart the iteration to make sure we're not now 3998 * still restart the iteration to make sure we're not now
3979 * iterating the wrong list. 3999 * iterating the wrong list.
3980 */ 4000 */
3981 if (event->rb == rb) { 4001 if (event->rb == rb)
3982 rcu_assign_pointer(event->rb, NULL); 4002 ring_buffer_attach(event, NULL);
3983 ring_buffer_detach(event, rb); 4003
3984 ring_buffer_put(rb); /* can't be last, we still have one */
3985 }
3986 mutex_unlock(&event->mmap_mutex); 4004 mutex_unlock(&event->mmap_mutex);
3987 put_event(event); 4005 put_event(event);
3988 4006
@@ -4007,6 +4025,7 @@ again:
4007 vma->vm_mm->pinned_vm -= mmap_locked; 4025 vma->vm_mm->pinned_vm -= mmap_locked;
4008 free_uid(mmap_user); 4026 free_uid(mmap_user);
4009 4027
4028out_put:
4010 ring_buffer_put(rb); /* could be last */ 4029 ring_buffer_put(rb); /* could be last */
4011} 4030}
4012 4031
@@ -4124,7 +4143,6 @@ again:
4124 vma->vm_mm->pinned_vm += extra; 4143 vma->vm_mm->pinned_vm += extra;
4125 4144
4126 ring_buffer_attach(event, rb); 4145 ring_buffer_attach(event, rb);
4127 rcu_assign_pointer(event->rb, rb);
4128 4146
4129 perf_event_init_userpage(event); 4147 perf_event_init_userpage(event);
4130 perf_event_update_userpage(event); 4148 perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
5408 5426
5409 /* Recursion avoidance in each contexts */ 5427 /* Recursion avoidance in each contexts */
5410 int recursion[PERF_NR_CONTEXTS]; 5428 int recursion[PERF_NR_CONTEXTS];
5429
5430 /* Keeps track of cpu being initialized/exited */
5431 bool online;
5411}; 5432};
5412 5433
5413static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 5434static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
5654 hwc->state = !(flags & PERF_EF_START); 5675 hwc->state = !(flags & PERF_EF_START);
5655 5676
5656 head = find_swevent_head(swhash, event); 5677 head = find_swevent_head(swhash, event);
5657 if (WARN_ON_ONCE(!head)) 5678 if (!head) {
5679 /*
5680 * We can race with cpu hotplug code. Do not
5681 * WARN if the cpu just got unplugged.
5682 */
5683 WARN_ON_ONCE(swhash->online);
5658 return -EINVAL; 5684 return -EINVAL;
5685 }
5659 5686
5660 hlist_add_head_rcu(&event->hlist_entry, head); 5687 hlist_add_head_rcu(&event->hlist_entry, head);
5661 5688
@@ -6914,7 +6941,7 @@ err_size:
6914static int 6941static int
6915perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 6942perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6916{ 6943{
6917 struct ring_buffer *rb = NULL, *old_rb = NULL; 6944 struct ring_buffer *rb = NULL;
6918 int ret = -EINVAL; 6945 int ret = -EINVAL;
6919 6946
6920 if (!output_event) 6947 if (!output_event)
@@ -6942,8 +6969,6 @@ set:
6942 if (atomic_read(&event->mmap_count)) 6969 if (atomic_read(&event->mmap_count))
6943 goto unlock; 6970 goto unlock;
6944 6971
6945 old_rb = event->rb;
6946
6947 if (output_event) { 6972 if (output_event) {
6948 /* get the rb we want to redirect to */ 6973 /* get the rb we want to redirect to */
6949 rb = ring_buffer_get(output_event); 6974 rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
6951 goto unlock; 6976 goto unlock;
6952 } 6977 }
6953 6978
6954 if (old_rb) 6979 ring_buffer_attach(event, rb);
6955 ring_buffer_detach(event, old_rb);
6956
6957 if (rb)
6958 ring_buffer_attach(event, rb);
6959
6960 rcu_assign_pointer(event->rb, rb);
6961
6962 if (old_rb) {
6963 ring_buffer_put(old_rb);
6964 /*
6965 * Since we detached before setting the new rb, so that we
6966 * could attach the new rb, we could have missed a wakeup.
6967 * Provide it now.
6968 */
6969 wake_up_all(&event->waitq);
6970 }
6971 6980
6972 ret = 0; 6981 ret = 0;
6973unlock: 6982unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
7018 if (attr.freq) { 7027 if (attr.freq) {
7019 if (attr.sample_freq > sysctl_perf_event_sample_rate) 7028 if (attr.sample_freq > sysctl_perf_event_sample_rate)
7020 return -EINVAL; 7029 return -EINVAL;
7030 } else {
7031 if (attr.sample_period & (1ULL << 63))
7032 return -EINVAL;
7021 } 7033 }
7022 7034
7023 /* 7035 /*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
7165 struct perf_event_context *gctx = group_leader->ctx; 7177 struct perf_event_context *gctx = group_leader->ctx;
7166 7178
7167 mutex_lock(&gctx->mutex); 7179 mutex_lock(&gctx->mutex);
7168 perf_remove_from_context(group_leader); 7180 perf_remove_from_context(group_leader, false);
7169 7181
7170 /* 7182 /*
7171 * Removing from the context ends up with disabled 7183 * Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
7175 perf_event__state_init(group_leader); 7187 perf_event__state_init(group_leader);
7176 list_for_each_entry(sibling, &group_leader->sibling_list, 7188 list_for_each_entry(sibling, &group_leader->sibling_list,
7177 group_entry) { 7189 group_entry) {
7178 perf_remove_from_context(sibling); 7190 perf_remove_from_context(sibling, false);
7179 perf_event__state_init(sibling); 7191 perf_event__state_init(sibling);
7180 put_ctx(gctx); 7192 put_ctx(gctx);
7181 } 7193 }
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7305 mutex_lock(&src_ctx->mutex); 7317 mutex_lock(&src_ctx->mutex);
7306 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 7318 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7307 event_entry) { 7319 event_entry) {
7308 perf_remove_from_context(event); 7320 perf_remove_from_context(event, false);
7309 unaccount_event_cpu(event, src_cpu); 7321 unaccount_event_cpu(event, src_cpu);
7310 put_ctx(src_ctx); 7322 put_ctx(src_ctx);
7311 list_add(&event->migrate_entry, &events); 7323 list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
7367 struct perf_event_context *child_ctx, 7379 struct perf_event_context *child_ctx,
7368 struct task_struct *child) 7380 struct task_struct *child)
7369{ 7381{
7370 if (child_event->parent) { 7382 perf_remove_from_context(child_event, !!child_event->parent);
7371 raw_spin_lock_irq(&child_ctx->lock);
7372 perf_group_detach(child_event);
7373 raw_spin_unlock_irq(&child_ctx->lock);
7374 }
7375
7376 perf_remove_from_context(child_event);
7377 7383
7378 /* 7384 /*
7379 * It can happen that the parent exits first, and has events 7385 * It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
7724 * swapped under us. 7730 * swapped under us.
7725 */ 7731 */
7726 parent_ctx = perf_pin_task_context(parent, ctxn); 7732 parent_ctx = perf_pin_task_context(parent, ctxn);
7733 if (!parent_ctx)
7734 return 0;
7727 7735
7728 /* 7736 /*
7729 * No need to check if parent_ctx != NULL here; since we saw 7737 * No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
7835 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 7843 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7836 7844
7837 mutex_lock(&swhash->hlist_mutex); 7845 mutex_lock(&swhash->hlist_mutex);
7846 swhash->online = true;
7838 if (swhash->hlist_refcount > 0) { 7847 if (swhash->hlist_refcount > 0) {
7839 struct swevent_hlist *hlist; 7848 struct swevent_hlist *hlist;
7840 7849
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
7857 7866
7858static void __perf_event_exit_context(void *__info) 7867static void __perf_event_exit_context(void *__info)
7859{ 7868{
7869 struct remove_event re = { .detach_group = false };
7860 struct perf_event_context *ctx = __info; 7870 struct perf_event_context *ctx = __info;
7861 struct perf_event *event;
7862 7871
7863 perf_pmu_rotate_stop(ctx->pmu); 7872 perf_pmu_rotate_stop(ctx->pmu);
7864 7873
7865 rcu_read_lock(); 7874 rcu_read_lock();
7866 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) 7875 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
7867 __perf_remove_from_context(event); 7876 __perf_remove_from_context(&re);
7868 rcu_read_unlock(); 7877 rcu_read_unlock();
7869} 7878}
7870 7879
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
7892 perf_event_exit_cpu_context(cpu); 7901 perf_event_exit_cpu_context(cpu);
7893 7902
7894 mutex_lock(&swhash->hlist_mutex); 7903 mutex_lock(&swhash->hlist_mutex);
7904 swhash->online = false;
7895 swevent_hlist_release(swhash); 7905 swevent_hlist_release(swhash);
7896 mutex_unlock(&swhash->hlist_mutex); 7906 mutex_unlock(&swhash->hlist_mutex);
7897} 7907}
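
Several of the perf hunks above thread a new detach_group flag through __perf_remove_from_context(), which only receives a single void *info; the new struct remove_event wrapper is how two arguments ride through that one pointer. A standalone sketch of the same packing pattern (the struct and callback here are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct event { const char *name; };

    struct remove_event {
            struct event *event;
            bool detach_group;
    };

    /* callback with a fixed void * signature, like a cross-CPU call */
    static int remove_cb(void *info)
    {
            struct remove_event *re = info;

            printf("removing %s%s\n", re->event->name,
                   re->detach_group ? " (detaching group)" : "");
            return 0;
    }

    int main(void)
    {
            struct event ev = { "cycles" };
            struct remove_event re = { .event = &ev, .detach_group = true };

            remove_cb(&re);        /* stands in for cpu_function_call() */
            return 0;
    }

The same trick lets __perf_event_exit_context() reuse one on-stack struct for every event in the list, as shown in its hunk above.
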
diff --git a/kernel/futex.c b/kernel/futex.c
index 5f589279e462..81dbe773ce4c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -745,7 +745,8 @@ void exit_pi_state_list(struct task_struct *curr)
745 745
746static int 746static int
747lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, 747lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
748 union futex_key *key, struct futex_pi_state **ps) 748 union futex_key *key, struct futex_pi_state **ps,
749 struct task_struct *task)
749{ 750{
750 struct futex_pi_state *pi_state = NULL; 751 struct futex_pi_state *pi_state = NULL;
751 struct futex_q *this, *next; 752 struct futex_q *this, *next;
@@ -786,6 +787,16 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
786 return -EINVAL; 787 return -EINVAL;
787 } 788 }
788 789
790 /*
791 * Protect against a corrupted uval. If uval
792 * is 0x80000000 then pid is 0 and the waiter
793 * bit is set. So the deadlock check in the
794 * calling code has failed and we did not fall
795 * into the check above due to !pid.
796 */
797 if (task && pi_state->owner == task)
798 return -EDEADLK;
799
789 atomic_inc(&pi_state->refcount); 800 atomic_inc(&pi_state->refcount);
790 *ps = pi_state; 801 *ps = pi_state;
791 802
@@ -803,6 +814,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
803 if (!p) 814 if (!p)
804 return -ESRCH; 815 return -ESRCH;
805 816
817 if (!p->mm) {
818 put_task_struct(p);
819 return -EPERM;
820 }
821
806 /* 822 /*
807 * We need to look at the task state flags to figure out, 823 * We need to look at the task state flags to figure out,
808 * whether the task is exiting. To protect against the do_exit 824 * whether the task is exiting. To protect against the do_exit
@@ -935,7 +951,7 @@ retry:
935 * We don't have the lock. Look up the PI state (or create it if 951
936 * we are the first waiter): 952 * we are the first waiter):
937 */ 953 */
938 ret = lookup_pi_state(uval, hb, key, ps); 954 ret = lookup_pi_state(uval, hb, key, ps, task);
939 955
940 if (unlikely(ret)) { 956 if (unlikely(ret)) {
941 switch (ret) { 957 switch (ret) {
@@ -1347,7 +1363,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1347 * 1363 *
1348 * Return: 1364 * Return:
1349 * 0 - failed to acquire the lock atomically; 1365 * 0 - failed to acquire the lock atomically;
1350 * 1 - acquired the lock; 1366 * >0 - acquired the lock, return value is vpid of the top_waiter
1351 * <0 - error 1367 * <0 - error
1352 */ 1368 */
1353static int futex_proxy_trylock_atomic(u32 __user *pifutex, 1369static int futex_proxy_trylock_atomic(u32 __user *pifutex,
@@ -1358,7 +1374,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1358{ 1374{
1359 struct futex_q *top_waiter = NULL; 1375 struct futex_q *top_waiter = NULL;
1360 u32 curval; 1376 u32 curval;
1361 int ret; 1377 int ret, vpid;
1362 1378
1363 if (get_futex_value_locked(&curval, pifutex)) 1379 if (get_futex_value_locked(&curval, pifutex))
1364 return -EFAULT; 1380 return -EFAULT;
@@ -1386,11 +1402,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1386 * the contended case or if set_waiters is 1. The pi_state is returned 1402 * the contended case or if set_waiters is 1. The pi_state is returned
1387 * in ps in contended cases. 1403 * in ps in contended cases.
1388 */ 1404 */
1405 vpid = task_pid_vnr(top_waiter->task);
1389 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, 1406 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1390 set_waiters); 1407 set_waiters);
1391 if (ret == 1) 1408 if (ret == 1) {
1392 requeue_pi_wake_futex(top_waiter, key2, hb2); 1409 requeue_pi_wake_futex(top_waiter, key2, hb2);
1393 1410 return vpid;
1411 }
1394 return ret; 1412 return ret;
1395} 1413}
1396 1414
@@ -1421,7 +1439,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1421 struct futex_pi_state *pi_state = NULL; 1439 struct futex_pi_state *pi_state = NULL;
1422 struct futex_hash_bucket *hb1, *hb2; 1440 struct futex_hash_bucket *hb1, *hb2;
1423 struct futex_q *this, *next; 1441 struct futex_q *this, *next;
1424 u32 curval2;
1425 1442
1426 if (requeue_pi) { 1443 if (requeue_pi) {
1427 /* 1444 /*
@@ -1509,16 +1526,25 @@ retry_private:
1509 * At this point the top_waiter has either taken uaddr2 or is 1526 * At this point the top_waiter has either taken uaddr2 or is
1510 * waiting on it. If the former, then the pi_state will not 1527 * waiting on it. If the former, then the pi_state will not
1511 * exist yet, look it up one more time to ensure we have a 1528 * exist yet, look it up one more time to ensure we have a
1512 * reference to it. 1529 * reference to it. If the lock was taken, ret contains the
1530 * vpid of the top waiter task.
1513 */ 1531 */
1514 if (ret == 1) { 1532 if (ret > 0) {
1515 WARN_ON(pi_state); 1533 WARN_ON(pi_state);
1516 drop_count++; 1534 drop_count++;
1517 task_count++; 1535 task_count++;
1518 ret = get_futex_value_locked(&curval2, uaddr2); 1536 /*
1519 if (!ret) 1537 * If we acquired the lock, then the user
1520 ret = lookup_pi_state(curval2, hb2, &key2, 1538 * space value of uaddr2 should be vpid. It
1521 &pi_state); 1539 * cannot be changed by the top waiter as it
1540 * is blocked on hb2 lock if it tries to do
1541 * so. If something fiddled with it behind our
1542 * back the pi state lookup might unearth
1543 * it. So we'd rather use the known value than
1544 * reread and hand potential crap to
1545 * lookup_pi_state.
1546 */
1547 ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL);
1522 } 1548 }
1523 1549
1524 switch (ret) { 1550 switch (ret) {
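
The futex_requeue() change above has ret > 0 carry the vpid of the top waiter, saved before the race window, so lookup_pi_state() is fed a value the kernel already trusts rather than a fresh read of uaddr2 that user space may have scribbled on. A minimal sketch of preferring the saved value over a re-read; the mask mirrors the kernel's FUTEX_TID_MASK, but the rest is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* shared word another party may modify behind our back */
    static volatile uint32_t uaddr2;

    static int owner_tid(uint32_t uval)
    {
            return (int)(uval & 0x3fffffff);   /* FUTEX_TID_MASK */
    }

    int main(void)
    {
            uint32_t vpid = 1234;      /* owner pid saved while it was trustworthy */

            uaddr2 = vpid;
            uaddr2 = 0x80000000;       /* "user space fiddled with it" */

            /* re-reading would hand garbage to the lookup ... */
            printf("re-read owner: %d\n", owner_tid(uaddr2));
            /* ... so use the value we already know instead */
            printf("saved owner:   %d\n", owner_tid(vpid));
            return 0;
    }
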
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c8380ad203bc..28c57069ef68 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1683,6 +1683,14 @@ int kernel_kexec(void)
1683 kexec_in_progress = true; 1683 kexec_in_progress = true;
1684 kernel_restart_prepare(NULL); 1684 kernel_restart_prepare(NULL);
1685 migrate_to_reboot_cpu(); 1685 migrate_to_reboot_cpu();
1686
1687 /*
1688 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1689 * no further code needs to use CPU hotplug (which is true in
1690 * the reboot case). However, the kexec path depends on using
1691 * CPU hotplug again; so re-enable it here.
1692 */
1693 cpu_hotplug_enable();
1686 printk(KERN_EMERG "Starting new kernel\n"); 1694 printk(KERN_EMERG "Starting new kernel\n");
1687 machine_shutdown(); 1695 machine_shutdown();
1688 } 1696 }
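
The kexec fix re-enables CPU hotplug after migrate_to_reboot_cpu() disabled it; in the kernel this is a depth counter rather than a boolean, so disable/enable calls must pair up. A small sketch of that counted pairing (names hypothetical):

    #include <assert.h>
    #include <stdio.h>

    static int cpu_hotplug_disabled;

    static void cpu_hotplug_disable(void) { cpu_hotplug_disabled++; }

    static void cpu_hotplug_enable(void)
    {
            assert(cpu_hotplug_disabled > 0);   /* must pair with a disable */
            cpu_hotplug_disabled--;
    }

    static int cpu_hotplug_allowed(void) { return cpu_hotplug_disabled == 0; }

    int main(void)
    {
            cpu_hotplug_disable();              /* migrate_to_reboot_cpu() */
            printf("allowed: %d\n", cpu_hotplug_allowed());
            cpu_hotplug_enable();               /* the fix above */
            printf("allowed: %d\n", cpu_hotplug_allowed());
            return 0;
    }
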
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index aa4dff04b594..a620d4d08ca6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -343,9 +343,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
343 * top_waiter can be NULL, when we are in the deboosting 343 * top_waiter can be NULL, when we are in the deboosting
344 * mode! 344 * mode!
345 */ 345 */
346 if (top_waiter && (!task_has_pi_waiters(task) || 346 if (top_waiter) {
347 top_waiter != task_top_pi_waiter(task))) 347 if (!task_has_pi_waiters(task))
348 goto out_unlock_pi; 348 goto out_unlock_pi;
349 /*
350 * If deadlock detection is off, we stop here if we
351 * are not the top pi waiter of the task.
352 */
353 if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
354 goto out_unlock_pi;
355 }
349 356
350 /* 357 /*
351 * When deadlock detection is off then we check, if further 358 * When deadlock detection is off then we check, if further
@@ -361,7 +368,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
361 goto retry; 368 goto retry;
362 } 369 }
363 370
364 /* Deadlock detection */ 371 /*
372 * Deadlock detection. If the lock is the same as the original
373 * lock which caused us to walk the lock chain or if the
374 * current lock is owned by the task which initiated the chain
375 * walk, we detected a deadlock.
376 */
365 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { 377 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
366 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); 378 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
367 raw_spin_unlock(&lock->wait_lock); 379 raw_spin_unlock(&lock->wait_lock);
@@ -527,6 +539,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
527 unsigned long flags; 539 unsigned long flags;
528 int chain_walk = 0, res; 540 int chain_walk = 0, res;
529 541
542 /*
543 * Early deadlock detection. We really don't want the task to
544 * enqueue on itself just to untangle the mess later. It's not
545 * only an optimization. We drop the locks, so another waiter
546 * can come in before the chain walk detects the deadlock. So
547 * the other will detect the deadlock and return -EDEADLOCK,
548 * which is wrong, as the other waiter is not in a deadlock
549 * situation.
550 */
551 if (detect_deadlock && owner == task)
552 return -EDEADLK;
553
530 raw_spin_lock_irqsave(&task->pi_lock, flags); 554 raw_spin_lock_irqsave(&task->pi_lock, flags);
531 __rt_mutex_adjust_prio(task); 555 __rt_mutex_adjust_prio(task);
532 waiter->task = task; 556 waiter->task = task;
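
The task_blocks_on_rt_mutex() hunk rejects the trivial self-deadlock before the task enqueues as a waiter on its own lock, since by the time the chain walk would catch it, another waiter could have slipped in and wrongly collected the -EDEADLK. A sketch of that early owner check; the structs are stand-ins, not the kernel's rt_mutex:

    #include <errno.h>
    #include <stdio.h>

    struct task { const char *comm; };

    struct rt_mutex { struct task *owner; };

    /* refuse to block a task on a lock it already owns */
    static int task_blocks_on_lock(struct rt_mutex *lock, struct task *task,
                                   int detect_deadlock)
    {
            if (detect_deadlock && lock->owner == task)
                    return -EDEADLK;
            /* ... enqueue the waiter and walk the lock chain ... */
            return 0;
    }

    int main(void)
    {
            struct task t = { "worker" };
            struct rt_mutex m = { .owner = &t };

            printf("blocking on own lock: %d\n",
                   task_blocks_on_lock(&m, &t, 1));
            return 0;
    }
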
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d9d8ece46a15..0a7251678982 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
2592 if (likely(prev->sched_class == class && 2592 if (likely(prev->sched_class == class &&
2593 rq->nr_running == rq->cfs.h_nr_running)) { 2593 rq->nr_running == rq->cfs.h_nr_running)) {
2594 p = fair_sched_class.pick_next_task(rq, prev); 2594 p = fair_sched_class.pick_next_task(rq, prev);
2595 if (likely(p && p != RETRY_TASK)) 2595 if (unlikely(p == RETRY_TASK))
2596 return p; 2596 goto again;
2597
2598 /* assumes fair_sched_class->next == idle_sched_class */
2599 if (unlikely(!p))
2600 p = idle_sched_class.pick_next_task(rq, prev);
2601
2602 return p;
2597 } 2603 }
2598 2604
2599again: 2605again:
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3124 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3130 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3125 dl_se->dl_throttled = 0; 3131 dl_se->dl_throttled = 0;
3126 dl_se->dl_new = 1; 3132 dl_se->dl_new = 1;
3133 dl_se->dl_yielded = 0;
3127} 3134}
3128 3135
3129static void __setscheduler_params(struct task_struct *p, 3136static void __setscheduler_params(struct task_struct *p,
@@ -3188,17 +3195,40 @@ __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3188 * We ask for the deadline not being zero, and greater than or 3195 * We ask for the deadline not being zero, and greater than or
3189 * equal to the runtime, as well as the period being zero or 3196 * equal to the runtime, as well as the period being zero or
3190 * greater than deadline. Furthermore, we have to be sure that 3197 * greater than deadline. Furthermore, we have to be sure that
3191 * user parameters are above the internal resolution (1us); we 3198 * user parameters are above the internal resolution of 1us (we
3192 * check sched_runtime only since it is always the smaller one. 3199 * check sched_runtime only since it is always the smaller one) and
3200 * below 2^63 ns (we have to check both sched_deadline and
3201 * sched_period, as the latter can be zero).
3193 */ 3202 */
3194static bool 3203static bool
3195__checkparam_dl(const struct sched_attr *attr) 3204__checkparam_dl(const struct sched_attr *attr)
3196{ 3205{
3197 return attr && attr->sched_deadline != 0 && 3206 /* deadline != 0 */
3198 (attr->sched_period == 0 || 3207 if (attr->sched_deadline == 0)
3199 (s64)(attr->sched_period - attr->sched_deadline) >= 0) && 3208 return false;
3200 (s64)(attr->sched_deadline - attr->sched_runtime ) >= 0 && 3209
3201 attr->sched_runtime >= (2 << (DL_SCALE - 1)); 3210 /*
3211 * Since we truncate DL_SCALE bits, make sure we're at least
3212 * that big.
3213 */
3214 if (attr->sched_runtime < (1ULL << DL_SCALE))
3215 return false;
3216
3217 /*
3218 * Since we use the MSB for wrap-around and sign issues, make
3219 * sure it's not set (mind that period can be equal to zero).
3220 */
3221 if (attr->sched_deadline & (1ULL << 63) ||
3222 attr->sched_period & (1ULL << 63))
3223 return false;
3224
3225 /* runtime <= deadline <= period (if period != 0) */
3226 if ((attr->sched_period != 0 &&
3227 attr->sched_period < attr->sched_deadline) ||
3228 attr->sched_deadline < attr->sched_runtime)
3229 return false;
3230
3231 return true;
3202} 3232}
3203 3233
3204/* 3234/*
@@ -3639,6 +3669,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3639 * sys_sched_setattr - same as above, but with extended sched_attr 3669 * sys_sched_setattr - same as above, but with extended sched_attr
3640 * @pid: the pid in question. 3670 * @pid: the pid in question.
3641 * @uattr: structure containing the extended parameters. 3671 * @uattr: structure containing the extended parameters.
3672 * @flags: for future extension.
3642 */ 3673 */
3643SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 3674SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
3644 unsigned int, flags) 3675 unsigned int, flags)
@@ -3650,8 +3681,12 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
3650 if (!uattr || pid < 0 || flags) 3681 if (!uattr || pid < 0 || flags)
3651 return -EINVAL; 3682 return -EINVAL;
3652 3683
3653 if (sched_copy_attr(uattr, &attr)) 3684 retval = sched_copy_attr(uattr, &attr);
3654 return -EFAULT; 3685 if (retval)
3686 return retval;
3687
3688 if (attr.sched_policy < 0)
3689 return -EINVAL;
3655 3690
3656 rcu_read_lock(); 3691 rcu_read_lock();
3657 retval = -ESRCH; 3692 retval = -ESRCH;
@@ -3701,7 +3736,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3701 */ 3736 */
3702SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 3737SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3703{ 3738{
3704 struct sched_param lp; 3739 struct sched_param lp = { .sched_priority = 0 };
3705 struct task_struct *p; 3740 struct task_struct *p;
3706 int retval; 3741 int retval;
3707 3742
@@ -3718,11 +3753,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3718 if (retval) 3753 if (retval)
3719 goto out_unlock; 3754 goto out_unlock;
3720 3755
3721 if (task_has_dl_policy(p)) { 3756 if (task_has_rt_policy(p))
3722 retval = -EINVAL; 3757 lp.sched_priority = p->rt_priority;
3723 goto out_unlock;
3724 }
3725 lp.sched_priority = p->rt_priority;
3726 rcu_read_unlock(); 3758 rcu_read_unlock();
3727 3759
3728 /* 3760 /*
@@ -3783,6 +3815,7 @@ err_size:
3783 * @pid: the pid in question. 3815 * @pid: the pid in question.
3784 * @uattr: structure containing the extended parameters. 3816 * @uattr: structure containing the extended parameters.
3785 * @size: sizeof(attr) for fwd/bwd comp. 3817 * @size: sizeof(attr) for fwd/bwd comp.
3818 * @flags: for future extension.
3786 */ 3819 */
3787SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3820SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
3788 unsigned int, size, unsigned int, flags) 3821 unsigned int, size, unsigned int, flags)
@@ -5043,7 +5076,6 @@ static int sched_cpu_active(struct notifier_block *nfb,
5043 unsigned long action, void *hcpu) 5076 unsigned long action, void *hcpu)
5044{ 5077{
5045 switch (action & ~CPU_TASKS_FROZEN) { 5078 switch (action & ~CPU_TASKS_FROZEN) {
5046 case CPU_STARTING:
5047 case CPU_DOWN_FAILED: 5079 case CPU_DOWN_FAILED:
5048 set_cpu_active((long)hcpu, true); 5080 set_cpu_active((long)hcpu, true);
5049 return NOTIFY_OK; 5081 return NOTIFY_OK;
@@ -6017,6 +6049,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
6017 , 6049 ,
6018 .last_balance = jiffies, 6050 .last_balance = jiffies,
6019 .balance_interval = sd_weight, 6051 .balance_interval = sd_weight,
6052 .max_newidle_lb_cost = 0,
6053 .next_decay_max_lb_cost = jiffies,
6020 }; 6054 };
6021 SD_INIT_NAME(sd, NUMA); 6055 SD_INIT_NAME(sd, NUMA);
6022 sd->private = &tl->data; 6056 sd->private = &tl->data;
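
The rewritten __checkparam_dl() above spells out the SCHED_DEADLINE parameter invariants one check at a time. The same predicate as a standalone function, directly mirroring the hunk (DL_SCALE of 10 matches the kernel's value at this point, but treat the surrounding scaffolding as a sketch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DL_SCALE 10   /* low-order bits the kernel truncates */

    struct sched_attr {
            uint64_t sched_runtime;
            uint64_t sched_deadline;
            uint64_t sched_period;
    };

    static bool checkparam_dl(const struct sched_attr *attr)
    {
            if (attr->sched_deadline == 0)
                    return false;
            /* runtime must be at least the truncated resolution */
            if (attr->sched_runtime < (1ULL << DL_SCALE))
                    return false;
            /* MSB is reserved for wrap-around/sign handling */
            if ((attr->sched_deadline & (1ULL << 63)) ||
                (attr->sched_period & (1ULL << 63)))
                    return false;
            /* runtime <= deadline <= period (when period is given) */
            if ((attr->sched_period != 0 &&
                 attr->sched_period < attr->sched_deadline) ||
                attr->sched_deadline < attr->sched_runtime)
                    return false;
            return true;
    }

    int main(void)
    {
            struct sched_attr ok  = { 10000, 20000, 30000 };
            struct sched_attr bad = { 10000, 20000, 1ULL << 63 };

            printf("%d %d\n", checkparam_dl(&ok), checkparam_dl(&bad));
            return 0;
    }
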
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5b9bb42b2d47..bd95963dae80 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/gfp.h> 14#include <linux/gfp.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/slab.h>
16#include "cpudeadline.h" 17#include "cpudeadline.h"
17 18
18static inline int parent(int i) 19static inline int parent(int i)
@@ -39,8 +40,10 @@ static void cpudl_exchange(struct cpudl *cp, int a, int b)
39{ 40{
40 int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu; 41 int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu;
41 42
42 swap(cp->elements[a], cp->elements[b]); 43 swap(cp->elements[a].cpu, cp->elements[b].cpu);
43 swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]); 44 swap(cp->elements[a].dl , cp->elements[b].dl );
45
46 swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx);
44} 47}
45 48
46static void cpudl_heapify(struct cpudl *cp, int idx) 49static void cpudl_heapify(struct cpudl *cp, int idx)
@@ -140,7 +143,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
140 WARN_ON(!cpu_present(cpu)); 143 WARN_ON(!cpu_present(cpu));
141 144
142 raw_spin_lock_irqsave(&cp->lock, flags); 145 raw_spin_lock_irqsave(&cp->lock, flags);
143 old_idx = cp->cpu_to_idx[cpu]; 146 old_idx = cp->elements[cpu].idx;
144 if (!is_valid) { 147 if (!is_valid) {
145 /* remove item */ 148 /* remove item */
146 if (old_idx == IDX_INVALID) { 149 if (old_idx == IDX_INVALID) {
@@ -155,8 +158,8 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
155 cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl; 158 cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
156 cp->elements[old_idx].cpu = new_cpu; 159 cp->elements[old_idx].cpu = new_cpu;
157 cp->size--; 160 cp->size--;
158 cp->cpu_to_idx[new_cpu] = old_idx; 161 cp->elements[new_cpu].idx = old_idx;
159 cp->cpu_to_idx[cpu] = IDX_INVALID; 162 cp->elements[cpu].idx = IDX_INVALID;
160 while (old_idx > 0 && dl_time_before( 163 while (old_idx > 0 && dl_time_before(
161 cp->elements[parent(old_idx)].dl, 164 cp->elements[parent(old_idx)].dl,
162 cp->elements[old_idx].dl)) { 165 cp->elements[old_idx].dl)) {
@@ -173,7 +176,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
173 cp->size++; 176 cp->size++;
174 cp->elements[cp->size - 1].dl = 0; 177 cp->elements[cp->size - 1].dl = 0;
175 cp->elements[cp->size - 1].cpu = cpu; 178 cp->elements[cp->size - 1].cpu = cpu;
176 cp->cpu_to_idx[cpu] = cp->size - 1; 179 cp->elements[cpu].idx = cp->size - 1;
177 cpudl_change_key(cp, cp->size - 1, dl); 180 cpudl_change_key(cp, cp->size - 1, dl);
178 cpumask_clear_cpu(cpu, cp->free_cpus); 181 cpumask_clear_cpu(cpu, cp->free_cpus);
179 } else { 182 } else {
@@ -195,10 +198,21 @@ int cpudl_init(struct cpudl *cp)
195 memset(cp, 0, sizeof(*cp)); 198 memset(cp, 0, sizeof(*cp));
196 raw_spin_lock_init(&cp->lock); 199 raw_spin_lock_init(&cp->lock);
197 cp->size = 0; 200 cp->size = 0;
198 for (i = 0; i < NR_CPUS; i++) 201
199 cp->cpu_to_idx[i] = IDX_INVALID; 202 cp->elements = kcalloc(nr_cpu_ids,
200 if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) 203 sizeof(struct cpudl_item),
204 GFP_KERNEL);
205 if (!cp->elements)
206 return -ENOMEM;
207
208 if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
209 kfree(cp->elements);
201 return -ENOMEM; 210 return -ENOMEM;
211 }
212
213 for_each_possible_cpu(i)
214 cp->elements[i].idx = IDX_INVALID;
215
202 cpumask_setall(cp->free_cpus); 216 cpumask_setall(cp->free_cpus);
203 217
204 return 0; 218 return 0;
@@ -210,7 +224,6 @@ int cpudl_init(struct cpudl *cp)
210 */ 224 */
211void cpudl_cleanup(struct cpudl *cp) 225void cpudl_cleanup(struct cpudl *cp)
212{ 226{
213 /* 227 free_cpumask_var(cp->free_cpus);
214 * nothing to do for the moment 228 kfree(cp->elements);
215 */
216} 229}
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index a202789a412c..538c9796ad4a 100644
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -5,17 +5,17 @@
5 5
6#define IDX_INVALID -1 6#define IDX_INVALID -1
7 7
8struct array_item { 8struct cpudl_item {
9 u64 dl; 9 u64 dl;
10 int cpu; 10 int cpu;
11 int idx;
11}; 12};
12 13
13struct cpudl { 14struct cpudl {
14 raw_spinlock_t lock; 15 raw_spinlock_t lock;
15 int size; 16 int size;
16 int cpu_to_idx[NR_CPUS];
17 struct array_item elements[NR_CPUS];
18 cpumask_var_t free_cpus; 17 cpumask_var_t free_cpus;
18 struct cpudl_item *elements;
19}; 19};
20 20
21 21
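
The cpudeadline changes fold the old cpu_to_idx[NR_CPUS] array into a per-element idx field and allocate nr_cpu_ids entries at init time instead of sizing two static NR_CPUS arrays. A sketch of that init/cleanup shape with calloc standing in for kcalloc (error handling simplified):

    #include <stdio.h>
    #include <stdlib.h>

    #define IDX_INVALID -1

    struct cpudl_item {
            unsigned long long dl;
            int cpu;
            int idx;
    };

    struct cpudl {
            int size;
            struct cpudl_item *elements;   /* nr_cpu_ids entries on the heap */
    };

    static int cpudl_init(struct cpudl *cp, int nr_cpu_ids)
    {
            cp->size = 0;
            cp->elements = calloc(nr_cpu_ids, sizeof(*cp->elements));
            if (!cp->elements)
                    return -1;
            for (int i = 0; i < nr_cpu_ids; i++)
                    cp->elements[i].idx = IDX_INVALID;
            return 0;
    }

    static void cpudl_cleanup(struct cpudl *cp)
    {
            free(cp->elements);
    }

    int main(void)
    {
            struct cpudl cp;

            if (cpudl_init(&cp, 8))
                    return 1;
            printf("idx[0] = %d\n", cp.elements[0].idx);
            cpudl_cleanup(&cp);
            return 0;
    }

Sizing by nr_cpu_ids rather than NR_CPUS keeps memory proportional to the CPUs actually possible on the booted machine; the cpupri diff below makes the same move for cpu_to_pri.
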
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8b836b376d91..8834243abee2 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -30,6 +30,7 @@
30#include <linux/gfp.h> 30#include <linux/gfp.h>
31#include <linux/sched.h> 31#include <linux/sched.h>
32#include <linux/sched/rt.h> 32#include <linux/sched/rt.h>
33#include <linux/slab.h>
33#include "cpupri.h" 34#include "cpupri.h"
34 35
35/* Convert between a 140 based task->prio, and our 102 based cpupri */ 36/* Convert between a 140 based task->prio, and our 102 based cpupri */
@@ -70,8 +71,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
70 int idx = 0; 71 int idx = 0;
71 int task_pri = convert_prio(p->prio); 72 int task_pri = convert_prio(p->prio);
72 73
73 if (task_pri >= MAX_RT_PRIO) 74 BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
74 return 0;
75 75
76 for (idx = 0; idx < task_pri; idx++) { 76 for (idx = 0; idx < task_pri; idx++) {
77 struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; 77 struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
@@ -219,8 +219,13 @@ int cpupri_init(struct cpupri *cp)
219 goto cleanup; 219 goto cleanup;
220 } 220 }
221 221
222 cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
223 if (!cp->cpu_to_pri)
224 goto cleanup;
225
222 for_each_possible_cpu(i) 226 for_each_possible_cpu(i)
223 cp->cpu_to_pri[i] = CPUPRI_INVALID; 227 cp->cpu_to_pri[i] = CPUPRI_INVALID;
228
224 return 0; 229 return 0;
225 230
226cleanup: 231cleanup:
@@ -237,6 +242,7 @@ void cpupri_cleanup(struct cpupri *cp)
237{ 242{
238 int i; 243 int i;
239 244
245 kfree(cp->cpu_to_pri);
240 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) 246 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
241 free_cpumask_var(cp->pri_to_cpu[i].mask); 247 free_cpumask_var(cp->pri_to_cpu[i].mask);
242} 248}
diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
index f6d756173491..6b033347fdfd 100644
--- a/kernel/sched/cpupri.h
+++ b/kernel/sched/cpupri.h
@@ -17,7 +17,7 @@ struct cpupri_vec {
17 17
18struct cpupri { 18struct cpupri {
19 struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES]; 19 struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
20 int cpu_to_pri[NR_CPUS]; 20 int *cpu_to_pri;
21}; 21};
22 22
23#ifdef CONFIG_SMP 23#ifdef CONFIG_SMP
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a95097cb4591..72fdf06ef865 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -332,50 +332,50 @@ out:
332 * softirq as those do not count in task exec_runtime any more. 332 * softirq as those do not count in task exec_runtime any more.
333 */ 333 */
334static void irqtime_account_process_tick(struct task_struct *p, int user_tick, 334static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
335 struct rq *rq) 335 struct rq *rq, int ticks)
336{ 336{
337 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); 337 cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
338 u64 cputime = (__force u64) cputime_one_jiffy;
338 u64 *cpustat = kcpustat_this_cpu->cpustat; 339 u64 *cpustat = kcpustat_this_cpu->cpustat;
339 340
340 if (steal_account_process_tick()) 341 if (steal_account_process_tick())
341 return; 342 return;
342 343
344 cputime *= ticks;
345 scaled *= ticks;
346
343 if (irqtime_account_hi_update()) { 347 if (irqtime_account_hi_update()) {
344 cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; 348 cpustat[CPUTIME_IRQ] += cputime;
345 } else if (irqtime_account_si_update()) { 349 } else if (irqtime_account_si_update()) {
346 cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; 350 cpustat[CPUTIME_SOFTIRQ] += cputime;
347 } else if (this_cpu_ksoftirqd() == p) { 351 } else if (this_cpu_ksoftirqd() == p) {
348 /* 352 /*
349 * ksoftirqd time does not get accounted in cpu_softirq_time. 353
350 * So, we have to handle it separately here. 354 * So, we have to handle it separately here.
351 * Also, p->stime needs to be updated for ksoftirqd. 355 * Also, p->stime needs to be updated for ksoftirqd.
352 */ 356 */
353 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, 357 __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
354 CPUTIME_SOFTIRQ);
355 } else if (user_tick) { 358 } else if (user_tick) {
356 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); 359 account_user_time(p, cputime, scaled);
357 } else if (p == rq->idle) { 360 } else if (p == rq->idle) {
358 account_idle_time(cputime_one_jiffy); 361 account_idle_time(cputime);
359 } else if (p->flags & PF_VCPU) { /* System time or guest time */ 362 } else if (p->flags & PF_VCPU) { /* System time or guest time */
360 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); 363 account_guest_time(p, cputime, scaled);
361 } else { 364 } else {
362 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, 365 __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
363 CPUTIME_SYSTEM);
364 } 366 }
365} 367}
366 368
367static void irqtime_account_idle_ticks(int ticks) 369static void irqtime_account_idle_ticks(int ticks)
368{ 370{
369 int i;
370 struct rq *rq = this_rq(); 371 struct rq *rq = this_rq();
371 372
372 for (i = 0; i < ticks; i++) 373 irqtime_account_process_tick(current, 0, rq, ticks);
373 irqtime_account_process_tick(current, 0, rq);
374} 374}
375#else /* CONFIG_IRQ_TIME_ACCOUNTING */ 375#else /* CONFIG_IRQ_TIME_ACCOUNTING */
376static inline void irqtime_account_idle_ticks(int ticks) {} 376static inline void irqtime_account_idle_ticks(int ticks) {}
377static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick, 377static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
378 struct rq *rq) {} 378 struct rq *rq, int nr_ticks) {}
379#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 379#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
380 380
381/* 381/*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
464 return; 464 return;
465 465
466 if (sched_clock_irqtime) { 466 if (sched_clock_irqtime) {
467 irqtime_account_process_tick(p, user_tick, rq); 467 irqtime_account_process_tick(p, user_tick, rq, 1);
468 return; 468 return;
469 } 469 }
470 470
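
irqtime_account_process_tick() now takes a tick count and scales the per-tick charge once, replacing the caller's per-tick loop in irqtime_account_idle_ticks(). The arithmetic is trivial but worth seeing in isolation; constants here are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cputime_one_jiffy = 10000000;  /* say, 10 ms per tick */
    static uint64_t idle_time;

    /* charge 'ticks' ticks at once instead of looping 'ticks' times */
    static void account_idle_ticks(int ticks)
    {
            idle_time += cputime_one_jiffy * (uint64_t)ticks;
    }

    int main(void)
    {
            account_idle_ticks(5);
            printf("idle: %llu ns\n", (unsigned long long)idle_time);
            return 0;
    }
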
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b08095786cb8..800e99b99075 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
528 sched_clock_tick(); 528 sched_clock_tick();
529 update_rq_clock(rq); 529 update_rq_clock(rq);
530 dl_se->dl_throttled = 0; 530 dl_se->dl_throttled = 0;
531 dl_se->dl_yielded = 0;
531 if (p->on_rq) { 532 if (p->on_rq) {
532 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 533 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
533 if (task_has_dl_policy(rq->curr)) 534 if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
893 * We make the task go to sleep until its current deadline by 894 * We make the task go to sleep until its current deadline by
894 * forcing its runtime to zero. This way, update_curr_dl() stops 895 * forcing its runtime to zero. This way, update_curr_dl() stops
895 * it and the bandwidth timer will wake it up and will give it 896 * it and the bandwidth timer will wake it up and will give it
896 * new scheduling parameters (thanks to dl_new=1). 897 * new scheduling parameters (thanks to dl_yielded=1).
897 */ 898 */
898 if (p->dl.runtime > 0) { 899 if (p->dl.runtime > 0) {
899 rq->curr->dl.dl_new = 1; 900 rq->curr->dl.dl_yielded = 1;
900 p->dl.runtime = 0; 901 p->dl.runtime = 0;
901 } 902 }
902 update_curr_dl(rq); 903 update_curr_dl(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7570dd969c28..0fdb96de81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
6653 int this_cpu = this_rq->cpu; 6653 int this_cpu = this_rq->cpu;
6654 6654
6655 idle_enter_fair(this_rq); 6655 idle_enter_fair(this_rq);
6656
6656 /* 6657 /*
6657 * We must set idle_stamp _before_ calling idle_balance(), such that we 6658 * We must set idle_stamp _before_ calling idle_balance(), such that we
6658 * measure the duration of idle_balance() as idle time. 6659 * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
6705 6706
6706 raw_spin_lock(&this_rq->lock); 6707 raw_spin_lock(&this_rq->lock);
6707 6708
6709 if (curr_cost > this_rq->max_idle_balance_cost)
6710 this_rq->max_idle_balance_cost = curr_cost;
6711
6708 /* 6712 /*
6709 * While browsing the domains, we released the rq lock. 6713 * While browsing the domains, we released the rq lock; a task could
6710 * A task could have be enqueued in the meantime 6714 * have been enqueued in the meantime. Since we're not going idle,
6715 * pretend we pulled a task.
6711 */ 6716 */
6712 if (this_rq->cfs.h_nr_running && !pulled_task) { 6717 if (this_rq->cfs.h_nr_running && !pulled_task)
6713 pulled_task = 1; 6718 pulled_task = 1;
6714 goto out;
6715 }
6716 6719
6717 if (pulled_task || time_after(jiffies, this_rq->next_balance)) { 6720 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6718 /* 6721 /*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
6722 this_rq->next_balance = next_balance; 6725 this_rq->next_balance = next_balance;
6723 } 6726 }
6724 6727
6725 if (curr_cost > this_rq->max_idle_balance_cost)
6726 this_rq->max_idle_balance_cost = curr_cost;
6727
6728out: 6728out:
6729 /* Is there a task of a high priority class? */ 6729 /* Is there a task of a high priority class? */
6730 if (this_rq->nr_running != this_rq->cfs.h_nr_running && 6730 if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
diff --git a/mm/filemap.c b/mm/filemap.c
index 000a220e2a41..088358c8006b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
257{ 257{
258 int ret = 0; 258 int ret = 0;
259 /* Check for outstanding write errors */ 259 /* Check for outstanding write errors */
260 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) 260 if (test_bit(AS_ENOSPC, &mapping->flags) &&
261 test_and_clear_bit(AS_ENOSPC, &mapping->flags))
261 ret = -ENOSPC; 262 ret = -ENOSPC;
262 if (test_and_clear_bit(AS_EIO, &mapping->flags)) 263 if (test_bit(AS_EIO, &mapping->flags) &&
264 test_and_clear_bit(AS_EIO, &mapping->flags))
263 ret = -EIO; 265 ret = -EIO;
264 return ret; 266 return ret;
265} 267}
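
The filemap_check_errors() change reads each flag first and only then performs the atomic test-and-clear, so the common no-error path never writes the cacheline. A sketch of the same check-before-RMW shape using C11 atomics, which only approximate the kernel's bitops:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int as_eio;

    static int check_and_clear_error(void)
    {
            /* cheap read-only check first: no store on the fast path */
            if (atomic_load_explicit(&as_eio, memory_order_relaxed) &&
                atomic_exchange(&as_eio, 0))   /* atomic test-and-clear */
                    return -5;                 /* -EIO */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_and_clear_error());   /* 0: no write issued */
            atomic_store(&as_eio, 1);
            printf("%d\n", check_and_clear_error());   /* -5: flag cleared */
            return 0;
    }
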
diff --git a/mm/madvise.c b/mm/madvise.c
index 539eeb96b323..a402f8fdc68e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
195 for (; start < end; start += PAGE_SIZE) { 195 for (; start < end; start += PAGE_SIZE) {
196 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 196 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
197 197
198 page = find_get_page(mapping, index); 198 page = find_get_entry(mapping, index);
199 if (!radix_tree_exceptional_entry(page)) { 199 if (!radix_tree_exceptional_entry(page)) {
200 if (page) 200 if (page)
201 page_cache_release(page); 201 page_cache_release(page);
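
The mm/madvise.c hunk switches to find_get_entry() because the radix_tree_exceptional_entry() test that follows only makes sense on a lookup that can return shadow (exceptional) entries; find_get_page() filters those out, so the branch could never fire. A hedged sketch of the tagged-pointer encoding such a test relies on; the constant mirrors RADIX_TREE_EXCEPTIONAL_ENTRY of this era and should be treated as an assumption:

        #include <stdbool.h>
        #include <stdint.h>

        #define EXCEPTIONAL_ENTRY 0x2UL  /* assumed low-bit tag value */

        /* A shadow entry is a non-pointer value tagged in its low bits;
         * a real page pointer has those bits clear. */
        static bool entry_is_exceptional(const void *entry)
        {
                return ((uintptr_t)entry & EXCEPTIONAL_ENTRY) != 0;
        }
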
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c47dffdcb246..5177c6d4a2dd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1077 1077
1078 rcu_read_lock(); 1078 rcu_read_lock();
1079 do { 1079 do {
1080 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1080 /*
1081 if (unlikely(!memcg)) 1081 * Page cache insertions can happen without an
1082 * actual mm context, e.g. during disk probing
1083 * on boot, loopback IO, acct() writes etc.
1084 */
1085 if (unlikely(!mm))
1082 memcg = root_mem_cgroup; 1086 memcg = root_mem_cgroup;
1087 else {
1088 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1089 if (unlikely(!memcg))
1090 memcg = root_mem_cgroup;
1091 }
1083 } while (!css_tryget(&memcg->css)); 1092 } while (!css_tryget(&memcg->css));
1084 rcu_read_unlock(); 1093 rcu_read_unlock();
1085 return memcg; 1094 return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
3958 return 0; 3967 return 0;
3959 } 3968 }
3960 3969
3961 /* 3970 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3962 * Page cache insertions can happen without an actual mm 3971 if (!memcg)
3963 * context, e.g. during disk probing on boot. 3972 return -ENOMEM;
3964 */
3965 if (unlikely(!mm))
3966 memcg = root_mem_cgroup;
3967 else {
3968 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3969 if (!memcg)
3970 return -ENOMEM;
3971 }
3972 __mem_cgroup_commit_charge(memcg, page, 1, type, false); 3973 __mem_cgroup_commit_charge(memcg, page, 1, type, false);
3973 return 0; 3974 return 0;
3974} 3975}
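
The two mm/memcontrol.c hunks move the !mm fallback out of the charge path and into get_mem_cgroup_from_mm() itself, so every caller, not just mem_cgroup_charge_file(), handles mm-less contexts (disk probing at boot, loopback IO, acct() writes) the same way. A sketch of the shape of that refactor, with made-up names:

        struct ctx { int refcount; };
        static struct ctx root_ctx = { .refcount = 1 };

        /* Hedged sketch: the getter owns the NULL fallback, so callers
         * never special-case a missing owner. refcount++ stands in for
         * the kernel's css_tryget() loop. */
        static struct ctx *get_ctx(struct ctx *owner)
        {
                struct ctx *c = owner ? owner : &root_ctx;

                c->refcount++;
                return c;
        }
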
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 35ef28acf137..9ccef39a9de2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1081 return 0; 1081 return 0;
1082 } else if (PageHuge(hpage)) { 1082 } else if (PageHuge(hpage)) {
1083 /* 1083 /*
1084 * Check "just unpoisoned", "filter hit", and 1084 * Check "filter hit" and "race with other subpage."
1085 * "race with other subpage."
1086 */ 1085 */
1087 lock_page(hpage); 1086 lock_page(hpage);
1088 if (!PageHWPoison(hpage) 1087 if (PageHWPoison(hpage)) {
1089 || (hwpoison_filter(p) && TestClearPageHWPoison(p)) 1088 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1090 || (p != hpage && TestSetPageHWPoison(hpage))) { 1089 || (p != hpage && TestSetPageHWPoison(hpage))) {
1091 atomic_long_sub(nr_pages, &num_poisoned_pages); 1090 atomic_long_sub(nr_pages, &num_poisoned_pages);
1092 return 0; 1091 unlock_page(hpage);
1092 return 0;
1093 }
1093 } 1094 }
1094 set_page_hwpoison_huge_page(hpage); 1095 set_page_hwpoison_huge_page(hpage);
1095 res = dequeue_hwpoisoned_huge_page(hpage); 1096 res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1152 */ 1153 */
1153 if (!PageHWPoison(p)) { 1154 if (!PageHWPoison(p)) {
1154 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); 1155 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
1156 atomic_long_sub(nr_pages, &num_poisoned_pages);
1157 put_page(hpage);
1155 res = 0; 1158 res = 0;
1156 goto out; 1159 goto out;
1157 } 1160 }
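
Both mm/memory-failure.c hunks balance state on early returns: the huge-page branch gains an unlock_page() before returning, and the "just unpoisoned" branch now drops the poisoned-page count and the page reference taken earlier. A minimal sketch of the rule, with placeholder types:

        struct page { int locked; int refs; };

        /* Hedged sketch: every return after lock/get must unlock/put. */
        static int handle(struct page *p, int poisoned)
        {
                p->refs++;              /* get_page() stand-in */
                p->locked = 1;          /* lock_page() stand-in */

                if (!poisoned) {        /* early exit... */
                        p->locked = 0;  /* ...still unlocks */
                        p->refs--;      /* ...and puts */
                        return 0;
                }

                p->locked = 0;
                p->refs--;
                return 1;
        }
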
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 175273f38cb1..44ebd5c2cd4a 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
169 if (err < 0) 169 if (err < 0)
170 goto out_uninit_mvrp; 170 goto out_uninit_mvrp;
171 171
172 vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
172 err = register_netdevice(dev); 173 err = register_netdevice(dev);
173 if (err < 0) 174 if (err < 0)
174 goto out_uninit_mvrp; 175 goto out_uninit_mvrp;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 733ec283ed1b..019efb79708f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
493 } 493 }
494} 494}
495 495
496static int vlan_calculate_locking_subclass(struct net_device *real_dev)
497{
498 int subclass = 0;
499
500 while (is_vlan_dev(real_dev)) {
501 subclass++;
502 real_dev = vlan_dev_priv(real_dev)->real_dev;
503 }
504
505 return subclass;
506}
507
508static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
509{
510 int err = 0, subclass;
511
512 subclass = vlan_calculate_locking_subclass(to);
513
514 spin_lock_nested(&to->addr_list_lock, subclass);
515 err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
516 if (!err)
517 __dev_set_rx_mode(to);
518 spin_unlock(&to->addr_list_lock);
519}
520
521static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
522{
523 int err = 0, subclass;
524
525 subclass = vlan_calculate_locking_subclass(to);
526
527 spin_lock_nested(&to->addr_list_lock, subclass);
528 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
529 if (!err)
530 __dev_set_rx_mode(to);
531 spin_unlock(&to->addr_list_lock);
532}
533
534static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) 496static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
535{ 497{
536 vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); 498 dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
537 vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); 499 dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
538} 500}
539 501
540/* 502/*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
562 netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); 524 netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
563} 525}
564 526
527static int vlan_dev_get_lock_subclass(struct net_device *dev)
528{
529 return vlan_dev_priv(dev)->nest_level;
530}
531
565static const struct header_ops vlan_header_ops = { 532static const struct header_ops vlan_header_ops = {
566 .create = vlan_dev_hard_header, 533 .create = vlan_dev_hard_header,
567 .rebuild = vlan_dev_rebuild_header, 534 .rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
597static int vlan_dev_init(struct net_device *dev) 564static int vlan_dev_init(struct net_device *dev)
598{ 565{
599 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; 566 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
600 int subclass = 0;
601 567
602 netif_carrier_off(dev); 568 netif_carrier_off(dev);
603 569
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
646 612
647 SET_NETDEV_DEVTYPE(dev, &vlan_type); 613 SET_NETDEV_DEVTYPE(dev, &vlan_type);
648 614
649 subclass = vlan_calculate_locking_subclass(dev); 615 vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
650 vlan_dev_set_lockdep_class(dev, subclass);
651 616
652 vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 617 vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
653 if (!vlan_dev_priv(dev)->vlan_pcpu_stats) 618 if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
819 .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, 784 .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
820#endif 785#endif
821 .ndo_fix_features = vlan_dev_fix_features, 786 .ndo_fix_features = vlan_dev_fix_features,
787 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
822}; 788};
823 789
824void vlan_setup(struct net_device *dev) 790void vlan_setup(struct net_device *dev)
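
The vlan changes replace the per-call vlan_calculate_locking_subclass() walk with a nest_level computed once in register_vlan_dev() and exposed via ndo_get_lock_subclass, letting the open-coded addr-list sync helpers collapse back into dev_mc_sync()/dev_uc_sync(). A sketch of the depth computation the removed helper performed, with illustrative types:

        struct dev { struct dev *lower; int is_vlan; };

        /* Hedged sketch: depth of stacked vlan devices, usable as a
         * lockdep subclass so nested addr_list_locks stay distinct. */
        static int nest_level(const struct dev *d)
        {
                int n = 0;

                while (d && d->is_vlan) {
                        n++;
                        d = d->lower;
                }
                return n;
        }
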
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index b3bd4ec3fd94..f04224c32005 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1545,6 +1545,8 @@ out_neigh:
1545 if ((orig_neigh_node) && (!is_single_hop_neigh)) 1545 if ((orig_neigh_node) && (!is_single_hop_neigh))
1546 batadv_orig_node_free_ref(orig_neigh_node); 1546 batadv_orig_node_free_ref(orig_neigh_node);
1547out: 1547out:
1548 if (router_ifinfo)
1549 batadv_neigh_ifinfo_free_ref(router_ifinfo);
1548 if (router) 1550 if (router)
1549 batadv_neigh_node_free_ref(router); 1551 batadv_neigh_node_free_ref(router);
1550 if (router_router) 1552 if (router_router)
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index b25fd64d727b..aa5d4946d0d7 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
940 * additional DAT answer may trigger kernel warnings about 940 * additional DAT answer may trigger kernel warnings about
941 * a packet coming from the wrong port. 941 * a packet coming from the wrong port.
942 */ 942 */
943 if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, 943 if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
944 BATADV_NO_FLAGS)) {
945 ret = true; 944 ret = true;
946 goto out; 945 goto out;
947 } 946 }
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index bcc4bea632fa..f14e54a05691 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
418 struct batadv_neigh_node *neigh_node) 418 struct batadv_neigh_node *neigh_node)
419{ 419{
420 struct batadv_priv *bat_priv; 420 struct batadv_priv *bat_priv;
421 struct batadv_hard_iface *primary_if; 421 struct batadv_hard_iface *primary_if = NULL;
422 struct batadv_frag_packet frag_header; 422 struct batadv_frag_packet frag_header;
423 struct sk_buff *skb_fragment; 423 struct sk_buff *skb_fragment;
424 unsigned mtu = neigh_node->if_incoming->net_dev->mtu; 424 unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
425 unsigned header_size = sizeof(frag_header); 425 unsigned header_size = sizeof(frag_header);
426 unsigned max_fragment_size, max_packet_size; 426 unsigned max_fragment_size, max_packet_size;
427 bool ret = false;
427 428
428 /* To avoid merge and refragmentation at next-hops we never send 429 /* To avoid merge and refragmentation at next-hops we never send
429 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE 430 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
483 skb->len + ETH_HLEN); 484 skb->len + ETH_HLEN);
484 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 485 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
485 486
486 return true; 487 ret = true;
488
487out_err: 489out_err:
488 return false; 490 if (primary_if)
491 batadv_hardif_free_ref(primary_if);
492
493 return ret;
489} 494}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index c835e137423b..90cff585b37d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -42,8 +42,10 @@
42 42
43static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node) 43static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
44{ 44{
45 if (atomic_dec_and_test(&gw_node->refcount)) 45 if (atomic_dec_and_test(&gw_node->refcount)) {
46 batadv_orig_node_free_ref(gw_node->orig_node);
46 kfree_rcu(gw_node, rcu); 47 kfree_rcu(gw_node, rcu);
48 }
47} 49}
48 50
49static struct batadv_gw_node * 51static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
406 if (gateway->bandwidth_down == 0) 408 if (gateway->bandwidth_down == 0)
407 return; 409 return;
408 410
411 if (!atomic_inc_not_zero(&orig_node->refcount))
412 return;
413
409 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); 414 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
410 if (!gw_node) 415 if (!gw_node) {
416 batadv_orig_node_free_ref(orig_node);
411 return; 417 return;
418 }
412 419
413 INIT_HLIST_NODE(&gw_node->list); 420 INIT_HLIST_NODE(&gw_node->list);
414 gw_node->orig_node = orig_node; 421 gw_node->orig_node = orig_node;
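
The gateway_client.c hunks fix gw_node's previously borrowed pointer: batadv_gw_node_add() now pins orig_node with atomic_inc_not_zero() before storing it, drops the reference if the allocation fails, and the free path releases it. A self-contained sketch of that acquire-before-publish pattern:

        #include <stdatomic.h>
        #include <stdlib.h>

        struct obj { atomic_int refcount; };
        struct holder { struct obj *obj; };

        /* Hedged sketch: take the reference first, undo it on any
         * failure between acquire and publish. */
        static struct holder *holder_new(struct obj *o)
        {
                struct holder *h;
                int old = atomic_load(&o->refcount);

                do {                    /* inc_not_zero */
                        if (old == 0)
                                return NULL;
                } while (!atomic_compare_exchange_weak(&o->refcount, &old, old + 1));

                h = calloc(1, sizeof(*h));
                if (!h) {
                        atomic_fetch_sub(&o->refcount, 1);      /* undo */
                        return NULL;
                }
                h->obj = o;
                return h;
        }
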
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b851cc580853..fbda6b54baff 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
83 return true; 83 return true;
84 84
85 /* no more parents... stop recursion */ 85 /* no more parents... stop recursion */
86 if (net_dev->iflink == net_dev->ifindex) 86 if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
87 return false; 87 return false;
88 88
89 /* recurse over the parent device */ 89 /* recurse over the parent device */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ffd9dfbd9b0e..6a484514cd3e 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
501static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu) 501static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
502{ 502{
503 struct batadv_orig_ifinfo *orig_ifinfo; 503 struct batadv_orig_ifinfo *orig_ifinfo;
504 struct batadv_neigh_node *router;
504 505
505 orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu); 506 orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
506 507
507 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT) 508 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
508 batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing); 509 batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
509 510
511 /* this is the last reference to this object */
512 router = rcu_dereference_protected(orig_ifinfo->router, true);
513 if (router)
514 batadv_neigh_node_free_ref_now(router);
510 kfree(orig_ifinfo); 515 kfree(orig_ifinfo);
511} 516}
512 517
@@ -702,6 +707,47 @@ free_orig_node:
702} 707}
703 708
704/** 709/**
710 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
711 * @bat_priv: the bat priv with all the soft interface information
712 * @neigh: neighbor node which is to be checked
713 */
714static void
715batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
716 struct batadv_neigh_node *neigh)
717{
718 struct batadv_neigh_ifinfo *neigh_ifinfo;
719 struct batadv_hard_iface *if_outgoing;
720 struct hlist_node *node_tmp;
721
722 spin_lock_bh(&neigh->ifinfo_lock);
723
724 /* for all ifinfo objects for this neighbor */
725 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
726 &neigh->ifinfo_list, list) {
727 if_outgoing = neigh_ifinfo->if_outgoing;
728
729 /* always keep the default interface */
730 if (if_outgoing == BATADV_IF_DEFAULT)
731 continue;
732
733 /* don't purge if the interface is not (going) down */
734 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
735 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
736 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
737 continue;
738
739 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
740 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
741 neigh->addr, if_outgoing->net_dev->name);
742
743 hlist_del_rcu(&neigh_ifinfo->list);
744 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
745 }
746
747 spin_unlock_bh(&neigh->ifinfo_lock);
748}
749
750/**
705 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator 751 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
706 * @bat_priv: the bat priv with all the soft interface information 752 * @bat_priv: the bat priv with all the soft interface information
707 * @orig_node: orig node which is to be checked 753 * @orig_node: orig node which is to be checked
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
800 846
801 hlist_del_rcu(&neigh_node->list); 847 hlist_del_rcu(&neigh_node->list);
802 batadv_neigh_node_free_ref(neigh_node); 848 batadv_neigh_node_free_ref(neigh_node);
849 } else {
850 /* only necessary if not the whole neighbor is to be
851 * deleted, but some interface has been removed.
852 */
853 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
803 } 854 }
804 } 855 }
805 856
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
857{ 908{
858 struct batadv_neigh_node *best_neigh_node; 909 struct batadv_neigh_node *best_neigh_node;
859 struct batadv_hard_iface *hard_iface; 910 struct batadv_hard_iface *hard_iface;
860 bool changed; 911 bool changed_ifinfo, changed_neigh;
861 912
862 if (batadv_has_timed_out(orig_node->last_seen, 913 if (batadv_has_timed_out(orig_node->last_seen,
863 2 * BATADV_PURGE_TIMEOUT)) { 914 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
867 jiffies_to_msecs(orig_node->last_seen)); 918 jiffies_to_msecs(orig_node->last_seen));
868 return true; 919 return true;
869 } 920 }
870 changed = batadv_purge_orig_ifinfo(bat_priv, orig_node); 921 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
871 changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node); 922 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
872 923
873 if (!changed) 924 if (!changed_ifinfo && !changed_neigh)
874 return false; 925 return false;
875 926
876 /* first for NULL ... */ 927 /* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1028 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface); 1079 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1029 1080
1030out: 1081out:
1031 batadv_hardif_free_ref(hard_iface); 1082 if (hard_iface)
1083 batadv_hardif_free_ref(hard_iface);
1032 return 0; 1084 return 0;
1033} 1085}
1034 1086
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 80e1b0f60a30..2acf7fa1fec6 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
859 return NF_STOLEN; 859 return NF_STOLEN;
860} 860}
861 861
862#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4) 862#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
863static int br_nf_dev_queue_xmit(struct sk_buff *skb) 863static int br_nf_dev_queue_xmit(struct sk_buff *skb)
864{ 864{
865 int ret; 865 int ret;
866 866
867 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && 867 if (skb->protocol == htons(ETH_P_IP) &&
868 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && 868 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
869 !skb_is_gso(skb)) { 869 !skb_is_gso(skb)) {
870 if (br_parse_ip_options(skb)) 870 if (br_parse_ip_options(skb))
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dac7f9b98687..1948d592aa54 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
557 return r; 557 return r;
558} 558}
559 559
560static int ceph_tcp_sendpage(struct socket *sock, struct page *page, 560static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
561 int offset, size_t size, bool more) 561 int offset, size_t size, bool more)
562{ 562{
563 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR); 563 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
@@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
570 return ret; 570 return ret;
571} 571}
572 572
573static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
574 int offset, size_t size, bool more)
575{
576 int ret;
577 struct kvec iov;
578
579 /* sendpage cannot properly handle pages with page_count == 0,
580 * we need to fall back to sendmsg if that's the case */
581 if (page_count(page) >= 1)
582 return __ceph_tcp_sendpage(sock, page, offset, size, more);
583
584 iov.iov_base = kmap(page) + offset;
585 iov.iov_len = size;
586 ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
587 kunmap(page);
588
589 return ret;
590}
573 591
574/* 592/*
575 * Shutdown/close the socket for the given connection. 593 * Shutdown/close the socket for the given connection.
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 8b8a5a24b223..c547e46084d3 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -329,6 +329,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
329 dout("crush decode tunable chooseleaf_descend_once = %d", 329 dout("crush decode tunable chooseleaf_descend_once = %d",
330 c->chooseleaf_descend_once); 330 c->chooseleaf_descend_once);
331 331
332 ceph_decode_need(p, end, sizeof(u8), done);
333 c->chooseleaf_vary_r = ceph_decode_8(p);
334 dout("crush decode tunable chooseleaf_vary_r = %d",
335 c->chooseleaf_vary_r);
336
332done: 337done:
333 dout("crush_decode success\n"); 338 dout("crush_decode success\n");
334 return c; 339 return c;
diff --git a/net/core/dev.c b/net/core/dev.c
index d2c8a06b3a98..9abc503b19b7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
2418 * 2. No high memory really exists on this machine. 2418 * 2. No high memory really exists on this machine.
2419 */ 2419 */
2420 2420
2421static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) 2421static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2422{ 2422{
2423#ifdef CONFIG_HIGHMEM 2423#ifdef CONFIG_HIGHMEM
2424 int i; 2424 int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2493} 2493}
2494 2494
2495static netdev_features_t harmonize_features(struct sk_buff *skb, 2495static netdev_features_t harmonize_features(struct sk_buff *skb,
2496 const struct net_device *dev, 2496 netdev_features_t features)
2497 netdev_features_t features)
2498{ 2497{
2499 int tmp; 2498 int tmp;
2500 2499
2501 if (skb->ip_summed != CHECKSUM_NONE && 2500 if (skb->ip_summed != CHECKSUM_NONE &&
2502 !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) { 2501 !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
2503 features &= ~NETIF_F_ALL_CSUM; 2502 features &= ~NETIF_F_ALL_CSUM;
2504 } else if (illegal_highdma(dev, skb)) { 2503 } else if (illegal_highdma(skb->dev, skb)) {
2505 features &= ~NETIF_F_SG; 2504 features &= ~NETIF_F_SG;
2506 } 2505 }
2507 2506
2508 return features; 2507 return features;
2509} 2508}
2510 2509
2511netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 2510netdev_features_t netif_skb_features(struct sk_buff *skb)
2512 const struct net_device *dev)
2513{ 2511{
2514 __be16 protocol = skb->protocol; 2512 __be16 protocol = skb->protocol;
2515 netdev_features_t features = dev->features; 2513 netdev_features_t features = skb->dev->features;
2516 2514
2517 if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) 2515 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2518 features &= ~NETIF_F_GSO_MASK; 2516 features &= ~NETIF_F_GSO_MASK;
2519 2517
2520 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2518 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2521 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2519 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2522 protocol = veh->h_vlan_encapsulated_proto; 2520 protocol = veh->h_vlan_encapsulated_proto;
2523 } else if (!vlan_tx_tag_present(skb)) { 2521 } else if (!vlan_tx_tag_present(skb)) {
2524 return harmonize_features(skb, dev, features); 2522 return harmonize_features(skb, features);
2525 } 2523 }
2526 2524
2527 features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2525 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2528 NETIF_F_HW_VLAN_STAG_TX); 2526 NETIF_F_HW_VLAN_STAG_TX);
2529 2527
2530 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) 2528 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
2532 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2530 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2533 NETIF_F_HW_VLAN_STAG_TX; 2531 NETIF_F_HW_VLAN_STAG_TX;
2534 2532
2535 return harmonize_features(skb, dev, features); 2533 return harmonize_features(skb, features);
2536} 2534}
2537EXPORT_SYMBOL(netif_skb_dev_features); 2535EXPORT_SYMBOL(netif_skb_features);
2538 2536
2539int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2537int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2540 struct netdev_queue *txq) 2538 struct netdev_queue *txq)
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
3953 } 3951 }
3954 NAPI_GRO_CB(skb)->count = 1; 3952 NAPI_GRO_CB(skb)->count = 1;
3955 NAPI_GRO_CB(skb)->age = jiffies; 3953 NAPI_GRO_CB(skb)->age = jiffies;
3954 NAPI_GRO_CB(skb)->last = skb;
3956 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3955 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3957 skb->next = napi->gro_list; 3956 skb->next = napi->gro_list;
3958 napi->gro_list = skb; 3957 napi->gro_list = skb;
@@ -4543,6 +4542,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
4543EXPORT_SYMBOL(netdev_adjacent_get_private); 4542EXPORT_SYMBOL(netdev_adjacent_get_private);
4544 4543
4545/** 4544/**
4545 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4546 * @dev: device
4547 * @iter: list_head ** of the current position
4548 *
4549 * Gets the next device from the dev's upper list, starting from iter
4550 * position. The caller must hold RCU read lock.
4551 */
4552struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4553 struct list_head **iter)
4554{
4555 struct netdev_adjacent *upper;
4556
4557 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4558
4559 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4560
4561 if (&upper->list == &dev->adj_list.upper)
4562 return NULL;
4563
4564 *iter = &upper->list;
4565
4566 return upper->dev;
4567}
4568EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
4569
4570/**
4546 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list 4571 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4547 * @dev: device 4572 * @dev: device
4548 * @iter: list_head ** of the current position 4573 * @iter: list_head ** of the current position
@@ -4624,6 +4649,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4624EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 4649EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4625 4650
4626/** 4651/**
4652 * netdev_lower_get_next - Get the next device from the lower neighbour
4653 * list
4654 * @dev: device
4655 * @iter: list_head ** of the current position
4656 *
4657 * Gets the next netdev_adjacent from the dev's lower neighbour
4658 * list, starting from iter position. The caller must hold RTNL lock or
4659 * its own locking that guarantees that the neighbour lower
4660 * list will remain unchanged.
4661 */
4662void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4663{
4664 struct netdev_adjacent *lower;
4665
4666 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4667
4668 if (&lower->list == &dev->adj_list.lower)
4669 return NULL;
4670
4671 *iter = &lower->list;
4672
4673 return lower->dev;
4674}
4675EXPORT_SYMBOL(netdev_lower_get_next);
4676
4677/**
4627 * netdev_lower_get_first_private_rcu - Get the first ->private from the 4678 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4628 * lower neighbour list, RCU 4679 * lower neighbour list, RCU
4629 * variant 4680 * variant
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
5073} 5124}
5074EXPORT_SYMBOL(netdev_lower_dev_get_private); 5125EXPORT_SYMBOL(netdev_lower_dev_get_private);
5075 5126
5127
5128int dev_get_nest_level(struct net_device *dev,
5129 bool (*type_check)(struct net_device *dev))
5130{
5131 struct net_device *lower = NULL;
5132 struct list_head *iter;
5133 int max_nest = -1;
5134 int nest;
5135
5136 ASSERT_RTNL();
5137
5138 netdev_for_each_lower_dev(dev, lower, iter) {
5139 nest = dev_get_nest_level(lower, type_check);
5140 if (max_nest < nest)
5141 max_nest = nest;
5142 }
5143
5144 if (type_check(dev))
5145 max_nest++;
5146
5147 return max_nest;
5148}
5149EXPORT_SYMBOL(dev_get_nest_level);
5150
5076static void dev_change_rx_flags(struct net_device *dev, int flags) 5151static void dev_change_rx_flags(struct net_device *dev, int flags)
5077{ 5152{
5078 const struct net_device_ops *ops = dev->netdev_ops; 5153 const struct net_device_ops *ops = dev->netdev_ops;
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
5238 if (ops->ndo_set_rx_mode) 5313 if (ops->ndo_set_rx_mode)
5239 ops->ndo_set_rx_mode(dev); 5314 ops->ndo_set_rx_mode(dev);
5240} 5315}
5241EXPORT_SYMBOL(__dev_set_rx_mode);
5242 5316
5243void dev_set_rx_mode(struct net_device *dev) 5317void dev_set_rx_mode(struct net_device *dev)
5244{ 5318{
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
5543 5617
5544/* Delayed registration/unregistration */ 5618/* Delayed registration/unregistration */
5545static LIST_HEAD(net_todo_list); 5619static LIST_HEAD(net_todo_list);
5546static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 5620DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5547 5621
5548static void net_set_todo(struct net_device *dev) 5622static void net_set_todo(struct net_device *dev)
5549{ 5623{
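
The net/core/dev.c additions are two cursor-style list iterators plus the recursive dev_get_nest_level(), which walks the lower-device list so stacked devices (vlan over vlan, vlan over bond) get an accurate depth under RTNL. The iterators advance a caller-held list_head cursor; a self-contained sketch of that shape with an open-coded container_of:

        #include <stddef.h>

        struct list_head { struct list_head *next, *prev; };
        struct adj { void *dev; struct list_head list; };

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        /* Hedged sketch: return the next dev and advance *iter, or NULL
         * once the cursor wraps back around to the list head. */
        static void *next_dev(struct list_head *head, struct list_head **iter)
        {
                struct adj *a = container_of((*iter)->next, struct adj, list);

                if (&a->list == head)
                        return NULL;

                *iter = &a->list;
                return a->dev;
        }
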
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8f8a96ef9f3f..32d872eec7f5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
1248 neigh->updated = jiffies; 1248 neigh->updated = jiffies;
1249 if (!(neigh->nud_state & NUD_FAILED)) 1249 if (!(neigh->nud_state & NUD_FAILED))
1250 return; 1250 return;
1251 neigh->nud_state = NUD_PROBE; 1251 neigh->nud_state = NUD_INCOMPLETE;
1252 atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES)); 1252 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1253 neigh_add_timer(neigh, 1253 neigh_add_timer(neigh,
1254 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME)); 1254 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1255} 1255}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 81d3a9a08453..7c8ffd974961 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -24,7 +24,7 @@
24 24
25static LIST_HEAD(pernet_list); 25static LIST_HEAD(pernet_list);
26static struct list_head *first_device = &pernet_list; 26static struct list_head *first_device = &pernet_list;
27static DEFINE_MUTEX(net_mutex); 27DEFINE_MUTEX(net_mutex);
28 28
29LIST_HEAD(net_namespace_list); 29LIST_HEAD(net_namespace_list);
30EXPORT_SYMBOL_GPL(net_namespace_list); 30EXPORT_SYMBOL_GPL(net_namespace_list);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9837bebf93ce..2d8d8fcfa060 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
353} 353}
354EXPORT_SYMBOL_GPL(__rtnl_link_unregister); 354EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
355 355
356/* Return with the rtnl_lock held when there are no network
357 * devices unregistering in any network namespace.
358 */
359static void rtnl_lock_unregistering_all(void)
360{
361 struct net *net;
362 bool unregistering;
363 DEFINE_WAIT(wait);
364
365 for (;;) {
366 prepare_to_wait(&netdev_unregistering_wq, &wait,
367 TASK_UNINTERRUPTIBLE);
368 unregistering = false;
369 rtnl_lock();
370 for_each_net(net) {
371 if (net->dev_unreg_count > 0) {
372 unregistering = true;
373 break;
374 }
375 }
376 if (!unregistering)
377 break;
378 __rtnl_unlock();
379 schedule();
380 }
381 finish_wait(&netdev_unregistering_wq, &wait);
382}
383
356/** 384/**
357 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. 385 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
358 * @ops: struct rtnl_link_ops * to unregister 386 * @ops: struct rtnl_link_ops * to unregister
359 */ 387 */
360void rtnl_link_unregister(struct rtnl_link_ops *ops) 388void rtnl_link_unregister(struct rtnl_link_ops *ops)
361{ 389{
362 rtnl_lock(); 390 /* Close the race with cleanup_net() */
391 mutex_lock(&net_mutex);
392 rtnl_lock_unregistering_all();
363 __rtnl_link_unregister(ops); 393 __rtnl_link_unregister(ops);
364 rtnl_unlock(); 394 rtnl_unlock();
395 mutex_unlock(&net_mutex);
365} 396}
366EXPORT_SYMBOL_GPL(rtnl_link_unregister); 397EXPORT_SYMBOL_GPL(rtnl_link_unregister);
367 398
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1b62343f5837..8383b2bddeb9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3076 if (unlikely(p->len + len >= 65536)) 3076 if (unlikely(p->len + len >= 65536))
3077 return -E2BIG; 3077 return -E2BIG;
3078 3078
3079 lp = NAPI_GRO_CB(p)->last ?: p; 3079 lp = NAPI_GRO_CB(p)->last;
3080 pinfo = skb_shinfo(lp); 3080 pinfo = skb_shinfo(lp);
3081 3081
3082 if (headlen <= offset) { 3082 if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
3192 3192
3193 __skb_pull(skb, offset); 3193 __skb_pull(skb, offset);
3194 3194
3195 if (!NAPI_GRO_CB(p)->last) 3195 if (NAPI_GRO_CB(p)->last == p)
3196 skb_shinfo(p)->frag_list = skb; 3196 skb_shinfo(p)->frag_list = skb;
3197 else 3197 else
3198 NAPI_GRO_CB(p)->last->next = skb; 3198 NAPI_GRO_CB(p)->last->next = skb;
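
With dev_gro_receive() now initializing NAPI_GRO_CB(skb)->last to skb itself, skb_gro_receive() drops both the "?: p" fallback and the NULL test: a self-referencing tail pointer means "empty chain", and "last == p" cleanly selects the first-append case. A sketch of the sentinel-initialized tail pointer with generic types:

        #include <stddef.h>

        struct chain { struct chain *next, *last; };

        static void chain_init(struct chain *head)
        {
                head->next = NULL;
                head->last = head;      /* self, not NULL: one less branch */
        }

        /* Hedged sketch: append without ever testing last for NULL;
         * the first append writes through head->last == head. */
        static void chain_append(struct chain *head, struct chain *n)
        {
                n->next = NULL;
                head->last->next = n;
                head->last = n;
        }
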
diff --git a/net/core/utils.c b/net/core/utils.c
index 2f737bf90b3f..eed34338736c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
348{ 348{
349 struct __net_random_once_work *work = 349 struct __net_random_once_work *work =
350 container_of(w, struct __net_random_once_work, work); 350 container_of(w, struct __net_random_once_work, work);
351 if (!static_key_enabled(work->key)) 351 BUG_ON(!static_key_enabled(work->key));
352 static_key_slow_inc(work->key); 352 static_key_slow_dec(work->key);
353 kfree(work); 353 kfree(work);
354} 354}
355 355
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
367} 367}
368 368
369bool __net_get_random_once(void *buf, int nbytes, bool *done, 369bool __net_get_random_once(void *buf, int nbytes, bool *done,
370 struct static_key *done_key) 370 struct static_key *once_key)
371{ 371{
372 static DEFINE_SPINLOCK(lock); 372 static DEFINE_SPINLOCK(lock);
373 unsigned long flags; 373 unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
382 *done = true; 382 *done = true;
383 spin_unlock_irqrestore(&lock, flags); 383 spin_unlock_irqrestore(&lock, flags);
384 384
385 __net_random_once_disable_jump(done_key); 385 __net_random_once_disable_jump(once_key);
386 386
387 return true; 387 return true;
388} 388}
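
The net/core/utils.c fix inverts the deferred work: the once-key starts enabled and, after the first caller fills the buffer, the work item disables it with static_key_slow_dec(); the old code incremented instead, leaving the fast path permanently taken. A userspace sketch of run-once semantics with a plain atomic flag; the kernel's static key makes the equivalent fast-path branch patchable, and the real code publishes the data under a spinlock before flipping the key:

        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_bool done;

        /* Hedged sketch: run fn at most once across all callers. */
        static bool do_once(void (*fn)(void))
        {
                if (atomic_load_explicit(&done, memory_order_acquire))
                        return false;           /* fast path */

                if (atomic_exchange(&done, true))
                        return false;           /* lost the race */

                fn();
                return true;
        }
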
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0eb5d5e76dfb..5db37cef50a9 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
406 goto out_free; 406 goto out_free;
407 } 407 }
408 408
409 chip_index = 0; 409 chip_index = -1;
410 for_each_available_child_of_node(np, child) { 410 for_each_available_child_of_node(np, child) {
411 chip_index++;
411 cd = &pd->chip[chip_index]; 412 cd = &pd->chip[chip_index];
412 413
413 cd->mii_bus = &mdio_bus->dev; 414 cd->mii_bus = &mdio_bus->dev;
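
The net/dsa/dsa.c fix starts chip_index at -1 and bumps it at the top of the loop; initialized to 0 and never advanced, the old code configured chip 0 for every child node. Since for_each_available_child_of_node() carries no index of its own, the counter pattern looks like this sketch (illustrative types):

        struct child { struct child *next; };
        struct chip { int configured; };

        /* Hedged sketch: bump the index first so no later 'continue'
         * can skip it, and each child gets its own slot. */
        static void probe_children(struct child *head, struct chip *chips)
        {
                struct child *c;
                int idx = -1;

                for (c = head; c; c = c->next) {
                        idx++;
                        chips[idx].configured = 1;
                }
        }
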
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8c54870db792..6d6dd345bc4d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1650,6 +1650,39 @@ static int __init init_ipv4_mibs(void)
1650 return register_pernet_subsys(&ipv4_mib_ops); 1650 return register_pernet_subsys(&ipv4_mib_ops);
1651} 1651}
1652 1652
1653static __net_init int inet_init_net(struct net *net)
1654{
1655 /*
1656 * Set defaults for local port range
1657 */
1658 seqlock_init(&net->ipv4.ip_local_ports.lock);
1659 net->ipv4.ip_local_ports.range[0] = 32768;
1660 net->ipv4.ip_local_ports.range[1] = 61000;
1661
1662 seqlock_init(&net->ipv4.ping_group_range.lock);
1663 /*
1664 * Sane defaults - nobody may create ping sockets.
1665 * Boot scripts should set this to distro-specific group.
1666 */
1667 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1668 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1669 return 0;
1670}
1671
1672static __net_exit void inet_exit_net(struct net *net)
1673{
1674}
1675
1676static __net_initdata struct pernet_operations af_inet_ops = {
1677 .init = inet_init_net,
1678 .exit = inet_exit_net,
1679};
1680
1681static int __init init_inet_pernet_ops(void)
1682{
1683 return register_pernet_subsys(&af_inet_ops);
1684}
1685
1653static int ipv4_proc_init(void); 1686static int ipv4_proc_init(void);
1654 1687
1655/* 1688/*
@@ -1794,6 +1827,9 @@ static int __init inet_init(void)
1794 if (ip_mr_init()) 1827 if (ip_mr_init())
1795 pr_crit("%s: Cannot init ipv4 mroute\n", __func__); 1828 pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
1796#endif 1829#endif
1830
1831 if (init_inet_pernet_ops())
1832 pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
1797 /* 1833 /*
1798 * Initialise per-cpu ipv4 mibs 1834 * Initialise per-cpu ipv4 mibs
1799 */ 1835 */
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 8a043f03c88e..b10cd43a4722 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
821 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 821 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
822 if (fi == NULL) 822 if (fi == NULL)
823 goto failure; 823 goto failure;
824 fib_info_cnt++;
824 if (cfg->fc_mx) { 825 if (cfg->fc_mx) {
825 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); 826 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
826 if (!fi->fib_metrics) 827 if (!fi->fib_metrics)
827 goto failure; 828 goto failure;
828 } else 829 } else
829 fi->fib_metrics = (u32 *) dst_default_metrics; 830 fi->fib_metrics = (u32 *) dst_default_metrics;
830 fib_info_cnt++;
831 831
832 fi->fib_net = hold_net(net); 832 fi->fib_net = hold_net(net);
833 fi->fib_protocol = cfg->fc_protocol; 833 fi->fib_protocol = cfg->fc_protocol;
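
The net/ipv4/fib_semantics.c hunk moves fib_info_cnt++ to directly after the successful kzalloc(), because the shared failure path frees the object with a matching decrement; counting only after the metrics allocation meant an -ENOMEM there decremented a counter that had never been incremented. A sketch of keeping a global count in lockstep with object lifetime:

        #include <stdlib.h>

        static int obj_cnt;

        struct obj { int *metrics; };

        static void obj_free(struct obj *o)
        {
                obj_cnt--;              /* teardown always decrements... */
                free(o->metrics);
                free(o);
        }

        /* Hedged sketch: count the object as soon as it exists, so the
         * shared error path's decrement is always balanced. */
        static struct obj *obj_create(int want_metrics)
        {
                struct obj *o = calloc(1, sizeof(*o));

                if (!o)
                        return NULL;
                obj_cnt++;              /* ...so increment before any bailout */

                if (want_metrics) {
                        o->metrics = calloc(16, sizeof(int));
                        if (!o->metrics) {
                                obj_free(o);
                                return NULL;
                        }
                }
                return o;
        }
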
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 0d1e2cb877ec..a56b8e6e866a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
37 unsigned int seq; 37 unsigned int seq;
38 38
39 do { 39 do {
40 seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); 40 seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
41 41
42 *low = net->ipv4.sysctl_local_ports.range[0]; 42 *low = net->ipv4.ip_local_ports.range[0];
43 *high = net->ipv4.sysctl_local_ports.range[1]; 43 *high = net->ipv4.ip_local_ports.range[1];
44 } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); 44 } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
45} 45}
46EXPORT_SYMBOL(inet_get_local_port_range); 46EXPORT_SYMBOL(inet_get_local_port_range);
47 47
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index be8abe73bb9f..6f111e48e11c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -42,12 +42,12 @@
42static bool ip_may_fragment(const struct sk_buff *skb) 42static bool ip_may_fragment(const struct sk_buff *skb)
43{ 43{
44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || 44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
45 !skb->local_df; 45 skb->local_df;
46} 46}
47 47
48static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) 48static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
49{ 49{
50 if (skb->len <= mtu || skb->local_df) 50 if (skb->len <= mtu)
51 return false; 51 return false;
52 52
53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
56 return true; 56 return true;
57} 57}
58 58
59static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
60{
61 unsigned int mtu;
62
63 if (skb->local_df || !skb_is_gso(skb))
64 return false;
65
66 mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
67
68 /* if seglen > mtu, do software segmentation for IP fragmentation on
69 * output. DF bit cannot be set since ip_forward would have sent
70 * icmp error.
71 */
72 return skb_gso_network_seglen(skb) > mtu;
73}
74
75/* called if GSO skb needs to be fragmented on forward */
76static int ip_forward_finish_gso(struct sk_buff *skb)
77{
78 struct dst_entry *dst = skb_dst(skb);
79 netdev_features_t features;
80 struct sk_buff *segs;
81 int ret = 0;
82
83 features = netif_skb_dev_features(skb, dst->dev);
84 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
85 if (IS_ERR(segs)) {
86 kfree_skb(skb);
87 return -ENOMEM;
88 }
89
90 consume_skb(skb);
91
92 do {
93 struct sk_buff *nskb = segs->next;
94 int err;
95
96 segs->next = NULL;
97 err = dst_output(segs);
98
99 if (err && ret == 0)
100 ret = err;
101 segs = nskb;
102 } while (segs);
103
104 return ret;
105}
106 59
107static int ip_forward_finish(struct sk_buff *skb) 60static int ip_forward_finish(struct sk_buff *skb)
108{ 61{
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
114 if (unlikely(opt->optlen)) 67 if (unlikely(opt->optlen))
115 ip_forward_options(skb); 68 ip_forward_options(skb);
116 69
117 if (ip_gso_exceeds_dst_mtu(skb))
118 return ip_forward_finish_gso(skb);
119
120 return dst_output(skb); 70 return dst_output(skb);
121} 71}
122 72
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index c10a3ce5cbff..ed32313e307c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
232 * "Fragment Reassembly Timeout" message, per RFC792. 232 * "Fragment Reassembly Timeout" message, per RFC792.
233 */ 233 */
234 if (qp->user == IP_DEFRAG_AF_PACKET || 234 if (qp->user == IP_DEFRAG_AF_PACKET ||
235 (qp->user == IP_DEFRAG_CONNTRACK_IN && 235 ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
236 skb_rtable(head)->rt_type != RTN_LOCAL)) 236 (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
237 (skb_rtable(head)->rt_type != RTN_LOCAL)))
237 goto out_rcu_unlock; 238 goto out_rcu_unlock;
238 239
239 240
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1cbeba5edff9..a52f50187b54 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
211 return -EINVAL; 211 return -EINVAL;
212} 212}
213 213
214static int ip_finish_output_gso(struct sk_buff *skb)
215{
216 netdev_features_t features;
217 struct sk_buff *segs;
218 int ret = 0;
219
220 /* common case: locally created skb or seglen is <= mtu */
221 if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
222 skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
223 return ip_finish_output2(skb);
224
225 /* Slowpath - GSO segment length exceeds the dst MTU.
226 *
227 * This can happen in two cases:
228 * 1) TCP GRO packet, DF bit not set
229 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
230 * from host network stack.
231 */
232 features = netif_skb_features(skb);
233 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
234 if (IS_ERR(segs)) {
235 kfree_skb(skb);
236 return -ENOMEM;
237 }
238
239 consume_skb(skb);
240
241 do {
242 struct sk_buff *nskb = segs->next;
243 int err;
244
245 segs->next = NULL;
246 err = ip_fragment(segs, ip_finish_output2);
247
248 if (err && ret == 0)
249 ret = err;
250 segs = nskb;
251 } while (segs);
252
253 return ret;
254}
255
214static int ip_finish_output(struct sk_buff *skb) 256static int ip_finish_output(struct sk_buff *skb)
215{ 257{
216#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 258#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
220 return dst_output(skb); 262 return dst_output(skb);
221 } 263 }
222#endif 264#endif
223 if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb)) 265 if (skb_is_gso(skb))
266 return ip_finish_output_gso(skb);
267
268 if (skb->len > ip_skb_dst_mtu(skb))
224 return ip_fragment(skb, ip_finish_output2); 269 return ip_fragment(skb, ip_finish_output2);
225 else 270
226 return ip_finish_output2(skb); 271 return ip_finish_output2(skb);
227} 272}
228 273
229int ip_mc_output(struct sock *sk, struct sk_buff *skb) 274int ip_mc_output(struct sock *sk, struct sk_buff *skb)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b3f859731c60..2acc2337d38b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
540 unsigned int max_headroom; /* The extra header space needed */ 540 unsigned int max_headroom; /* The extra header space needed */
541 __be32 dst; 541 __be32 dst;
542 int err; 542 int err;
543 bool connected = true; 543 bool connected;
544 544
545 inner_iph = (const struct iphdr *)skb_inner_network_header(skb); 545 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
546 connected = (tunnel->parms.iph.daddr != 0);
546 547
547 dst = tnl_params->daddr; 548 dst = tnl_params->daddr;
548 if (dst == 0) { 549 if (dst == 0) {
@@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
882 */ 883 */
883 if (!IS_ERR(itn->fb_tunnel_dev)) { 884 if (!IS_ERR(itn->fb_tunnel_dev)) {
884 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; 885 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
886 itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
885 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev)); 887 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
886 } 888 }
887 rtnl_unlock(); 889 rtnl_unlock();
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index afcee51b90ed..13ef00f1e17b 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
239static int vti4_err(struct sk_buff *skb, u32 info) 239static int vti4_err(struct sk_buff *skb, u32 info)
240{ 240{
241 __be32 spi; 241 __be32 spi;
242 __u32 mark;
242 struct xfrm_state *x; 243 struct xfrm_state *x;
243 struct ip_tunnel *tunnel; 244 struct ip_tunnel *tunnel;
244 struct ip_esp_hdr *esph; 245 struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
254 if (!tunnel) 255 if (!tunnel)
255 return -1; 256 return -1;
256 257
258 mark = be32_to_cpu(tunnel->parms.o_key);
259
257 switch (protocol) { 260 switch (protocol) {
258 case IPPROTO_ESP: 261 case IPPROTO_ESP:
259 esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); 262 esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
281 return 0; 284 return 0;
282 } 285 }
283 286
284 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, 287 x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
285 spi, protocol, AF_INET); 288 spi, protocol, AF_INET);
286 if (!x) 289 if (!x)
287 return 0; 290 return 0;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 12e13bd82b5b..f40f321b41fc 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -22,7 +22,6 @@
22#endif 22#endif
23#include <net/netfilter/nf_conntrack_zones.h> 23#include <net/netfilter/nf_conntrack_zones.h>
24 24
25/* Returns new sk_buff, or NULL */
26static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) 25static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
27{ 26{
28 int err; 27 int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
33 err = ip_defrag(skb, user); 32 err = ip_defrag(skb, user);
34 local_bh_enable(); 33 local_bh_enable();
35 34
36 if (!err) 35 if (!err) {
37 ip_send_check(ip_hdr(skb)); 36 ip_send_check(ip_hdr(skb));
37 skb->local_df = 1;
38 }
38 39
39 return err; 40 return err;
40} 41}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8210964a9f19..044a0ddf6a79 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -236,15 +236,15 @@ exit:
236static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, 236static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
237 kgid_t *high) 237 kgid_t *high)
238{ 238{
239 kgid_t *data = net->ipv4.sysctl_ping_group_range; 239 kgid_t *data = net->ipv4.ping_group_range.range;
240 unsigned int seq; 240 unsigned int seq;
241 241
242 do { 242 do {
243 seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); 243 seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
244 244
245 *low = data[0]; 245 *low = data[0];
246 *high = data[1]; 246 *high = data[1];
247 } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); 247 } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
248} 248}
249 249
250 250
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index db1e0da871f4..5e676be3daeb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb,
1519 struct in_device *out_dev; 1519 struct in_device *out_dev;
1520 unsigned int flags = 0; 1520 unsigned int flags = 0;
1521 bool do_cache; 1521 bool do_cache;
1522 u32 itag; 1522 u32 itag = 0;
1523 1523
1524 /* get a working reference to the output device */ 1524 /* get a working reference to the output device */
1525 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); 1525 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 44eba052b43d..5cde8f263d40 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
45/* Update system visible IP port range */ 45/* Update system visible IP port range */
46static void set_local_port_range(struct net *net, int range[2]) 46static void set_local_port_range(struct net *net, int range[2])
47{ 47{
48 write_seqlock(&net->ipv4.sysctl_local_ports.lock); 48 write_seqlock(&net->ipv4.ip_local_ports.lock);
49 net->ipv4.sysctl_local_ports.range[0] = range[0]; 49 net->ipv4.ip_local_ports.range[0] = range[0];
50 net->ipv4.sysctl_local_ports.range[1] = range[1]; 50 net->ipv4.ip_local_ports.range[1] = range[1];
51 write_sequnlock(&net->ipv4.sysctl_local_ports.lock); 51 write_sequnlock(&net->ipv4.ip_local_ports.lock);
52} 52}
53 53
54/* Validate changes from /proc interface. */ 54/* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
57 size_t *lenp, loff_t *ppos) 57 size_t *lenp, loff_t *ppos)
58{ 58{
59 struct net *net = 59 struct net *net =
60 container_of(table->data, struct net, ipv4.sysctl_local_ports.range); 60 container_of(table->data, struct net, ipv4.ip_local_ports.range);
61 int ret; 61 int ret;
62 int range[2]; 62 int range[2];
63 struct ctl_table tmp = { 63 struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
87{ 87{
88 kgid_t *data = table->data; 88 kgid_t *data = table->data;
89 struct net *net = 89 struct net *net =
90 container_of(table->data, struct net, ipv4.sysctl_ping_group_range); 90 container_of(table->data, struct net, ipv4.ping_group_range.range);
91 unsigned int seq; 91 unsigned int seq;
92 do { 92 do {
93 seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); 93 seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
94 94
95 *low = data[0]; 95 *low = data[0];
96 *high = data[1]; 96 *high = data[1];
97 } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); 97 } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
98} 98}
99 99
100/* Update system visible IP port range */ 100/* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
102{ 102{
103 kgid_t *data = table->data; 103 kgid_t *data = table->data;
104 struct net *net = 104 struct net *net =
105 container_of(table->data, struct net, ipv4.sysctl_ping_group_range); 105 container_of(table->data, struct net, ipv4.ping_group_range.range);
106 write_seqlock(&net->ipv4.sysctl_local_ports.lock); 106 write_seqlock(&net->ipv4.ip_local_ports.lock);
107 data[0] = low; 107 data[0] = low;
108 data[1] = high; 108 data[1] = high;
109 write_sequnlock(&net->ipv4.sysctl_local_ports.lock); 109 write_sequnlock(&net->ipv4.ip_local_ports.lock);
110} 110}
111 111
112/* Validate changes from /proc interface. */ 112/* Validate changes from /proc interface. */
@@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = {
805 }, 805 },
806 { 806 {
807 .procname = "ping_group_range", 807 .procname = "ping_group_range",
808 .data = &init_net.ipv4.sysctl_ping_group_range, 808 .data = &init_net.ipv4.ping_group_range.range,
809 .maxlen = sizeof(gid_t)*2, 809 .maxlen = sizeof(gid_t)*2,
810 .mode = 0644, 810 .mode = 0644,
811 .proc_handler = ipv4_ping_group_range, 811 .proc_handler = ipv4_ping_group_range,
@@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = {
819 }, 819 },
820 { 820 {
821 .procname = "ip_local_port_range", 821 .procname = "ip_local_port_range",
822 .maxlen = sizeof(init_net.ipv4.sysctl_local_ports.range), 822 .maxlen = sizeof(init_net.ipv4.ip_local_ports.range),
823 .data = &init_net.ipv4.sysctl_local_ports.range, 823 .data = &init_net.ipv4.ip_local_ports.range,
824 .mode = 0644, 824 .mode = 0644,
825 .proc_handler = ipv4_local_port_range, 825 .proc_handler = ipv4_local_port_range,
826 }, 826 },
@@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
858 table[i].data += (void *)net - (void *)&init_net; 858 table[i].data += (void *)net - (void *)&init_net;
859 } 859 }
860 860
861 /*
862 * Sane defaults - nobody may create ping sockets.
863 * Boot scripts should set this to distro-specific group.
864 */
865 net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
866 net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
867
868 /*
869 * Set defaults for local port range
870 */
871 seqlock_init(&net->ipv4.sysctl_local_ports.lock);
872 net->ipv4.sysctl_local_ports.range[0] = 32768;
873 net->ipv4.sysctl_local_ports.range[1] = 61000;
874
875 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); 861 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
876 if (net->ipv4.ipv4_hdr == NULL) 862 if (net->ipv4.ipv4_hdr == NULL)
877 goto err_reg; 863 goto err_reg;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 40e701f2e1e0..186a8ecf92fa 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (err)
 		return err;
 
-	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-
-	skb->protocol = htons(ETH_P_IP);
+	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
 
 	return x->outer_mode->output2(x, skb);
 }
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 
 int xfrm4_output_finish(struct sk_buff *skb)
 {
+	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+	skb->protocol = htons(ETH_P_IP);
+
+#ifdef CONFIG_NETFILTER
+	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+#endif
+
+	return xfrm_output(skb);
+}
+
+static int __xfrm4_output(struct sk_buff *skb)
+{
+	struct xfrm_state *x = skb_dst(skb)->xfrm;
+
 #ifdef CONFIG_NETFILTER
-	if (!skb_dst(skb)->xfrm) {
+	if (!x) {
 		IPCB(skb)->flags |= IPSKB_REROUTED;
 		return dst_output(skb);
 	}
-
-	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 #endif
 
-	skb->protocol = htons(ETH_P_IP);
-	return xfrm_output(skb);
+	return x->outer_mode->afinfo->output_finish(skb);
 }
 
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb_dst(skb);
-	struct xfrm_state *x = dst->xfrm;
-
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-			    NULL, dst->dev,
-			    x->outer_mode->afinfo->output_finish,
+			    NULL, skb_dst(skb)->dev, __xfrm4_output,
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
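
The xfrm4_output() rework above funnels every packet through the POST_ROUTING hook with NF_HOOK_COND, which skips the hook chain and calls the output function directly when the packet is already flagged IPSKB_REROUTED. A toy userspace model of that conditional-hook idiom follows; the stand-in types and functions are invented, not the kernel's real NF_HOOK_COND signature.

	/* Sketch of the NF_HOOK_COND idiom: run the netfilter hooks only
	 * when `cond` holds, otherwise call the output function directly.
	 * Simplified userspace model; names are illustrative. */
	#include <stdio.h>

	struct sk_buff { int flags; };
	#define IPSKB_REROUTED 0x10

	static int run_hooks(struct sk_buff *skb, int (*okfn)(struct sk_buff *))
	{
		printf("POST_ROUTING hooks run\n");
		return okfn(skb);
	}

	static int nf_hook_cond(struct sk_buff *skb,
				int (*okfn)(struct sk_buff *), int cond)
	{
		return cond ? run_hooks(skb, okfn) : okfn(skb);
	}

	static int __xfrm4_output(struct sk_buff *skb)
	{
		printf("transform output\n");
		return 0;
	}

	int main(void)
	{
		struct sk_buff skb = { .flags = IPSKB_REROUTED };

		/* rerouted packets already traversed the hooks: skip them */
		return nf_hook_cond(&skb, __xfrm4_output,
				    !(skb.flags & IPSKB_REROUTED));
	}
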
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index 7f7b243e8139..a2ce0101eaac 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
 	int ret;
 	struct xfrm4_protocol *handler;
+	struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
 
-	for_each_protocol_rcu(*proto_handlers(protocol), handler)
+	if (!head)
+		return 0;
+
+	for_each_protocol_rcu(*head, handler)
 		if ((ret = handler->cb_handler(skb, err)) <= 0)
 			return ret;
 
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
 {
 	int ret;
 	struct xfrm4_protocol *handler;
+	struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
 
 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
 
-	for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+	if (!head)
+		goto out;
+
+	for_each_protocol_rcu(*head, handler)
 		if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
 			return ret;
 
+out:
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 	kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
 	int ret = -EEXIST;
 	int priority = handler->priority;
 
+	if (!proto_handlers(protocol) || !netproto(protocol))
+		return -EINVAL;
+
 	mutex_lock(&xfrm4_protocol_mutex);
 
 	if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
 	struct xfrm4_protocol *t;
 	int ret = -ENOENT;
 
+	if (!proto_handlers(protocol) || !netproto(protocol))
+		return -EINVAL;
+
 	mutex_lock(&xfrm4_protocol_mutex);
 
 	for (pprev = proto_handlers(protocol);
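
Both register/deregister paths and both receive paths above now refuse protocols for which proto_handlers() has no backing list, instead of dereferencing a NULL head. A self-contained sketch of that guarded table lookup; this is a userspace model with invented handler types, not the kernel's.

	/* Sketch of the guarded handler-table lookup: only a few protocol
	 * numbers map to a handler list; everything else yields NULL and
	 * must be checked before use. Illustrative userspace model. */
	#include <stddef.h>
	#include <stdio.h>

	#define IPPROTO_ESP	50
	#define IPPROTO_AH	51
	#define IPPROTO_COMP	108

	typedef int (*handler_fn)(void);

	static handler_fn esp_handlers[4], ah_handlers[4], ipcomp_handlers[4];

	static handler_fn *proto_handlers(unsigned char protocol)
	{
		switch (protocol) {
		case IPPROTO_ESP:	return esp_handlers;
		case IPPROTO_AH:	return ah_handlers;
		case IPPROTO_COMP:	return ipcomp_handlers;
		}
		return NULL;	/* unhandled protocol: caller must bail out */
	}

	int main(void)
	{
		handler_fn *head = proto_handlers(6 /* TCP */);

		if (!head) {
			puts("no xfrm handler list for this protocol");
			return 0;
		}
		/* ... walk the list ... */
		return 0;
	}
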
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 59f95affceb0..b2f091566f88 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	unsigned int off;
 	u16 flush = 1;
 	int proto;
-	__wsum csum;
 
 	off = skb_gro_offset(skb);
 	hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	NAPI_GRO_CB(skb)->flush |= flush;
 
-	csum = skb->csum;
-	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+	skb_gro_postpull_rcsum(skb, iph, nlen);
 
 	pp = ops->callbacks.gro_receive(head, skb);
 
-	skb->csum = csum;
-
 out_unlock:
 	rcu_read_unlock();
 
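
skb_gro_postpull_rcsum() folds the pulled IPv6 header out of the running CHECKSUM_COMPLETE value, which is what allows the save/restore of skb->csum to be dropped above. The underlying arithmetic is one's-complement subtraction; here is a standalone model of the csum_partial()/csum_sub() math showing that sum(whole) minus sum(header) equals sum(rest). The helpers are simplified re-implementations for illustration, not the kernel's optimized versions.

	/* Sketch of the checksum arithmetic behind *postpull_rcsum: the
	 * running checksum covers the whole packet, so pulling `hdr` bytes
	 * means subtracting their one's-complement sum. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
	{
		while (len > 1) {
			sum += (uint32_t)buf[0] << 8 | buf[1];
			buf += 2;
			len -= 2;
		}
		if (len)
			sum += (uint32_t)buf[0] << 8;
		while (sum >> 16)	/* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return sum;
	}

	static uint32_t csum_sub(uint32_t sum, uint32_t addend)
	{
		/* one's-complement subtraction: add the complement, refold */
		return csum_partial(NULL, 0, sum + (~addend & 0xffff));
	}

	int main(void)
	{
		uint8_t pkt[64];
		size_t hdr = 40;	/* e.g. an IPv6 header being pulled */
		uint32_t whole, pulled, inner;

		memset(pkt, 0xab, sizeof(pkt));
		whole  = csum_partial(pkt, sizeof(pkt), 0);
		pulled = csum_partial(pkt, hdr, 0);
		inner  = csum_sub(whole, pulled);

		/* inner now equals the checksum of the bytes after the header */
		printf("%04x == %04x\n", inner,
		       csum_partial(pkt + hdr, sizeof(pkt) - hdr, 0));
		return 0;
	}
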
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 40e7581374f7..fbf11562b54c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
-	if (skb->len <= mtu || skb->local_df)
+	if (skb->len <= mtu)
 		return false;
 
+	/* ipv6 conntrack defrag sets max_frag_size + local_df */
 	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
 		return true;
 
+	if (skb->local_df)
+		return false;
+
 	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
 		return false;
 
@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 		unsigned int maxnonfragsize, headersize;
 
 		headersize = sizeof(struct ipv6hdr) +
-			     (opt ? opt->tot_len : 0) +
+			     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
 			     (dst_allfrag(&rt->dst) ?
 			      sizeof(struct frag_hdr) : 0) +
 			     rt->rt6i_nfheader_len;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b05b609f69d1..f6a66bb4114d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
 {
 	u8 proto;
 
-	if (!data)
+	if (!data || !data[IFLA_IPTUN_PROTO])
 		return 0;
 
 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b7c0f827140b..6cc9f9371cc5 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		    u8 type, u8 code, int offset, __be32 info)
 {
 	__be32 spi;
+	__u32 mark;
 	struct xfrm_state *x;
 	struct ip6_tnl *t;
 	struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (!t)
 		return -1;
 
+	mark = be32_to_cpu(t->parms.o_key);
+
 	switch (protocol) {
 	case IPPROTO_ESP:
 		esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	    type != NDISC_REDIRECT)
 		return 0;
 
-	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
 			      spi, protocol, AF_INET6);
 	if (!x)
 		return 0;
@@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void)
 
 	err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
 	if (err < 0) {
-		unregister_pernet_device(&vti6_net_ops);
 		pr_err("%s: can't register vti6 protocol\n", __func__);
 
 		goto out;
@@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void)
 	err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
 	if (err < 0) {
 		xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-		unregister_pernet_device(&vti6_net_ops);
 		pr_err("%s: can't register vti6 protocol\n", __func__);
 
 		goto out;
@@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void)
 	if (err < 0) {
 		xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
 		xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-		unregister_pernet_device(&vti6_net_ops);
 		pr_err("%s: can't register vti6 protocol\n", __func__);
 
 		goto out;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 09a22f4f36c9..ca8d4ea48a5d 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -851,7 +851,7 @@ out:
 static void ndisc_recv_na(struct sk_buff *skb)
 {
 	struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-	const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+	struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
 	const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
 	u8 *lladdr = NULL;
 	u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
 		/*
 		 * Change: router to host
 		 */
-		struct rt6_info *rt;
-		rt = rt6_get_dflt_router(saddr, dev);
-		if (rt)
-			ip6_del_rt(rt);
+		rt6_clean_tohost(dev_net(dev), saddr);
 	}
 
 out:
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 95f3f1da0d7f..d38e6a8d8b9f 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
 		.daddr = iph->daddr,
 		.saddr = iph->saddr,
 	};
+	int err;
 
 	dst = ip6_route_output(net, skb->sk, &fl6);
-	if (dst->error) {
+	err = dst->error;
+	if (err) {
 		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 		LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
 		dst_release(dst);
-		return dst->error;
+		return err;
 	}
 
 	/* Drop old route. */
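
The ip6_route_me_harder() change above is a use-after-free fix: dst->error was read again after dst_release(), which may have freed the entry. The rule is to copy any field you still need out of a refcounted object before dropping the reference. A userspace sketch of the pattern, with free() standing in for the final release; the struct and values are invented for illustration.

	/* Sketch of the fix: save the field first, then release. */
	#include <stdio.h>
	#include <stdlib.h>

	struct dst_entry { int error; };

	static void dst_release(struct dst_entry *dst)
	{
		free(dst);	/* may be the last reference */
	}

	static int route_output(struct dst_entry **dstp)
	{
		struct dst_entry *dst = malloc(sizeof(*dst));
		int err;

		if (!dst)
			return -12;	/* -ENOMEM */
		dst->error = -113;	/* say, -EHOSTUNREACH */

		err = dst->error;	/* save first ... */
		if (err) {
			dst_release(dst);	/* ... dst may be gone now */
			return err;		/* NOT: return dst->error; */
		}
		*dstp = dst;
		return 0;
	}

	int main(void)
	{
		struct dst_entry *dst;

		printf("route_output() = %d\n", route_output(&dst));
		return 0;
	}
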
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 004fffb6c221..6ebdb7b6744c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
 }
 
+#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
+
+/* Remove routers and update dst entries when gateway turn into host. */
+static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+{
+	struct in6_addr *gateway = (struct in6_addr *)arg;
+
+	if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+	     ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+	     ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+		return -1;
+	}
+	return 0;
+}
+
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+{
+	fib6_clean_all(net, fib6_clean_tohost, gateway);
+}
+
 struct arg_dev_net {
 	struct net_device *dev;
 	struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
 	if (tb[RTA_OIF])
 		oif = nla_get_u32(tb[RTA_OIF]);
 
+	if (tb[RTA_MARK])
+		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
 	if (iif) {
 		struct net_device *dev;
 		int flags = 0;
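
rt6_clean_tohost() above plugs into the generic fib6_clean_all() walker, whose callback returns nonzero to have an entry deleted. A compact userspace model of that walk-and-filter idiom follows; it runs over a plain linked list (the real walker is a fib trie) and the flag names only loosely mirror the hunk.

	/* Sketch of the fib6_clean_all() idiom: walk every entry and let a
	 * predicate decide; nonzero return means "delete this one". */
	#include <stdio.h>
	#include <stdlib.h>

	#define RTF_GATEWAY	0x1
	struct rt6 { unsigned flags; int gw; struct rt6 *next; };

	static int clean_tohost(struct rt6 *rt, void *arg)
	{
		int *gateway = arg;

		return (rt->flags & RTF_GATEWAY) && rt->gw == *gateway ? -1 : 0;
	}

	static void clean_all(struct rt6 **head,
			      int (*fn)(struct rt6 *, void *), void *arg)
	{
		struct rt6 **pp = head, *rt;

		while ((rt = *pp)) {
			if (fn(rt, arg)) {	/* nonzero: unlink and free */
				*pp = rt->next;
				free(rt);
			} else {
				pp = &rt->next;
			}
		}
	}

	int main(void)
	{
		struct rt6 *b = calloc(1, sizeof(*b));
		struct rt6 *a = calloc(1, sizeof(*a));
		int gw = 42;

		if (!a || !b)
			return 1;
		a->flags = RTF_GATEWAY;
		a->gw = 42;
		a->next = b;
		clean_all(&a, clean_tohost, &gw);	/* drops the gateway route */
		puts(a ? "host route kept" : "list empty");
		free(a);
		return 0;
	}
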
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 0d78132ff18a..8517d3cd1aed 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 	if (NAPI_GRO_CB(skb)->flush)
 		goto skip_csum;
 
-	wsum = skb->csum;
+	wsum = NAPI_GRO_CB(skb)->csum;
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_NONE:
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 19ef329bdbf8..b930d080c66f 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (err)
 		return err;
 
-	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
-	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
-
-	skb->protocol = htons(ETH_P_IPV6);
 	skb->local_df = 1;
 
 	return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 
 int xfrm6_output_finish(struct sk_buff *skb)
 {
+	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+	skb->protocol = htons(ETH_P_IPV6);
+
 #ifdef CONFIG_NETFILTER
 	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 #endif
 
-	skb->protocol = htons(ETH_P_IPV6);
 	return xfrm_output(skb);
 }
 
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
 	struct xfrm_state *x = dst->xfrm;
 	int mtu;
 
+#ifdef CONFIG_NETFILTER
+	if (!x) {
+		IP6CB(skb)->flags |= IP6SKB_REROUTED;
+		return dst_output(skb);
+	}
+#endif
+
 	if (skb->protocol == htons(ETH_P_IPV6))
 		mtu = ip6_skb_dst_mtu(skb);
 	else
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
 
 int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
-	return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
-		       skb_dst(skb)->dev, __xfrm6_output);
+	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+			    NULL, skb_dst(skb)->dev, __xfrm6_output,
+			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c
index 6ab989c486f7..54d13f8dbbae 100644
--- a/net/ipv6/xfrm6_protocol.c
+++ b/net/ipv6/xfrm6_protocol.c
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
 	int ret;
 	struct xfrm6_protocol *handler;
+	struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
+
+	if (!head)
+		return 0;
 
 	for_each_protocol_rcu(*proto_handlers(protocol), handler)
 		if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
 	struct xfrm6_protocol __rcu **pprev;
 	struct xfrm6_protocol *t;
 	bool add_netproto = false;
-
 	int ret = -EEXIST;
 	int priority = handler->priority;
 
+	if (!proto_handlers(protocol) || !netproto(protocol))
+		return -EINVAL;
+
 	mutex_lock(&xfrm6_protocol_mutex);
 
 	if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
 	struct xfrm6_protocol *t;
 	int ret = -ENOENT;
 
+	if (!proto_handlers(protocol) || !netproto(protocol))
+		return -EINVAL;
+
 	mutex_lock(&xfrm6_protocol_mutex);
 
 	for (pprev = proto_handlers(protocol);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 01e77b0ae075..8c9d7302c846 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	spin_lock_irqsave(&list->lock, flags);
 
 	while (list_skb != (struct sk_buff *)list) {
-		if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
 			this = list_skb;
 			break;
 		}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 222c28b75315..f169b6ee94ee 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
 
 	bool started, abort, hw_begun, notified;
 	bool to_be_freed;
+	bool on_channel;
 
 	unsigned long hw_start_time;
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index dee50aefd6e8..27600a9808ba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
 
 	sdata_lock(sdata);
 
-	if (ifmgd->auth_data) {
+	if (ifmgd->auth_data || ifmgd->assoc_data) {
+		const u8 *bssid = ifmgd->auth_data ?
+				ifmgd->auth_data->bss->bssid :
+				ifmgd->assoc_data->bss->bssid;
+
 		/*
-		 * If we are trying to authenticate while suspending, cfg80211
-		 * won't know and won't actually abort those attempts, thus we
-		 * need to do that ourselves.
+		 * If we are trying to authenticate / associate while suspending,
+		 * cfg80211 won't know and won't actually abort those attempts,
+		 * thus we need to do that ourselves.
 		 */
-		ieee80211_send_deauth_disassoc(sdata,
-					       ifmgd->auth_data->bss->bssid,
+		ieee80211_send_deauth_disassoc(sdata, bssid,
 					       IEEE80211_STYPE_DEAUTH,
 					       WLAN_REASON_DEAUTH_LEAVING,
 					       false, frame_buf);
-		ieee80211_destroy_auth_data(sdata, false);
+		if (ifmgd->assoc_data)
+			ieee80211_destroy_assoc_data(sdata, false);
+		if (ifmgd->auth_data)
+			ieee80211_destroy_auth_data(sdata, false);
 		cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
 				      IEEE80211_DEAUTH_FRAME_LEN);
 	}
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 6fb38558a5e6..7a17decd27f9 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 		container_of(work, struct ieee80211_roc_work, work.work);
 	struct ieee80211_sub_if_data *sdata = roc->sdata;
 	struct ieee80211_local *local = sdata->local;
-	bool started;
+	bool started, on_channel;
 
 	mutex_lock(&local->mtx);
 
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 	if (!roc->started) {
 		struct ieee80211_roc_work *dep;
 
-		/* start this ROC */
-		ieee80211_offchannel_stop_vifs(local);
+		WARN_ON(local->use_chanctx);
+
+		/* If actually operating on the desired channel (with at least
+		 * 20 MHz channel width) don't stop all the operations but still
+		 * treat it as though the ROC operation started properly, so
+		 * other ROC operations won't interfere with this one.
+		 */
+		roc->on_channel = roc->chan == local->_oper_chandef.chan &&
+				  local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
+				  local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
 
-		/* switch channel etc */
+		/* start this ROC */
 		ieee80211_recalc_idle(local);
 
-		local->tmp_channel = roc->chan;
-		ieee80211_hw_config(local, 0);
+		if (!roc->on_channel) {
+			ieee80211_offchannel_stop_vifs(local);
+
+			local->tmp_channel = roc->chan;
+			ieee80211_hw_config(local, 0);
+		}
 
 		/* tell userspace or send frame */
 		ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
  finish:
 		list_del(&roc->list);
 		started = roc->started;
+		on_channel = roc->on_channel;
 		ieee80211_roc_notify_destroy(roc, !roc->abort);
 
-		if (started) {
+		if (started && !on_channel) {
 			ieee80211_flush_queues(local, NULL);
 
 			local->tmp_channel = NULL;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 216c45b949e5..2b608b2b70ec 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
 		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
 			sta->last_rx = jiffies;
-			if (ieee80211_is_data(hdr->frame_control)) {
+			if (ieee80211_is_data(hdr->frame_control) &&
+			    !is_multicast_ether_addr(hdr->addr1)) {
 				sta->last_rx_rate_idx = status->rate_idx;
 				sta->last_rx_rate_flag = status->flag;
 				sta->last_rx_rate_vht_flag = status->vht_flag;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 137a192e64bc..847d92f6bef6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
 	atomic_dec(&ps->num_sta_ps);
 
 	/* This station just woke up and isn't aware of our SMPS state */
-	if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+	    !ieee80211_smps_is_restrictive(sta->known_smps_mode,
 					   sdata->smps_mode) &&
 	    sta->known_smps_mode != sdata->bss->req_smps &&
 	    sta_info_tx_streams(sta) != 1) {
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 00ba90b02ab2..60cb7a665976 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
 	    !is_multicast_ether_addr(hdr->addr1))
 		txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
-	if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-	    (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
 		txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
-	else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
 		txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
 	put_unaligned_le16(txflags, pos);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index a0b0aea76525..cec5b60487a4 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -21,10 +21,10 @@
 
 #define VIF_ENTRY	__field(enum nl80211_iftype, vif_type) __field(void *, sdata)	\
 			__field(bool, p2p)						\
-			__string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+			__string(vif_name, sdata->name)
 #define VIF_ASSIGN	__entry->vif_type = sdata->vif.type; __entry->sdata = sdata;	\
 			__entry->p2p = sdata->vif.p2p;					\
-			__assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
+			__assign_str(vif_name, sdata->name)
 #define VIF_PR_FMT	" vif:%s(%d%s)"
 #define VIF_PR_ARG	__get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 275c94f995f7..3c365837e910 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 	mutex_unlock(&local->mtx);
 
 	if (sched_scan_stopped)
-		cfg80211_sched_scan_stopped(local->hw.wiphy);
+		cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
 	/*
 	 * If this is for hw restart things are still running.
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index e9e36a256165..9265adfdabfc 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 	if (!vht_cap_ie || !sband->vht_cap.vht_supported)
 		return;
 
-	/* A VHT STA must support 40 MHz */
-	if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-		return;
+	/*
+	 * A VHT STA must support 40 MHz, but if we verify that here
+	 * then we break a few things - some APs (e.g. Netgear R6300v2
+	 * and others based on the BCM4360 chipset) will unset this
+	 * capability bit when operating in 20 MHz.
+	 */
 
 	vht_cap->vht_supported = true;
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ccc46fa5edbc..58579634427d 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #ifdef CONFIG_NF_NAT_NEEDED
 	int ret;
 
+	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+		return 0;
+
 	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
 					cda[CTA_NAT_DST]);
 	if (ret < 0)
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 804105391b9a..345acfb1720b 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -66,20 +66,6 @@ struct nft_jumpstack {
 	int			rulenum;
 };
 
-static inline void
-nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
-		struct nft_jumpstack *jumpstack, unsigned int stackptr)
-{
-	struct nft_stats __percpu *stats;
-	const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
-
-	rcu_read_lock_bh();
-	stats = rcu_dereference(nft_base_chain(chain)->stats);
-	__this_cpu_inc(stats->pkts);
-	__this_cpu_add(stats->bytes, pkt->skb->len);
-	rcu_read_unlock_bh();
-}
-
 enum nft_trace {
 	NFT_TRACE_RULE,
 	NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
 unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
-	const struct nft_chain *chain = ops->priv;
+	const struct nft_chain *chain = ops->priv, *basechain = chain;
 	const struct nft_rule *rule;
 	const struct nft_expr *expr, *last;
 	struct nft_data data[NFT_REG_MAX + 1];
 	unsigned int stackptr = 0;
 	struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-	int rulenum = 0;
+	struct nft_stats __percpu *stats;
+	int rulenum;
 	/*
 	 * Cache cursor to avoid problems in case that the cursor is updated
 	 * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 	unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
 
 do_chain:
+	rulenum = 0;
 	rule = list_entry(&chain->rules, struct nft_rule, list);
 next_rule:
 	data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
 		switch (data[NFT_REG_VERDICT].verdict) {
 		case NFT_BREAK:
 			data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
-			/* fall through */
+			continue;
 		case NFT_CONTINUE:
+			if (unlikely(pkt->skb->nf_trace))
+				nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
 			continue;
 		}
 		break;
@@ -183,37 +173,44 @@ next_rule:
 		jumpstack[stackptr].rule  = rule;
 		jumpstack[stackptr].rulenum = rulenum;
 		stackptr++;
-		/* fall through */
+		chain = data[NFT_REG_VERDICT].chain;
+		goto do_chain;
 	case NFT_GOTO:
+		if (unlikely(pkt->skb->nf_trace))
+			nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
 		chain = data[NFT_REG_VERDICT].chain;
 		goto do_chain;
 	case NFT_RETURN:
 		if (unlikely(pkt->skb->nf_trace))
 			nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-
-		/* fall through */
+		break;
 	case NFT_CONTINUE:
+		if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
+			nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
 		break;
 	default:
 		WARN_ON(1);
 	}
 
 	if (stackptr > 0) {
-		if (unlikely(pkt->skb->nf_trace))
-			nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
-
 		stackptr--;
 		chain = jumpstack[stackptr].chain;
 		rule  = jumpstack[stackptr].rule;
 		rulenum = jumpstack[stackptr].rulenum;
 		goto next_rule;
 	}
-	nft_chain_stats(chain, pkt, jumpstack, stackptr);
 
 	if (unlikely(pkt->skb->nf_trace))
-		nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+		nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+
+	rcu_read_lock_bh();
+	stats = rcu_dereference(nft_base_chain(basechain)->stats);
+	__this_cpu_inc(stats->pkts);
+	__this_cpu_add(stats->bytes, pkt->skb->len);
+	rcu_read_unlock_bh();
 
-	return nft_base_chain(chain)->policy;
+	return nft_base_chain(basechain)->policy;
 }
 EXPORT_SYMBOL_GPL(nft_do_chain);
 
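
The nft_do_chain() rework above keeps a basechain pointer for the stats/policy at the end and, crucially, resets rulenum every time a chain is (re)entered. A self-contained toy interpreter of the jump-stack traversal follows: JUMP pushes a return point, GOTO does not, RETURN pops. All names and the two-chain ruleset are invented for illustration and do not model nftables' real data structures.

	/* Sketch of verdict-driven chain traversal with an explicit stack. */
	#include <stdio.h>

	enum verdict { V_CONTINUE, V_JUMP, V_GOTO, V_RETURN, V_ACCEPT };

	struct rule  { enum verdict v; int target; };
	struct chain { const char *name; const struct rule *rules; int nrules; };
	struct frame { int chain, rule, rulenum; };

	static const struct rule base_rules[] = {
		{ V_JUMP,   1 },	/* enter chain 1, resume here after */
		{ V_ACCEPT, 0 },
	};
	static const struct rule sub_rules[] = {
		{ V_CONTINUE, 0 },
		{ V_RETURN,   0 },
	};
	static const struct chain chains[] = {
		{ "base", base_rules, 2 },
		{ "sub",  sub_rules,  2 },
	};

	int main(void)
	{
		struct frame stack[16];
		int sp = 0, c = 0, r, rulenum;

	do_chain:
		r = 0;
		rulenum = 0;	/* reset per chain entry - the bug fixed above */
	next_rule:
		for (; r < chains[c].nrules; r++) {
			const struct rule *rule = &chains[c].rules[r];

			rulenum++;
			printf("%s: rule %d\n", chains[c].name, rulenum);

			switch (rule->v) {
			case V_CONTINUE:
				continue;
			case V_JUMP:
				stack[sp].chain = c;	/* remember resume point */
				stack[sp].rule = r + 1;
				stack[sp].rulenum = rulenum;
				sp++;
				c = rule->target;
				goto do_chain;
			case V_GOTO:
				c = rule->target;	/* no return point pushed */
				goto do_chain;
			case V_RETURN:
				goto pop;
			case V_ACCEPT:
				puts("verdict: accept");
				return 0;
			}
		}
	pop:
		if (sp > 0) {	/* resume the calling chain after a jump */
			sp--;
			c = stack[sp].chain;
			r = stack[sp].rule;
			rulenum = stack[sp].rulenum;
			goto next_rule;
		}
		puts("fell off the base chain: apply its policy");
		return 0;
	}
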
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e009087620e3..23ef77c60fff 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -256,15 +256,15 @@ replay:
 #endif
 		{
 			nfnl_unlock(subsys_id);
-			kfree_skb(nskb);
-			return netlink_ack(skb, nlh, -EOPNOTSUPP);
+			netlink_ack(skb, nlh, -EOPNOTSUPP);
+			return kfree_skb(nskb);
 		}
 	}
 
 	if (!ss->commit || !ss->abort) {
 		nfnl_unlock(subsys_id);
-		kfree_skb(nskb);
-		return netlink_ack(skb, nlh, -EOPNOTSUPP);
+		netlink_ack(skb, nlh, -EOPNOTSUPP);
+		return kfree_skb(skb);
 	}
 
 	while (skb->len >= nlmsg_total_size(0)) {
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 7633a752c65e..0ad080790a32 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
 	_debug("tktlen: %x", tktlen);
 	if (tktlen > AFSTOKEN_RK_TIX_MAX)
 		return -EKEYREJECTED;
-	if (8 * 4 + tktlen != toklen)
+	if (toklen < 8 * 4 + tktlen)
 		return -EKEYREJECTED;
 
 	plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index eed8404443d8..f435a88d899a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
 	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
 };
 
+static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+{
+	memset(r, 0, sizeof(*r));
+	tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+}
+
 static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		  u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		return err;
 
 	memcpy(&cp, p, sizeof(cp));
-	memset(&new_filter_result, 0, sizeof(new_filter_result));
-	tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+	tcindex_filter_result_init(&new_filter_result);
 
+	tcindex_filter_result_init(&cr);
 	if (old_r)
-		memcpy(&cr, r, sizeof(cr));
-	else {
-		memset(&cr, 0, sizeof(cr));
-		tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-	}
+		cr.res = r->res;
 
 	if (tb[TCA_TCINDEX_HASH])
 		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	err = -ENOMEM;
 	if (!cp.perfect && !cp.h) {
 		if (valid_perfect_hash(&cp)) {
+			int i;
+
 			cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
 			if (!cp.perfect)
 				goto errout;
+			for (i = 0; i < cp.hash; i++)
+				tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+					      TCA_TCINDEX_POLICE);
 			balloc = 1;
 		} else {
 			cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		tcf_bind_filter(tp, &cr.res, base);
 	}
 
-	tcf_exts_change(tp, &cr.exts, &e);
+	if (old_r)
+		tcf_exts_change(tp, &r->exts, &e);
+	else
+		tcf_exts_change(tp, &cr.exts, &e);
 
 	tcf_tree_lock(tp);
 	if (old_r && old_r != r)
-		memset(old_r, 0, sizeof(*old_r));
+		tcindex_filter_result_init(old_r);
 
 	memcpy(p, &cp, sizeof(cp));
-	memcpy(r, &cr, sizeof(cr));
+	r->res = cr.res;
 
 	if (r == &new_filter_result) {
 		struct tcindex_filter **fp;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 7d09a712cb1f..88f108edfb58 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	ASSERT_RTNL();
+
 	trace_cfg80211_sched_scan_stopped(wiphy);
 
-	rtnl_lock();
 	__cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+	rtnl_lock();
+	cfg80211_sched_scan_stopped_rtnl(wiphy);
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
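
The scan.c change above is the classic lock-split: a cfg80211_sched_scan_stopped_rtnl() variant asserting the RTNL is already held, plus a thin wrapper that takes the lock, so callers that already run under the RTNL (such as ieee80211_reconfig() in the util.c hunk) can call in without deadlocking. A pthread sketch of the idiom, with a manual "held" flag standing in for ASSERT_RTNL(); all names are illustrative.

	/* Sketch of the lock-split idiom: *_locked asserts the caller
	 * holds the lock; the wrapper acquires it. */
	#include <assert.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
	static int rtnl_held;

	static void sched_scan_stopped_locked(void)
	{
		assert(rtnl_held);	/* models ASSERT_RTNL() */
		puts("stop sched scan");
	}

	static void sched_scan_stopped(void)
	{
		pthread_mutex_lock(&rtnl);
		rtnl_held = 1;
		sched_scan_stopped_locked();
		rtnl_held = 0;
		pthread_mutex_unlock(&rtnl);
	}

	int main(void)
	{
		/* a caller that does not hold the lock uses the wrapper ... */
		sched_scan_stopped();

		/* ... one that already does calls the _locked variant
		 * directly, instead of deadlocking on a second acquire */
		pthread_mutex_lock(&rtnl);
		rtnl_held = 1;
		sched_scan_stopped_locked();
		rtnl_held = 0;
		pthread_mutex_unlock(&rtnl);
		return 0;
	}
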
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index acdcb4a81817..3546a77033de 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
 					NULL, 0, NULL, 0,
 					WLAN_STATUS_UNSPECIFIED_FAILURE,
 					false, NULL);
-			cfg80211_sme_free(wdev);
 		}
 		wdev_unlock(wdev);
 	}
@@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 			cfg80211_unhold_bss(bss_from_pub(bss));
 			cfg80211_put_bss(wdev->wiphy, bss);
 		}
+		cfg80211_sme_free(wdev);
 		return;
 	}
 
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index fd8fa9aa7c4e..5b3add31f9f1 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -25,7 +25,7 @@ cat << EOF
 #define __IGNORE_rmdir		/* unlinkat */
 #define __IGNORE_lchown		/* fchownat */
 #define __IGNORE_access		/* faccessat */
-#define __IGNORE_rename		/* renameat */
+#define __IGNORE_rename		/* renameat2 */
 #define __IGNORE_readlink	/* readlinkat */
 #define __IGNORE_symlink	/* symlinkat */
 #define __IGNORE_utimes		/* futimesat */
@@ -37,6 +37,9 @@ cat << EOF
 #define __IGNORE_lstat64	/* fstatat64 */
 #endif
 
+/* Missing flags argument */
+#define __IGNORE_renameat	/* renameat2 */
+
 /* CLOEXEC flag */
 #define __IGNORE_pipe		/* pipe2 */
 #define __IGNORE_dup2		/* dup3 */
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 94d08733cb38..76cbb9ec953a 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -182,6 +182,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
 int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
 	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
 	int ret;
 
 	switch (cmd) {
@@ -196,6 +197,11 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 		dmaengine_resume(prtd->dma_chan);
 		break;
 	case SNDRV_PCM_TRIGGER_SUSPEND:
+		if (runtime->info & SNDRV_PCM_INFO_PAUSE)
+			dmaengine_pause(prtd->dma_chan);
+		else
+			dmaengine_terminate_all(prtd->dma_chan);
+		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		dmaengine_pause(prtd->dma_chan);
 		break;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2c54629d62d1..6cc3cf285558 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1743,6 +1743,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
 	/* Lynx Point */
 	{ PCI_DEVICE(0x8086, 0x8c20),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+	/* 9 Series */
+	{ PCI_DEVICE(0x8086, 0x8ca0),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
 	/* Wellsburg */
 	{ PCI_DEVICE(0x8086, 0x8d20),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
diff --git a/tools/Makefile b/tools/Makefile
index bcae806b0c39..9a617adc6675 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -44,6 +44,9 @@ cpupower: FORCE
 cgroup firewire hv guest usb virtio vm net: FORCE
 	$(call descend,$@)
 
+liblockdep: FORCE
+	$(call descend,lib/lockdep)
+
 libapikfs: FORCE
 	$(call descend,lib/api)
 
@@ -91,6 +94,9 @@ cpupower_clean:
 cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
 	$(call descend,$(@:_clean=),clean)
 
+liblockdep_clean:
+	$(call descend,lib/lockdep,clean)
+
 libapikfs_clean:
 	$(call descend,lib/api,clean)
 
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index cb09d3ff8f58..bba2f5253b6e 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -1,8 +1,7 @@
 # file format version
 FILE_VERSION = 1
 
-MAKEFLAGS += --no-print-directory
-LIBLOCKDEP_VERSION=$(shell make -sC ../../.. kernelversion)
+LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
 
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
@@ -231,7 +230,7 @@ install_lib: all_cmd
 install: install_lib
 
 clean:
-	$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+	$(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
 	$(RM) tags TAGS
 
 endif # skip-makefile