-rw-r--r--.mailmap1
-rw-r--r--Documentation/ABI/testing/sysfs-class-mei15
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.txt10
-rw-r--r--Documentation/devicetree/bindings/input/stmpe-keypad.txt2
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt3
-rw-r--r--Documentation/kernel-parameters.txt1
-rw-r--r--Documentation/networking/ip-sysctl.txt2
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py49
-rw-r--r--Documentation/thermal/cpu-cooling-api.txt15
-rw-r--r--MAINTAINERS40
-rw-r--r--Makefile3
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts24
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi2
-rw-r--r--arch/arm/boot/dts/berlin2q-marvell-dmp.dts2
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi63
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts10
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts4
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi6
-rw-r--r--arch/arm/boot/dts/imx25.dtsi2
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts22
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dts15
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-evb.dtsi30
-rw-r--r--arch/arm/boot/dts/sama5d3xmb.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts8
-rw-r--r--arch/arm/boot/dts/vf610-twr.dts15
-rw-r--r--arch/arm/configs/exynos_defconfig18
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/kernel/calls.S1
-rw-r--r--arch/arm/kernel/perf_regs.c8
-rw-r--r--arch/arm/kernel/setup.c9
-rw-r--r--arch/arm/kernel/smp.c12
-rw-r--r--arch/arm/mach-at91/board-dt-sama5.c18
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c2
-rw-r--r--arch/arm/mach-imx/clk-imx6sx.c3
-rw-r--r--arch/arm/mach-omap2/board-generic.c18
-rw-r--r--arch/arm/mach-omap2/common.h1
-rw-r--r--arch/arm/mach-omap2/control.h4
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S21
-rw-r--r--arch/arm/mach-omap2/omap-smp.c13
-rw-r--r--arch/arm/mach-omap2/timer.c44
-rw-r--r--arch/arm/mach-rockchip/rockchip.c27
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c7
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c3
-rw-r--r--arch/arm/mm/dump.c9
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm64/configs/defconfig9
-rw-r--r--arch/arm64/include/asm/arch_timer.h1
-rw-r--r--arch/arm64/include/asm/cpu.h5
-rw-r--r--arch/arm64/include/asm/dma-mapping.h11
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h5
-rw-r--r--arch/arm64/include/asm/processor.h4
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/kernel/cpuinfo.c10
-rw-r--r--arch/arm64/kernel/efi.c2
-rw-r--r--arch/arm64/kernel/module.c1
-rw-r--r--arch/arm64/kernel/perf_regs.c8
-rw-r--r--arch/arm64/kernel/setup.c1
-rw-r--r--arch/arm64/kernel/smp_spin_table.c1
-rw-r--r--arch/arm64/kernel/suspend.c14
-rw-r--r--arch/arm64/kvm/hyp.S1
-rw-r--r--arch/arm64/kvm/reset.c1
-rw-r--r--arch/arm64/mm/init.c8
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c1
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h1
-rw-r--r--arch/ia64/kernel/acpi.c9
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/nios2/kernel/cpuinfo.c1
-rw-r--r--arch/nios2/kernel/entry.S20
-rw-r--r--arch/parisc/include/asm/ldcw.h13
-rw-r--r--arch/powerpc/crypto/sha1.c1
-rw-r--r--arch/powerpc/include/asm/kexec.h10
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/thread_info.h13
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c2
-rw-r--r--arch/powerpc/kernel/smp.c9
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S1
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c8
-rw-r--r--arch/s390/hypfs/hypfs_vm.c2
-rw-r--r--arch/s390/include/asm/irqflags.h2
-rw-r--r--arch/s390/include/asm/timex.h10
-rw-r--r--arch/s390/include/uapi/asm/unistd.h3
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/uprobes.c69
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/mm/pgtable.c5
-rw-r--r--arch/s390/net/bpf_jit_comp.c8
-rw-r--r--arch/um/Kconfig.common1
-rw-r--r--arch/x86/boot/Makefile1
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/aes_ctrby8_avx-x86_64.S46
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c2
-rw-r--r--arch/x86/include/asm/vgtod.h6
-rw-r--r--arch/x86/kernel/acpi/boot.c9
-rw-r--r--arch/x86/kernel/cpu/Makefile1
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_rapl.c44
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c17
-rw-r--r--arch/x86/kernel/kprobes/core.c20
-rw-r--r--arch/x86/kernel/perf_regs.c90
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/vmx.c88
-rw-r--r--arch/x86/lib/insn.c2
-rw-r--r--arch/x86/mm/init.c37
-rw-r--r--arch/x86/um/sys_call_table_32.c2
-rw-r--r--arch/x86/um/sys_call_table_64.c2
-rw-r--r--arch/x86/vdso/vma.c45
-rw-r--r--arch/x86/xen/enlighten.c22
-rw-r--r--arch/x86/xen/p2m.c20
-rw-r--r--arch/x86/xen/setup.c42
-rw-r--r--arch/x86/xen/time.c18
-rw-r--r--block/blk-core.c21
-rw-r--r--block/blk-mq-tag.c14
-rw-r--r--block/blk-mq-tag.h1
-rw-r--r--block/blk-mq.c75
-rw-r--r--block/blk-mq.h1
-rw-r--r--block/blk-timeout.c3
-rw-r--r--crypto/aes_generic.c1
-rw-r--r--crypto/af_alg.c3
-rw-r--r--crypto/ansi_cprng.c1
-rw-r--r--crypto/blowfish_generic.c1
-rw-r--r--crypto/camellia_generic.c1
-rw-r--r--crypto/cast5_generic.c1
-rw-r--r--crypto/cast6_generic.c1
-rw-r--r--crypto/crc32c_generic.c1
-rw-r--r--crypto/crct10dif_generic.c1
-rw-r--r--crypto/des_generic.c7
-rw-r--r--crypto/ghash-generic.c1
-rw-r--r--crypto/krng.c1
-rw-r--r--crypto/salsa20_generic.c1
-rw-r--r--crypto/serpent_generic.c1
-rw-r--r--crypto/sha1_generic.c1
-rw-r--r--crypto/sha256_generic.c2
-rw-r--r--crypto/sha512_generic.c2
-rw-r--r--crypto/tea.c1
-rw-r--r--crypto/tgr192.c1
-rw-r--r--crypto/twofish_generic.c1
-rw-r--r--crypto/wp512.c1
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/acpi_processor.c25
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/int340x_thermal.c11
-rw-r--r--drivers/acpi/processor_core.c56
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/scan.c13
-rw-r--r--drivers/acpi/video.c27
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/opp.c39
-rw-r--r--drivers/block/null_blk.c2
-rw-r--r--drivers/block/nvme-core.c175
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/bus/arm-cci.c3
-rw-r--r--drivers/char/agp/ali-agp.c2
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/agp/ati-agp.c2
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/agp/intel-agp.c2
-rw-r--r--drivers/char/agp/intel-gtt.c2
-rw-r--r--drivers/char/agp/nvidia-agp.c2
-rw-r--r--drivers/char/agp/via-agp.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c46
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c4
-rw-r--r--drivers/clk/at91/clk-slow.c27
-rw-r--r--drivers/clk/berlin/bg2q.c1
-rw-r--r--drivers/clk/clk-ppc-corenet.c2
-rw-r--r--drivers/clk/clk.c2
-rw-r--r--drivers/clk/rockchip/clk-cpu.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c27
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c28
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c11
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpuidle/governors/ladder.c7
-rw-r--r--drivers/cpuidle/governors/menu.c25
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/dw/platform.c5
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-dln2.c156
-rw-r--r--drivers/gpio/gpio-grgpio.c3
-rw-r--r--drivers/gpio/gpiolib-of.c10
-rw-r--r--drivers/gpio/gpiolib-sysfs.c92
-rw-r--r--drivers/gpio/gpiolib.c58
-rw-r--r--drivers/gpio/gpiolib.h1
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c320
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c10
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h17
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c60
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c36
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c24
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c28
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c53
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c69
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/notify.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c65
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/cikd.h2
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c76
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c48
-rw-r--r--drivers/gpu/drm/tegra/drm.c16
-rw-r--r--drivers/gpu/drm/tegra/gem.c52
-rw-r--r--drivers/hid/Kconfig3
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-kye.c4
-rw-r--r--drivers/hid/hid-logitech-dj.c16
-rw-r--r--drivers/hid/hid-logitech-hidpp.c41
-rw-r--r--drivers/hid/hid-roccat-pyra.c8
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/iio/adc/ad799x.c15
-rw-r--r--drivers/iio/inkern.c3
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/input/evdev.c60
-rw-r--r--drivers/input/input.c22
-rw-r--r--drivers/input/keyboard/Kconfig1
-rw-r--r--drivers/input/keyboard/gpio_keys.c114
-rw-r--r--drivers/input/keyboard/hil_kbd.c6
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c141
-rw-r--r--drivers/input/mouse/alps.c84
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mouse/trackpoint.c4
-rw-r--r--drivers/input/mouse/trackpoint.h5
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h39
-rw-r--r--drivers/input/serio/i8042.c14
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c99
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c4
-rw-r--r--drivers/iommu/intel-iommu.c12
-rw-r--r--drivers/iommu/ipmmu-vmsa.c6
-rw-r--r--drivers/iommu/rockchip-iommu.c1
-rw-r--r--drivers/isdn/hardware/eicon/message.c2
-rw-r--r--drivers/leds/leds-netxbig.c12
-rw-r--r--drivers/mcb/mcb-internal.h1
-rw-r--r--drivers/mcb/mcb-pci.c27
-rw-r--r--drivers/md/dm-thin.c29
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/mfd/stmpe.c4
-rw-r--r--drivers/mfd/stmpe.h3
-rw-r--r--drivers/misc/cxl/context.c82
-rw-r--r--drivers/misc/cxl/file.c14
-rw-r--r--drivers/misc/mei/hw-me.c12
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-pci.c25
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c15
-rw-r--r--drivers/mmc/host/sdhci.c80
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c29
-rw-r--r--drivers/net/can/dev.c8
-rw-r--r--drivers/net/can/m_can/m_can.c5
-rw-r--r--drivers/net/can/usb/kvaser_usb.c31
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c6
-rw-r--r--drivers/net/ethernet/Kconfig12
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c15
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c24
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c57
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c2
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c144
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c56
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c18
-rw-r--r--drivers/net/ethernet/dnet.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c47
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c10
-rw-r--r--drivers/net/ethernet/intel/Kconfig11
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c104
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c9
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c9
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c37
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h5
-rw-r--r--drivers/net/ethernet/s6gmac.c1058
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c21
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c57
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c96
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c1
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c15
-rw-r--r--drivers/net/phy/micrel.c18
-rw-r--r--drivers/net/team/team.c16
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c10
-rw-r--r--drivers/net/usb/r8152.c47
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vxlan.c34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c15
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c19
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c17
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c32
-rw-r--r--drivers/net/xen-netback/xenbus.c1
-rw-r--r--drivers/net/xen-netfront.c71
-rw-r--r--drivers/phy/phy-miphy28lp.c3
-rw-r--r--drivers/phy/phy-omap-control.c7
-rw-r--r--drivers/phy/phy-sun4i-usb.c3
-rw-r--r--drivers/phy/phy-ti-pipe3.c10
-rw-r--r--drivers/pinctrl/core.c5
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c102
-rw-r--r--drivers/pinctrl/pinctrl-st.c5
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c4
-rw-r--r--drivers/powercap/intel_rapl.c1
-rw-r--r--drivers/regulator/s2mps11.c19
-rw-r--r--drivers/reset/reset-sunxi.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c10
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c4
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/spi/spi-img-spfi.c8
-rw-r--r--drivers/spi/spi-sh-msiof.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c2
-rw-r--r--drivers/staging/vt6655/baseband.c2
-rw-r--r--drivers/staging/vt6655/channel.c8
-rw-r--r--drivers/staging/vt6655/device_main.c13
-rw-r--r--drivers/staging/vt6655/rxtx.c5
-rw-r--r--drivers/target/iscsi/iscsi_target.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h4
-rw-r--r--drivers/target/target_core_device.c54
-rw-r--r--drivers/target/target_core_file.c12
-rw-r--r--drivers/target/target_core_iblock.c3
-rw-r--r--drivers/target/target_core_pr.c12
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_sbc.c15
-rw-r--r--drivers/target/target_core_spc.c5
-rw-r--r--drivers/target/target_core_user.c1
-rw-r--r--drivers/thermal/cpu_cooling.c360
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c20
-rw-r--r--drivers/thermal/imx_thermal.c17
-rw-r--r--drivers/thermal/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c24
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/int340x_thermal/int3402_thermal.c1
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c4
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c311
-rw-r--r--drivers/thermal/intel_powerclamp.c1
-rw-r--r--drivers/thermal/of-thermal.c2
-rw-r--r--drivers/thermal/rcar_thermal.c17
-rw-r--r--drivers/thermal/rockchip_thermal.c1
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c12
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_core.c6
-rw-r--r--drivers/thermal/thermal_core.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c17
-rw-r--r--drivers/tty/n_tty.c9
-rw-r--r--drivers/tty/serial/8250/8250_pci.c29
-rw-r--r--drivers/tty/serial/samsung.c56
-rw-r--r--drivers/tty/serial/serial_core.c4
-rw-r--r--drivers/tty/tty_io.c7
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/host.c1
-rw-r--r--drivers/usb/dwc2/gadget.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/function/f_hid.c5
-rw-r--r--drivers/usb/gadget/function/f_midi.c2
-rw-r--r--drivers/usb/gadget/function/f_uac1.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c19
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c3
-rw-r--r--drivers/usb/host/ehci-sched.c14
-rw-r--r--drivers/usb/host/ehci-tegra.c2
-rw-r--r--drivers/usb/host/pci-quirks.c18
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci.c9
-rw-r--r--drivers/usb/musb/Kconfig4
-rw-r--r--drivers/usb/musb/blackfin.c2
-rw-r--r--drivers/usb/musb/musb_cppi41.c4
-rw-r--r--drivers/usb/musb/musb_debugfs.c34
-rw-r--r--drivers/usb/musb/musb_host.c1
-rw-r--r--drivers/usb/phy/phy-mv-usb.c5
-rw-r--r--drivers/usb/phy/phy.c16
-rw-r--r--drivers/usb/serial/console.c16
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/generic.c4
-rw-r--r--drivers/usb/serial/keyspan.c20
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/storage/uas-detect.h33
-rw-r--r--drivers/usb/storage/unusual_uas.h46
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/scsi.c24
-rw-r--r--drivers/vhost/vhost.c10
-rw-r--r--drivers/video/fbdev/broadsheetfb.c8
-rw-r--r--drivers/video/fbdev/core/fb_defio.c5
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_pll.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/pll.c3
-rw-r--r--drivers/video/fbdev/omap2/dss/sdi.c2
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/logo/logo.c17
-rw-r--r--drivers/virtio/virtio_pci_common.c10
-rw-r--r--drivers/virtio/virtio_pci_common.h1
-rw-r--r--drivers/virtio/virtio_pci_legacy.c12
-rw-r--r--fs/btrfs/backref.c13
-rw-r--r--fs/btrfs/delayed-inode.c8
-rw-r--r--fs/btrfs/extent-tree.c12
-rw-r--r--fs/btrfs/inode.c4
-rw-r--r--fs/btrfs/scrub.c2
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/cifs/cifsglob.h6
-rw-r--r--fs/cifs/netmisc.c12
-rw-r--r--fs/cifs/readdir.c10
-rw-r--r--fs/cifs/smb2misc.c12
-rw-r--r--fs/cifs/smb2ops.c3
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/smb2transport.c2
-rw-r--r--fs/ext4/extents.c4
-rw-r--r--fs/ext4/file.c220
-rw-r--r--fs/ext4/resize.c24
-rw-r--r--fs/ext4/super.c2
-rw-r--r--fs/fcntl.c5
-rw-r--r--fs/fuse/dev.c51
-rw-r--r--fs/fuse/dir.c31
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c5
-rw-r--r--fs/isofs/rock.c3
-rw-r--r--fs/kernfs/dir.c12
-rw-r--r--fs/lockd/svc.c8
-rw-r--r--fs/locks.c2
-rw-r--r--fs/nfs/nfs4client.c42
-rw-r--r--fs/nfs/nfs4proc.c21
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/notify/fanotify/fanotify_user.c10
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c5
-rw-r--r--fs/ocfs2/namei.c43
-rw-r--r--fs/udf/dir.c31
-rw-r--r--fs/udf/inode.c14
-rw-r--r--fs/udf/namei.c17
-rw-r--r--fs/udf/symlink.c57
-rw-r--r--fs/udf/udfdecl.h3
-rw-r--r--fs/udf/unicode.c28
-rw-r--r--include/acpi/processor.h8
-rw-r--r--include/asm-generic/tlb.h8
-rw-r--r--include/drm/drmP.h4
-rw-r--r--include/drm/drm_gem.h7
-rw-r--r--include/dt-bindings/thermal/thermal.h2
-rw-r--r--include/linux/acpi.h4
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/blk-mq.h8
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/ceph/osd_client.h4
-rw-r--r--include/linux/compiler.h12
-rw-r--r--include/linux/cpu_cooling.h6
-rw-r--r--include/linux/cpuidle.h3
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/genetlink.h4
-rw-r--r--include/linux/kdb.h62
-rw-r--r--include/linux/mfd/stmpe.h22
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmc/sdhci.h1
-rw-r--r--include/linux/netdevice.h26
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/pagemap.h13
-rw-r--r--include/linux/perf_event.h12
-rw-r--r--include/linux/perf_regs.h16
-rw-r--r--include/linux/phy/omap_control_phy.h6
-rw-r--r--include/linux/pm_domain.h8
-rw-r--r--include/linux/rmap.h10
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/net/genetlink.h14
-rw-r--r--include/net/mac80211.h7
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/vxlan.h28
-rw-r--r--include/sound/pcm.h10
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/target/target_core_backend_configfs.h2
-rw-r--r--include/target/target_core_base.h3
-rw-r--r--include/uapi/asm-generic/fcntl.h2
-rw-r--r--include/uapi/linux/can/netlink.h1
-rw-r--r--include/uapi/linux/in6.h3
-rw-r--r--include/uapi/linux/kfd_ioctl.h37
-rw-r--r--include/uapi/linux/libc-compat.h3
-rw-r--r--include/uapi/linux/openvswitch.h4
-rw-r--r--include/uapi/linux/uinput.h4
-rw-r--r--include/uapi/linux/virtio_ring.h7
-rw-r--r--include/xen/interface/nmi.h51
-rw-r--r--kernel/audit.c10
-rw-r--r--kernel/auditfilter.c23
-rw-r--r--kernel/auditsc.c49
-rw-r--r--kernel/debug/debug_core.c52
-rw-r--r--kernel/debug/kdb/kdb_bp.c37
-rw-r--r--kernel/debug/kdb/kdb_debugger.c4
-rw-r--r--kernel/debug/kdb/kdb_main.c267
-rw-r--r--kernel/debug/kdb/kdb_private.h3
-rw-r--r--kernel/events/core.c19
-rw-r--r--kernel/exit.c12
-rw-r--r--kernel/locking/mutex-debug.c2
-rw-r--r--kernel/range.c10
-rw-r--r--kernel/sched/core.c13
-rw-r--r--kernel/sched/deadline.c25
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/trace/ftrace.c53
-rw-r--r--kernel/trace/trace.c1
-rw-r--r--kernel/trace/trace_events.c69
-rw-r--r--kernel/trace/trace_kdb.c4
-rw-r--r--lib/Kconfig.kgdb25
-rw-r--r--lib/assoc_array.c1
-rw-r--r--mm/Kconfig.debug9
-rw-r--r--mm/filemap.c29
-rw-r--r--mm/memcontrol.c17
-rw-r--r--mm/memory.c43
-rw-r--r--mm/mmap.c13
-rw-r--r--mm/page-writeback.c43
-rw-r--r--mm/rmap.c42
-rw-r--r--mm/vmscan.c24
-rw-r--r--net/batman-adv/fragmentation.c4
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/multicast.c11
-rw-r--r--net/batman-adv/network-coding.c2
-rw-r--r--net/batman-adv/originator.c7
-rw-r--r--net/batman-adv/routing.c6
-rw-r--r--net/bluetooth/6lowpan.c1
-rw-r--r--net/bluetooth/bnep/core.c3
-rw-r--r--net/bluetooth/cmtp/core.c3
-rw-r--r--net/bluetooth/hci_event.c16
-rw-r--r--net/bluetooth/hidp/core.c3
-rw-r--r--net/bridge/br_input.c3
-rw-r--r--net/ceph/auth_x.c2
-rw-r--r--net/ceph/mon_client.c2
-rw-r--r--net/core/dev.c195
-rw-r--r--net/core/neighbour.c44
-rw-r--r--net/core/skbuff.c1
-rw-r--r--net/ipv4/geneve.c6
-rw-r--r--net/ipv4/ip_sockglue.c8
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c8
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv6/datagram.c10
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c8
-rw-r--r--net/ipv6/route.c7
-rw-r--r--net/ipv6/tcp_ipv6.c45
-rw-r--r--net/mac80211/key.c12
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mpls/mpls_gso.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c10
-rw-r--r--net/netfilter/nf_conntrack_core.c20
-rw-r--r--net/netfilter/nf_tables_api.c14
-rw-r--r--net/netfilter/nfnetlink.c7
-rw-r--r--net/netfilter/nft_nat.c8
-rw-r--r--net/netlink/af_netlink.c48
-rw-r--r--net/netlink/af_netlink.h9
-rw-r--r--net/netlink/genetlink.c64
-rw-r--r--net/openvswitch/actions.c3
-rw-r--r--net/openvswitch/datapath.c6
-rw-r--r--net/openvswitch/flow.c5
-rw-r--r--net/openvswitch/flow_netlink.c13
-rw-r--r--net/openvswitch/vport-geneve.c3
-rw-r--r--net/openvswitch/vport-gre.c18
-rw-r--r--net/openvswitch/vport-vxlan.c2
-rw-r--r--net/openvswitch/vport.c7
-rw-r--r--net/packet/af_packet.c13
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/sunrpc/xdr.c6
-rw-r--r--net/tipc/bcast.c5
-rw-r--r--net/wireless/Kconfig2
-rw-r--r--net/wireless/reg.c56
-rw-r--r--scripts/Makefile.clean16
-rw-r--r--security/keys/gc.c4
-rw-r--r--sound/firewire/fireworks/fireworks_transaction.c2
-rw-r--r--sound/pci/hda/hda_controller.c24
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/hda_priv.h1
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_sigmatel.c4
-rw-r--r--sound/soc/codecs/rt5677.c9
-rw-r--r--sound/soc/dwc/designware_i2s.c49
-rw-r--r--sound/soc/intel/Kconfig4
-rw-r--r--sound/soc/intel/bytcr_dpcm_rt5640.c2
-rw-r--r--sound/soc/intel/sst-firmware.c6
-rw-r--r--sound/soc/intel/sst/sst_acpi.c2
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c4
-rw-r--r--sound/soc/rockchip/rockchip_i2s.h2
-rw-r--r--sound/soc/soc-core.c14
-rw-r--r--sound/usb/caiaq/audio.c2
-rw-r--r--tools/include/asm-generic/bitops.h2
-rw-r--r--tools/include/asm-generic/bitops/arch_hweight.h1
-rw-r--r--tools/include/asm-generic/bitops/const_hweight.h1
-rw-r--r--tools/include/asm-generic/bitops/hweight.h7
-rw-r--r--tools/include/linux/bitops.h7
-rw-r--r--tools/lib/api/fs/debugfs.c2
-rw-r--r--tools/lib/api/fs/fs.c2
-rw-r--r--tools/lib/lockdep/preload.c4
-rw-r--r--tools/perf/MANIFEST6
-rw-r--r--tools/perf/Makefile.perf11
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c19
-rw-r--r--tools/perf/bench/sched-pipe.c2
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-diff.c46
-rw-r--r--tools/perf/builtin-list.c13
-rw-r--r--tools/perf/builtin-report.c24
-rw-r--r--tools/perf/builtin-top.c5
-rw-r--r--tools/perf/config/Makefile2
-rw-r--r--tools/perf/config/Makefile.arch26
-rw-r--r--tools/perf/perf-sys.h1
-rw-r--r--tools/perf/tests/dwarf-unwind.c36
-rw-r--r--tools/perf/tests/hists_cumulate.c66
-rw-r--r--tools/perf/tests/hists_filter.c2
-rw-r--r--tools/perf/tests/hists_output.c10
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/ui/hist.c3
-rw-r--r--tools/perf/ui/tui/setup.c26
-rw-r--r--tools/perf/util/annotate.h8
-rw-r--r--tools/perf/util/cache.h2
-rw-r--r--tools/perf/util/callchain.c30
-rw-r--r--tools/perf/util/callchain.h2
-rw-r--r--tools/perf/util/hist.c18
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/hweight.c31
-rw-r--r--tools/perf/util/include/asm/hweight.h8
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/probe-event.c10
-rw-r--r--tools/perf/util/probe-finder.c18
-rw-r--r--tools/perf/util/python-ext-sources2
-rw-r--r--tools/perf/util/unwind-libunwind.c28
-rw-r--r--tools/power/cpupower/utils/cpupower.c2
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c2
-rw-r--r--tools/testing/selftests/exec/execveat.c23
-rw-r--r--tools/testing/selftests/mqueue/mq_perf_tests.c3
-rw-r--r--tools/testing/selftests/vm/Makefile2
-rw-r--r--virt/kvm/kvm_main.c26
723 files changed, 7431 insertions, 4964 deletions
diff --git a/.mailmap b/.mailmap
index ada8ad696b2e..d357e1bd2a43 100644
--- a/.mailmap
+++ b/.mailmap
@@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
+Henrik Rydberg <rydberg@bitmath.org>
 Herbert Xu <herbert@gondor.apana.org.au>
 Jacob Shin <Jacob.Shin@amd.com>
 James Bottomley <jejb@mulgrave.(none)>
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 0ec8b8178c41..80d9888a8ece 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -14,3 +14,18 @@ Description:
  The /sys/class/mei/meiN directory is created for
  each probed mei device

+What: /sys/class/mei/meiN/fw_status
+Date: Nov 2014
+KernelVersion: 3.19
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display fw status registers content
+
+ The ME FW writes its status information into fw status
+ registers for BIOS and OS to monitor fw health.
+
+ The register contains running state, power management
+ state, error codes, and others. The way the registers
+ are decoded depends on PCH or SoC generation.
+ Also number of registers varies between 1 and 6
+ depending on generation.
+
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index a4a38fcf2ed6..44b705767aca 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -10,12 +10,13 @@ Optional properties:
 Each button (key) is represented as a sub-node of "gpio-keys":
 Subnode properties:

+ - gpios: OF device-tree gpio specification.
+ - interrupts: the interrupt line for that input.
  - label: Descriptive name of the key.
  - linux,code: Keycode to emit.

-Required mutual exclusive subnode-properties:
- - gpios: OF device-tree gpio specification.
- - interrupts: the interrupt line for that input
+Note that either "interrupts" or "gpios" properties can be omitted, but not
+both at the same time. Specifying both properties is allowed.

 Optional subnode-properties:
  - linux,input-type: Specify event type this button/key generates.
@@ -23,6 +24,9 @@ Optional subnode-properties:
  - debounce-interval: Debouncing interval time in milliseconds.
  If not specified defaults to 5.
  - gpio-key,wakeup: Boolean, button can wake-up the system.
+ - linux,can-disable: Boolean, indicates that button is connected
+ to dedicated (not shared) interrupt which can be disabled to
+ suppress events from the button.

 Example nodes:

diff --git a/Documentation/devicetree/bindings/input/stmpe-keypad.txt b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
index 1b97222e8a0b..12bb771d66d4 100644
--- a/Documentation/devicetree/bindings/input/stmpe-keypad.txt
+++ b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
@@ -8,6 +8,8 @@ Optional properties:
  - debounce-interval : Debouncing interval time in milliseconds
  - st,scan-count : Scanning cycles elapsed before key data is updated
  - st,no-autorepeat : If specified device will not autorepeat
+ - keypad,num-rows : See ./matrix-keymap.txt
+ - keypad,num-columns : See ./matrix-keymap.txt

 Example:

diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 032808843f90..24c5cdaba8d2 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -4,7 +4,8 @@ This file provides information, what the device node
 for the davinci_emac interface contains.

 Required properties:
-- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
+- compatible: "ti,davinci-dm6467-emac", "ti,am3517-emac" or
+ "ti,dm816-emac"
 - reg: Offset and length of the register set for the device
 - ti,davinci-ctrl-reg-offset: offset to control register
 - ti,davinci-ctrl-mod-reg-offset: offset to control module register
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4df73da11adc..176d4fe4f076 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1277,6 +1277,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
  i8042.notimeout [HW] Ignore timeout condition signalled by controller
  i8042.reset [HW] Reset the controller during init and cleanup
  i8042.unlock [HW] Unlock (ignore) the keylock
+ i8042.kbdreset [HW] Reset device connected to KBD port

  i810= [HW,DRM]

diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9bffdfc648dc..85b022179104 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
 route/max_size - INTEGER
  Maximum number of routes allowed in the kernel. Increase
  this when using large numbers of interfaces and/or routes.
+ From linux kernel 3.6 onwards, this is deprecated for ipv4
+ as route cache is no longer used.

 neigh/default/gc_thresh1 - INTEGER
  Minimum number of entries to keep. Garbage collector will not
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 230ce71f4d75..2b47704f75cb 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
  buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
  buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
  buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
- buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
- buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
- buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
  buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
  buf += " .sess_get_initiator_sid = NULL,\n"
  buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
  buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
  buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
  buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
- buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
+ buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
  buf += " /*\n"
  buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
  buf += " */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
  buf += " /*\n"
  buf += " * Register the top level struct config_item_type with TCM core\n"
  buf += " */\n"
- buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+ buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
  buf += " if (IS_ERR(fabric)) {\n"
  buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
  buf += " return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
  if re.search('get_fabric_name', fo):
  buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
  buf += "{\n"
- buf += " return \"" + fabric_mod_name[4:] + "\";\n"
+ buf += " return \"" + fabric_mod_name + "\";\n"
  buf += "}\n\n"
  bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
  continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
  buf += "}\n\n"
  bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"

- if re.search('stop_session\)\(', fo):
- buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
- if re.search('fall_back_to_erl0\)\(', fo):
- buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
- if re.search('sess_logged_in\)\(', fo):
- buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
  if re.search('sess_get_index\)\(', fo):
  buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
  buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
  bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"

  if re.search('queue_tm_rsp\)\(', fo):
- buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+ buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
  buf += "{\n"
- buf += " return 0;\n"
+ buf += " return;\n"
  buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+ bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"

- if re.search('is_state_remove\)\(', fo):
- buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+ if re.search('aborted_task\)\(', fo):
+ buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
  buf += "{\n"
- buf += " return 0;\n"
+ buf += " return;\n"
  buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+ bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"

  ret = p.write(buf)
  if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
  tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
  tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

- input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+ input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
  if input == "yes" or input == "y":
  tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

- input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+ input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
  if input == "yes" or input == "y":
  tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)

diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt
index fca24c931ec8..753e47cc2e20 100644
--- a/Documentation/thermal/cpu-cooling-api.txt
+++ b/Documentation/thermal/cpu-cooling-api.txt
@@ -3,7 +3,7 @@ CPU cooling APIs How To

 Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>

-Updated: 12 May 2012
+Updated: 6 Jan 2015

 Copyright (c) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)

@@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer.

  clip_cpus: cpumask of cpus where the frequency constraints will happen.

-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+ struct device_node *np, const struct cpumask *clip_cpus)
+
+ This interface function registers the cpufreq cooling device with
+ the name "thermal-cpufreq-%x" linking it with a device tree node, in
+ order to bind it via the thermal DT code. This api can support multiple
+ instances of cpufreq cooling devices.
+
+ np: pointer to the cooling device device tree node
+ clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)

  This interface function unregisters the "thermal-cpufreq-%x" cooling device.

diff --git a/MAINTAINERS b/MAINTAINERS
index ddb9ac8d32b3..64e13d5c1e65 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -724,15 +724,15 @@ F: include/uapi/linux/apm_bios.h
 F: drivers/char/apm-emulation.c

 APPLE BCM5974 MULTITOUCH DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
 L: linux-input@vger.kernel.org
-S: Maintained
+S: Odd fixes
 F: drivers/input/mouse/bcm5974.c

 APPLE SMC DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
 L: lm-sensors@lm-sensors.org
-S: Maintained
+S: Odd fixes
 F: drivers/hwmon/applesmc.c

 APPLETALK NETWORK LAYER
@@ -2259,6 +2259,7 @@ F: drivers/gpio/gpio-bt8xx.c
 BTRFS FILE SYSTEM
 M: Chris Mason <clm@fb.com>
 M: Josef Bacik <jbacik@fb.com>
+M: David Sterba <dsterba@suse.cz>
 L: linux-btrfs@vger.kernel.org
 W: http://btrfs.wiki.kernel.org/
 Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2345,7 +2346,8 @@ CAN NETWORK LAYER
 M: Oliver Hartkopp <socketcan@hartkopp.net>
 L: linux-can@vger.kernel.org
 W: http://gitorious.org/linux-can
-T: git git://gitorious.org/linux-can/linux-can-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S: Maintained
 F: Documentation/networking/can.txt
 F: net/can/
@@ -2360,7 +2362,8 @@ M: Wolfgang Grandegger <wg@grandegger.com>
 M: Marc Kleine-Budde <mkl@pengutronix.de>
 L: linux-can@vger.kernel.org
 W: http://gitorious.org/linux-can
-T: git git://gitorious.org/linux-can/linux-can-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S: Maintained
 F: drivers/net/can/
 F: include/linux/can/dev.h
@@ -3182,7 +3185,7 @@ L: dmaengine@vger.kernel.org
 Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
 S: Maintained
 F: drivers/dma/
-F: include/linux/dma*
+F: include/linux/dmaengine.h
 F: Documentation/dmaengine/
 T: git git://git.infradead.org/users/vkoul/slave-dma.git

@@ -4748,7 +4751,7 @@ S: Supported
 F: drivers/scsi/ipr.*

 IBM Power Virtual Ethernet Device Driver
-M: Santiago Leon <santil@linux.vnet.ibm.com>
+M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/ibm/ibmveth.*
@@ -4929,7 +4932,6 @@ F: include/uapi/linux/inotify.h

 INPUT (KEYBOARD, MOUSE, JOYSTICK, TOUCHSCREEN) DRIVERS
 M: Dmitry Torokhov <dmitry.torokhov@gmail.com>
-M: Dmitry Torokhov <dtor@mail.ru>
 L: linux-input@vger.kernel.org
 Q: http://patchwork.kernel.org/project/linux-input/list/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
@@ -4940,10 +4942,10 @@ F: include/uapi/linux/input.h
 F: include/linux/input/

 INPUT MULTITOUCH (MT) PROTOCOL
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
 L: linux-input@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
-S: Maintained
+S: Odd fixes
 F: Documentation/input/multi-touch-protocol.txt
 F: drivers/input/input-mt.c
 K: \b(ABS|SYN)_MT_
@@ -5279,6 +5281,15 @@ W: www.open-iscsi.org
 Q: http://patchwork.kernel.org/project/linux-rdma/list/
 F: drivers/infiniband/ulp/iser/

+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M: Sagi Grimberg <sagig@mellanox.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L: linux-rdma@vger.kernel.org
+L: target-devel@vger.kernel.org
+S: Supported
+W: http://www.linux-iscsi.org
+F: drivers/infiniband/ulp/isert
+
 ISDN SUBSYSTEM
 M: Karsten Keil <isdn@linux-pingi.de>
 L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -7399,6 +7410,7 @@ F: drivers/crypto/picoxcell*
 PIN CONTROL SUBSYSTEM
 M: Linus Walleij <linus.walleij@linaro.org>
 L: linux-gpio@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S: Maintained
 F: drivers/pinctrl/
 F: include/linux/pinctrl/
@@ -7737,8 +7749,7 @@ F: Documentation/scsi/LICENSE.qla2xxx
 F: drivers/scsi/qla2xxx/

 QLOGIC QLA4XXX iSCSI DRIVER
-M: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
-M: iscsi-driver@qlogic.com
+M: QLogic-Storage-Upstream@qlogic.com
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: Documentation/scsi/LICENSE.qla4xxx
@@ -9533,7 +9544,8 @@ F: drivers/platform/x86/thinkpad_acpi.c
 TI BANDGAP AND THERMAL DRIVER
 M: Eduardo Valentin <edubezval@gmail.com>
 L: linux-pm@vger.kernel.org
-S: Supported
+L: linux-omap@vger.kernel.org
+S: Maintained
 F: drivers/thermal/ti-soc-thermal/

 TI CLOCK DRIVER
diff --git a/Makefile b/Makefile
index b1c3254441f3..fb93350cf645 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc5
 NAME = Diseased Newt

 # *DOCUMENTATION*
@@ -391,6 +391,7 @@ USERINCLUDE := \
 # Needed to be compatible with the O= option
 LINUXINCLUDE := \
  -I$(srctree)/arch/$(hdr-arch)/include \
+ -Iarch/$(hdr-arch)/include/generated/uapi \
  -Iarch/$(hdr-arch)/include/generated \
  $(if $(KBUILD_SRC), -I$(srctree)/include) \
  -Iinclude \
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 1466580be295..70b1943a86b1 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -203,27 +203,3 @@
  compatible = "linux,spdif-dir";
  };
 };
-
-&pinctrl {
- /*
- * These pins might be muxed as I2S by
- * the bootloader, but it conflicts
- * with the real I2S pins that are
- * muxed using i2s_pins. We must mux
- * those pins to a function other than
- * I2S.
- */
- pinctrl-0 = <&hog_pins1 &hog_pins2>;
- pinctrl-names = "default";
-
- hog_pins1: hog-pins1 {
- marvell,pins = "mpp6", "mpp8", "mpp10",
- "mpp12", "mpp13";
- marvell,function = "gpio";
- };
-
- hog_pins2: hog-pins2 {
- marvell,pins = "mpp5", "mpp7", "mpp9";
- marvell,function = "gpo";
- };
-};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 1467750e3377..e8c6c600a5b6 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -953,6 +953,8 @@
  interrupts = <26 IRQ_TYPE_LEVEL_HIGH 3>;
  pinctrl-names = "default";
  pinctrl-0 = <&pinctrl_fb>;
+ clocks = <&lcd_clk>, <&lcd_clk>;
+ clock-names = "lcdc_clk", "hclk";
  status = "disabled";
  };

diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
index 28e7e2060c33..a98ac1bd8f65 100644
--- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
+++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
@@ -65,6 +65,8 @@
 };

 &sdhci2 {
+ broken-cd;
+ bus-width = <8>;
  non-removable;
  status = "okay";
 };
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 35253c947a7c..e2f61f27944e 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -83,7 +83,8 @@
83 compatible = "mrvl,pxav3-mmc"; 83 compatible = "mrvl,pxav3-mmc";
84 reg = <0xab1000 0x200>; 84 reg = <0xab1000 0x200>;
85 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; 85 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
86 clocks = <&chip CLKID_SDIO1XIN>; 86 clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>;
87 clock-names = "io", "core";
87 status = "disabled"; 88 status = "disabled";
88 }; 89 };
89 90
@@ -348,36 +349,6 @@
348 interrupt-parent = <&gic>; 349 interrupt-parent = <&gic>;
349 interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>; 350 interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
350 }; 351 };
351
352 gpio4: gpio@5000 {
353 compatible = "snps,dw-apb-gpio";
354 reg = <0x5000 0x400>;
355 #address-cells = <1>;
356 #size-cells = <0>;
357
358 porte: gpio-port@4 {
359 compatible = "snps,dw-apb-gpio-port";
360 gpio-controller;
361 #gpio-cells = <2>;
362 snps,nr-gpios = <32>;
363 reg = <0>;
364 };
365 };
366
367 gpio5: gpio@c000 {
368 compatible = "snps,dw-apb-gpio";
369 reg = <0xc000 0x400>;
370 #address-cells = <1>;
371 #size-cells = <0>;
372
373 portf: gpio-port@5 {
374 compatible = "snps,dw-apb-gpio-port";
375 gpio-controller;
376 #gpio-cells = <2>;
377 snps,nr-gpios = <32>;
378 reg = <0>;
379 };
380 };
381 }; 352 };
382 353
383 chip: chip-control@ea0000 { 354 chip: chip-control@ea0000 {
@@ -466,6 +437,21 @@
466 ranges = <0 0xfc0000 0x10000>; 437 ranges = <0 0xfc0000 0x10000>;
467 interrupt-parent = <&sic>; 438 interrupt-parent = <&sic>;
468 439
440 sm_gpio1: gpio@5000 {
441 compatible = "snps,dw-apb-gpio";
442 reg = <0x5000 0x400>;
443 #address-cells = <1>;
444 #size-cells = <0>;
445
446 portf: gpio-port@5 {
447 compatible = "snps,dw-apb-gpio-port";
448 gpio-controller;
449 #gpio-cells = <2>;
450 snps,nr-gpios = <32>;
451 reg = <0>;
452 };
453 };
454
469 i2c2: i2c@7000 { 455 i2c2: i2c@7000 {
470 compatible = "snps,designware-i2c"; 456 compatible = "snps,designware-i2c";
471 #address-cells = <1>; 457 #address-cells = <1>;
@@ -516,6 +502,21 @@
516 status = "disabled"; 502 status = "disabled";
517 }; 503 };
518 504
505 sm_gpio0: gpio@c000 {
506 compatible = "snps,dw-apb-gpio";
507 reg = <0xc000 0x400>;
508 #address-cells = <1>;
509 #size-cells = <0>;
510
511 porte: gpio-port@4 {
512 compatible = "snps,dw-apb-gpio-port";
513 gpio-controller;
514 #gpio-cells = <2>;
515 snps,nr-gpios = <32>;
516 reg = <0>;
517 };
518 };
519
519 sysctrl: pin-controller@d000 { 520 sysctrl: pin-controller@d000 {
520 compatible = "marvell,berlin2q-system-ctrl"; 521 compatible = "marvell,berlin2q-system-ctrl";
521 reg = <0xd000 0x100>; 522 reg = <0xd000 0x100>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 10b725c7bfc0..ad4118f7e1a6 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -499,23 +499,23 @@
499 }; 499 };
500 partition@5 { 500 partition@5 {
501 label = "QSPI.u-boot-spl-os"; 501 label = "QSPI.u-boot-spl-os";
502 reg = <0x00140000 0x00010000>; 502 reg = <0x00140000 0x00080000>;
503 }; 503 };
504 partition@6 { 504 partition@6 {
505 label = "QSPI.u-boot-env"; 505 label = "QSPI.u-boot-env";
506 reg = <0x00150000 0x00010000>; 506 reg = <0x001c0000 0x00010000>;
507 }; 507 };
508 partition@7 { 508 partition@7 {
509 label = "QSPI.u-boot-env.backup1"; 509 label = "QSPI.u-boot-env.backup1";
510 reg = <0x00160000 0x0010000>; 510 reg = <0x001d0000 0x0010000>;
511 }; 511 };
512 partition@8 { 512 partition@8 {
513 label = "QSPI.kernel"; 513 label = "QSPI.kernel";
514 reg = <0x00170000 0x0800000>; 514 reg = <0x001e0000 0x0800000>;
515 }; 515 };
516 partition@9 { 516 partition@9 {
517 label = "QSPI.file-system"; 517 label = "QSPI.file-system";
518 reg = <0x00970000 0x01690000>; 518 reg = <0x009e0000 0x01620000>;
519 }; 519 };
520 }; 520 };
521}; 521};
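The partition moves above all follow from partition@5 growing from 0x10000 to 0x80000 bytes. A quick stand-alone check (not part of the patch; the 32 MiB total is an assumption about this board's QSPI flash) that the new layout stays contiguous:

#include <stdio.h>

int main(void)
{
	/* offset/size pairs copied from the new (right-hand) column above */
	static const struct { const char *label; unsigned long off, size; } p[] = {
		{ "QSPI.u-boot-spl-os",      0x00140000, 0x00080000 },
		{ "QSPI.u-boot-env",         0x001c0000, 0x00010000 },
		{ "QSPI.u-boot-env.backup1", 0x001d0000, 0x00010000 },
		{ "QSPI.kernel",             0x001e0000, 0x00800000 },
		{ "QSPI.file-system",        0x009e0000, 0x01620000 },
	};
	unsigned long next = p[0].off;

	for (unsigned int i = 0; i < sizeof(p) / sizeof(p[0]); i++) {
		printf("%-24s %#010lx + %#09lx -> %#010lx%s\n",
		       p[i].label, p[i].off, p[i].size, p[i].off + p[i].size,
		       p[i].off == next ? "" : "  (gap)");
		next = p[i].off + p[i].size;
	}
	printf("end of last partition: %#lx (0x02000000 == 32 MiB)\n", next);
	return 0;
}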
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 0a229fcd7acf..d75c89d7666a 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -736,7 +736,7 @@
736 736
737 dp_phy: video-phy@10040720 { 737 dp_phy: video-phy@10040720 {
738 compatible = "samsung,exynos5250-dp-video-phy"; 738 compatible = "samsung,exynos5250-dp-video-phy";
739 reg = <0x10040720 4>; 739 samsung,pmu-syscon = <&pmu_system_controller>;
740 #phy-cells = <0>; 740 #phy-cells = <0>;
741 }; 741 };
742 742
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index aa7a7d727a7e..db2c1c4cd900 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -372,3 +372,7 @@
372&usbdrd_dwc3_1 { 372&usbdrd_dwc3_1 {
373 dr_mode = "host"; 373 dr_mode = "host";
374}; 374};
375
376&cci {
377 status = "disabled";
378};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 517e50f6760b..6d38f8bfd0e6 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -120,7 +120,7 @@
120 }; 120 };
121 }; 121 };
122 122
123 cci@10d20000 { 123 cci: cci@10d20000 {
124 compatible = "arm,cci-400"; 124 compatible = "arm,cci-400";
125 #address-cells = <1>; 125 #address-cells = <1>;
126 #size-cells = <1>; 126 #size-cells = <1>;
@@ -503,8 +503,8 @@
503 }; 503 };
504 504
505 dp_phy: video-phy@10040728 { 505 dp_phy: video-phy@10040728 {
506 compatible = "samsung,exynos5250-dp-video-phy"; 506 compatible = "samsung,exynos5420-dp-video-phy";
507 reg = <0x10040728 4>; 507 samsung,pmu-syscon = <&pmu_system_controller>;
508 #phy-cells = <0>; 508 #phy-cells = <0>;
509 }; 509 };
510 510
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 58d3c3cf2923..d238676a9107 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -162,7 +162,7 @@
162 #size-cells = <0>; 162 #size-cells = <0>;
163 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; 163 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
164 reg = <0x43fa4000 0x4000>; 164 reg = <0x43fa4000 0x4000>;
165 clocks = <&clks 62>, <&clks 62>; 165 clocks = <&clks 78>, <&clks 78>;
166 clock-names = "ipg", "per"; 166 clock-names = "ipg", "per";
167 interrupts = <14>; 167 interrupts = <14>;
168 status = "disabled"; 168 status = "disabled";
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 56569cecaa78..649befeb2cf9 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -127,24 +127,12 @@
127 #address-cells = <1>; 127 #address-cells = <1>;
128 #size-cells = <0>; 128 #size-cells = <0>;
129 129
130 reg_usbh1_vbus: regulator@0 { 130 reg_hub_reset: regulator@0 {
131 compatible = "regulator-fixed";
132 pinctrl-names = "default";
133 pinctrl-0 = <&pinctrl_usbh1reg>;
134 reg = <0>;
135 regulator-name = "usbh1_vbus";
136 regulator-min-microvolt = <5000000>;
137 regulator-max-microvolt = <5000000>;
138 gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
139 enable-active-high;
140 };
141
142 reg_usbotg_vbus: regulator@1 {
143 compatible = "regulator-fixed"; 131 compatible = "regulator-fixed";
144 pinctrl-names = "default"; 132 pinctrl-names = "default";
145 pinctrl-0 = <&pinctrl_usbotgreg>; 133 pinctrl-0 = <&pinctrl_usbotgreg>;
146 reg = <1>; 134 reg = <0>;
147 regulator-name = "usbotg_vbus"; 135 regulator-name = "hub_reset";
148 regulator-min-microvolt = <5000000>; 136 regulator-min-microvolt = <5000000>;
149 regulator-max-microvolt = <5000000>; 137 regulator-max-microvolt = <5000000>;
150 gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>; 138 gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
@@ -176,6 +164,7 @@
176 reg = <0>; 164 reg = <0>;
177 clocks = <&clks IMX5_CLK_DUMMY>; 165 clocks = <&clks IMX5_CLK_DUMMY>;
178 clock-names = "main_clk"; 166 clock-names = "main_clk";
167 reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
179 }; 168 };
180 }; 169 };
181}; 170};
@@ -419,7 +408,7 @@
419&usbh1 { 408&usbh1 {
420 pinctrl-names = "default"; 409 pinctrl-names = "default";
421 pinctrl-0 = <&pinctrl_usbh1>; 410 pinctrl-0 = <&pinctrl_usbh1>;
422 vbus-supply = <&reg_usbh1_vbus>; 411 vbus-supply = <&reg_hub_reset>;
423 fsl,usbphy = <&usbh1phy>; 412 fsl,usbphy = <&usbh1phy>;
424 phy_type = "ulpi"; 413 phy_type = "ulpi";
425 status = "okay"; 414 status = "okay";
@@ -429,7 +418,6 @@
429 dr_mode = "otg"; 418 dr_mode = "otg";
430 disable-over-current; 419 disable-over-current;
431 phy_type = "utmi_wide"; 420 phy_type = "utmi_wide";
432 vbus-supply = <&reg_usbotg_vbus>;
433 status = "okay"; 421 status = "okay";
434}; 422};
435 423
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 4fc03b7f1cee..2109d0763c1b 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -335,8 +335,8 @@
335 vpu: vpu@02040000 { 335 vpu: vpu@02040000 {
336 compatible = "cnm,coda960"; 336 compatible = "cnm,coda960";
337 reg = <0x02040000 0x3c000>; 337 reg = <0x02040000 0x3c000>;
338 interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>, 338 interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>,
339 <0 12 IRQ_TYPE_LEVEL_HIGH>; 339 <0 3 IRQ_TYPE_LEVEL_HIGH>;
340 interrupt-names = "bit", "jpeg"; 340 interrupt-names = "bit", "jpeg";
341 clocks = <&clks IMX6QDL_CLK_VPU_AXI>, 341 clocks = <&clks IMX6QDL_CLK_VPU_AXI>,
342 <&clks IMX6QDL_CLK_MMDC_CH0_AXI>, 342 <&clks IMX6QDL_CLK_MMDC_CH0_AXI>,
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 1e6e5cc1c14c..8c1febd7e3f2 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -159,13 +159,28 @@
159 pinctrl-0 = <&pinctrl_enet1>; 159 pinctrl-0 = <&pinctrl_enet1>;
160 phy-supply = <&reg_enet_3v3>; 160 phy-supply = <&reg_enet_3v3>;
161 phy-mode = "rgmii"; 161 phy-mode = "rgmii";
162 phy-handle = <&ethphy1>;
162 status = "okay"; 163 status = "okay";
164
165 mdio {
166 #address-cells = <1>;
167 #size-cells = <0>;
168
169 ethphy1: ethernet-phy@0 {
170 reg = <0>;
171 };
172
173 ethphy2: ethernet-phy@1 {
174 reg = <1>;
175 };
176 };
163}; 177};
164 178
165&fec2 { 179&fec2 {
166 pinctrl-names = "default"; 180 pinctrl-names = "default";
167 pinctrl-0 = <&pinctrl_enet2>; 181 pinctrl-0 = <&pinctrl_enet2>;
168 phy-mode = "rgmii"; 182 phy-mode = "rgmii";
183 phy-handle = <&ethphy2>;
169 status = "okay"; 184 status = "okay";
170}; 185};
171 186
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 657da14cb4b5..c70bb27ac65a 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -142,6 +142,7 @@
142 scfg: scfg@1570000 { 142 scfg: scfg@1570000 {
143 compatible = "fsl,ls1021a-scfg", "syscon"; 143 compatible = "fsl,ls1021a-scfg", "syscon";
144 reg = <0x0 0x1570000 0x0 0x10000>; 144 reg = <0x0 0x1570000 0x0 0x10000>;
145 big-endian;
145 }; 146 };
146 147
147 clockgen: clocking@1ee1000 { 148 clockgen: clocking@1ee1000 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 53f3ca064140..b550c41b46f1 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -700,11 +700,9 @@
700 }; 700 };
701 }; 701 };
702 702
703 /* Ethernet is on some early development boards and qemu */
703 ethernet@gpmc { 704 ethernet@gpmc {
704 compatible = "smsc,lan91c94"; 705 compatible = "smsc,lan91c94";
705
706 status = "disabled";
707
708 interrupt-parent = <&gpio2>; 706 interrupt-parent = <&gpio2>;
709 interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */ 707 interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */
710 reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */ 708 reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 3e067dd65d0c..6194d673e80b 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -155,6 +155,15 @@
155}; 155};
156 156
157&pinctrl { 157&pinctrl {
158 pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
159 drive-strength = <8>;
160 };
161
162 pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
163 bias-pull-up;
164 drive-strength = <8>;
165 };
166
158 backlight { 167 backlight {
159 bl_en: bl-en { 168 bl_en: bl-en {
160 rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>; 169 rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -173,6 +182,27 @@
173 }; 182 };
174 }; 183 };
175 184
185 sdmmc {
186 /*
187 * Default drive strength isn't enough to achieve even
188 * high-speed mode on EVB board so bump up to 8ma.
189 */
190 sdmmc_bus4: sdmmc-bus4 {
191 rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
192 <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
193 <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
194 <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
195 };
196
197 sdmmc_clk: sdmmc-clk {
198 rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
199 };
200
201 sdmmc_cmd: sdmmc-cmd {
202 rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
203 };
204 };
205
176 usb { 206 usb {
177 host_vbus_drv: host-vbus-drv { 207 host_vbus_drv: host-vbus-drv {
178 rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>; 208 rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index 49c10d33df30..77e03655aca3 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -176,7 +176,7 @@
176 "Headphone Jack", "HPOUTR", 176 "Headphone Jack", "HPOUTR",
177 "IN2L", "Line In Jack", 177 "IN2L", "Line In Jack",
178 "IN2R", "Line In Jack", 178 "IN2R", "Line In Jack",
179 "MICBIAS", "IN1L", 179 "Mic", "MICBIAS",
180 "IN1L", "Mic"; 180 "IN1L", "Mic";
181 181
182 atmel,ssc-controller = <&ssc0>; 182 atmel,ssc-controller = <&ssc0>;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 1b0f30c2c4a5..b94995d1889f 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1008,7 +1008,7 @@
1008 1008
1009 pit: timer@fc068630 { 1009 pit: timer@fc068630 {
1010 compatible = "atmel,at91sam9260-pit"; 1010 compatible = "atmel,at91sam9260-pit";
1011 reg = <0xfc068630 0xf>; 1011 reg = <0xfc068630 0x10>;
1012 interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>; 1012 interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>;
1013 clocks = <&h32ck>; 1013 clocks = <&h32ck>;
1014 }; 1014 };
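The only change above is the resource length, 0xf -> 0x10. As a point of reference (the register names below are an assumption based on the at91sam9260 PIT programming model, not something stated in the hunk), the block has four 32-bit registers and therefore needs a full 16 bytes:

#include <stdio.h>

/* hypothetical offsets for an at91sam9260-compatible PIT block */
enum {
	AT91_PIT_MR   = 0x00,	/* mode register */
	AT91_PIT_SR   = 0x04,	/* status register */
	AT91_PIT_PIVR = 0x08,	/* periodic interval value register */
	AT91_PIT_PIIR = 0x0c,	/* periodic interval image register */
};

int main(void)
{
	/* the last register occupies bytes 0x0c..0x0f, so the region is 0x10 long */
	printf("required length: %#x\n", AT91_PIT_PIIR + 4);
	return 0;
}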
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index a8c00ee7522a..3d0b8755caee 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -25,11 +25,11 @@
25 stmpe2401_1 { 25 stmpe2401_1 {
26 stmpe2401_1_nhk_mode: stmpe2401_1_nhk { 26 stmpe2401_1_nhk_mode: stmpe2401_1_nhk {
27 nhk_cfg1 { 27 nhk_cfg1 {
28 ste,pins = "GPIO76_B20"; // IRQ line 28 pins = "GPIO76_B20"; // IRQ line
29 ste,input = <0>; 29 ste,input = <0>;
30 }; 30 };
31 nhk_cfg2 { 31 nhk_cfg2 {
32 ste,pins = "GPIO77_B8"; // reset line 32 pins = "GPIO77_B8"; // reset line
33 ste,output = <1>; 33 ste,output = <1>;
34 }; 34 };
35 }; 35 };
@@ -37,11 +37,11 @@
37 stmpe2401_2 { 37 stmpe2401_2 {
38 stmpe2401_2_nhk_mode: stmpe2401_2_nhk { 38 stmpe2401_2_nhk_mode: stmpe2401_2_nhk {
39 nhk_cfg1 { 39 nhk_cfg1 {
40 ste,pins = "GPIO78_A8"; // IRQ line 40 pins = "GPIO78_A8"; // IRQ line
41 ste,input = <0>; 41 ste,input = <0>;
42 }; 42 };
43 nhk_cfg2 { 43 nhk_cfg2 {
44 ste,pins = "GPIO79_C9"; // reset line 44 pins = "GPIO79_C9"; // reset line
45 ste,output = <1>; 45 ste,output = <1>;
46 }; 46 };
47 }; 47 };
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts
index a0f762159cb2..f2b64b1b00fa 100644
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
@@ -129,13 +129,28 @@
129 129
130&fec0 { 130&fec0 {
131 phy-mode = "rmii"; 131 phy-mode = "rmii";
132 phy-handle = <&ethphy0>;
132 pinctrl-names = "default"; 133 pinctrl-names = "default";
133 pinctrl-0 = <&pinctrl_fec0>; 134 pinctrl-0 = <&pinctrl_fec0>;
134 status = "okay"; 135 status = "okay";
136
137 mdio {
138 #address-cells = <1>;
139 #size-cells = <0>;
140
141 ethphy0: ethernet-phy@0 {
142 reg = <0>;
143 };
144
145 ethphy1: ethernet-phy@1 {
146 reg = <1>;
147 };
148 };
135}; 149};
136 150
137&fec1 { 151&fec1 {
138 phy-mode = "rmii"; 152 phy-mode = "rmii";
153 phy-handle = <&ethphy1>;
139 pinctrl-names = "default"; 154 pinctrl-names = "default";
140 pinctrl-0 = <&pinctrl_fec1>; 155 pinctrl-0 = <&pinctrl_fec1>;
141 status = "okay"; 156 status = "okay";
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 5ef14de00a29..3d0c5d65c741 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -84,7 +84,8 @@ CONFIG_DEBUG_GPIO=y
84CONFIG_POWER_SUPPLY=y 84CONFIG_POWER_SUPPLY=y
85CONFIG_BATTERY_SBS=y 85CONFIG_BATTERY_SBS=y
86CONFIG_CHARGER_TPS65090=y 86CONFIG_CHARGER_TPS65090=y
87# CONFIG_HWMON is not set 87CONFIG_HWMON=y
88CONFIG_SENSORS_LM90=y
88CONFIG_THERMAL=y 89CONFIG_THERMAL=y
89CONFIG_EXYNOS_THERMAL=y 90CONFIG_EXYNOS_THERMAL=y
90CONFIG_EXYNOS_THERMAL_CORE=y 91CONFIG_EXYNOS_THERMAL_CORE=y
@@ -109,11 +110,26 @@ CONFIG_REGULATOR_S2MPA01=y
109CONFIG_REGULATOR_S2MPS11=y 110CONFIG_REGULATOR_S2MPS11=y
110CONFIG_REGULATOR_S5M8767=y 111CONFIG_REGULATOR_S5M8767=y
111CONFIG_REGULATOR_TPS65090=y 112CONFIG_REGULATOR_TPS65090=y
113CONFIG_DRM=y
114CONFIG_DRM_BRIDGE=y
115CONFIG_DRM_PTN3460=y
116CONFIG_DRM_PS8622=y
117CONFIG_DRM_EXYNOS=y
118CONFIG_DRM_EXYNOS_FIMD=y
119CONFIG_DRM_EXYNOS_DP=y
120CONFIG_DRM_PANEL=y
121CONFIG_DRM_PANEL_SIMPLE=y
112CONFIG_FB=y 122CONFIG_FB=y
113CONFIG_FB_MODE_HELPERS=y 123CONFIG_FB_MODE_HELPERS=y
114CONFIG_FB_SIMPLE=y 124CONFIG_FB_SIMPLE=y
115CONFIG_EXYNOS_VIDEO=y 125CONFIG_EXYNOS_VIDEO=y
116CONFIG_EXYNOS_MIPI_DSI=y 126CONFIG_EXYNOS_MIPI_DSI=y
127CONFIG_BACKLIGHT_LCD_SUPPORT=y
128CONFIG_LCD_CLASS_DEVICE=y
129CONFIG_LCD_PLATFORM=y
130CONFIG_BACKLIGHT_CLASS_DEVICE=y
131CONFIG_BACKLIGHT_GENERIC=y
132CONFIG_BACKLIGHT_PWM=y
117CONFIG_FRAMEBUFFER_CONSOLE=y 133CONFIG_FRAMEBUFFER_CONSOLE=y
118CONFIG_FONTS=y 134CONFIG_FONTS=y
119CONFIG_FONT_7x14=y 135CONFIG_FONT_7x14=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2328fe752e9c..bc393b7e5ece 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -338,6 +338,7 @@ CONFIG_USB=y
338CONFIG_USB_XHCI_HCD=y 338CONFIG_USB_XHCI_HCD=y
339CONFIG_USB_XHCI_MVEBU=y 339CONFIG_USB_XHCI_MVEBU=y
340CONFIG_USB_EHCI_HCD=y 340CONFIG_USB_EHCI_HCD=y
341CONFIG_USB_EHCI_EXYNOS=y
341CONFIG_USB_EHCI_TEGRA=y 342CONFIG_USB_EHCI_TEGRA=y
342CONFIG_USB_EHCI_HCD_STI=y 343CONFIG_USB_EHCI_HCD_STI=y
343CONFIG_USB_EHCI_HCD_PLATFORM=y 344CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c2c3a852af9f..667d9d52aa01 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -68,7 +68,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
68CONFIG_CPU_FREQ_GOV_POWERSAVE=y 68CONFIG_CPU_FREQ_GOV_POWERSAVE=y
69CONFIG_CPU_FREQ_GOV_USERSPACE=y 69CONFIG_CPU_FREQ_GOV_USERSPACE=y
70CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y 70CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
71CONFIG_GENERIC_CPUFREQ_CPU0=y 71CONFIG_CPUFREQ_DT=y
72# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set 72# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
73CONFIG_CPU_IDLE=y 73CONFIG_CPU_IDLE=y
74CONFIG_BINFMT_MISC=y 74CONFIG_BINFMT_MISC=y
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 705bb7620673..0c3f5a0dafd3 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -413,6 +413,7 @@
413#define __NR_getrandom (__NR_SYSCALL_BASE+384) 413#define __NR_getrandom (__NR_SYSCALL_BASE+384)
414#define __NR_memfd_create (__NR_SYSCALL_BASE+385) 414#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
415#define __NR_bpf (__NR_SYSCALL_BASE+386) 415#define __NR_bpf (__NR_SYSCALL_BASE+386)
416#define __NR_execveat (__NR_SYSCALL_BASE+387)
416 417
417/* 418/*
418 * The following SWIs are ARM private. 419 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index e51833f8cc38..05745eb838c5 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -396,6 +396,7 @@
396 CALL(sys_getrandom) 396 CALL(sys_getrandom)
397/* 385 */ CALL(sys_memfd_create) 397/* 385 */ CALL(sys_memfd_create)
398 CALL(sys_bpf) 398 CALL(sys_bpf)
399 CALL(sys_execveat)
399#ifndef syscalls_counted 400#ifndef syscalls_counted
400.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 401.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
401#define syscalls_counted 402#define syscalls_counted
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
index 6e4379c67cbc..592dda3f21ff 100644
--- a/arch/arm/kernel/perf_regs.c
+++ b/arch/arm/kernel/perf_regs.c
@@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
28{ 28{
29 return PERF_SAMPLE_REGS_ABI_32; 29 return PERF_SAMPLE_REGS_ABI_32;
30} 30}
31
32void perf_get_regs_user(struct perf_regs *regs_user,
33 struct pt_regs *regs,
34 struct pt_regs *regs_user_copy)
35{
36 regs_user->regs = task_pt_regs(current);
37 regs_user->abi = perf_reg_abi(current);
38}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f9c863911038..715ae19bc7c8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v)
1046 seq_printf(m, "model name\t: %s rev %d (%s)\n", 1046 seq_printf(m, "model name\t: %s rev %d (%s)\n",
1047 cpu_name, cpuid & 15, elf_platform); 1047 cpu_name, cpuid & 15, elf_platform);
1048 1048
1049#if defined(CONFIG_SMP)
1050 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1051 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1052 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1053#else
1054 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1055 loops_per_jiffy / (500000/HZ),
1056 (loops_per_jiffy / (5000/HZ)) % 100);
1057#endif
1049 /* dump out the processor features */ 1058 /* dump out the processor features */
1050 seq_puts(m, "Features\t: "); 1059 seq_puts(m, "Features\t: ");
1051 1060
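The hunk above brings the BogoMIPS line back to /proc/cpuinfo; the X.YY figure is pure integer arithmetic on loops_per_jiffy. A small sketch of that formatting, using made-up HZ and loops_per_jiffy values:

#include <stdio.h>

#define HZ 100			/* assumed tick rate for the example */

int main(void)
{
	unsigned long loops_per_jiffy = 4980736UL;	/* hypothetical value */

	/* same integer math as the seq_printf() in the hunk */
	printf("BogoMIPS\t: %lu.%02lu\n",
	       loops_per_jiffy / (500000UL / HZ),
	       (loops_per_jiffy / (5000UL / HZ)) % 100);
	return 0;
}

/* prints "BogoMIPS        : 996.14" for these example inputs */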
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5e6052e18850..86ef244c5a24 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
387 387
388void __init smp_cpus_done(unsigned int max_cpus) 388void __init smp_cpus_done(unsigned int max_cpus)
389{ 389{
390 int cpu;
391 unsigned long bogosum = 0;
392
393 for_each_online_cpu(cpu)
394 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
395
396 printk(KERN_INFO "SMP: Total of %d processors activated "
397 "(%lu.%02lu BogoMIPS).\n",
398 num_online_cpus(),
399 bogosum / (500000/HZ),
400 (bogosum / (5000/HZ)) % 100);
401
390 hyp_mode_check(); 402 hyp_mode_check();
391} 403}
392 404
diff --git a/arch/arm/mach-at91/board-dt-sama5.c b/arch/arm/mach-at91/board-dt-sama5.c
index 8fb9ef5333f1..97f7367d32b8 100644
--- a/arch/arm/mach-at91/board-dt-sama5.c
+++ b/arch/arm/mach-at91/board-dt-sama5.c
@@ -17,6 +17,7 @@
17#include <linux/of_platform.h> 17#include <linux/of_platform.h>
18#include <linux/phy.h> 18#include <linux/phy.h>
19#include <linux/clk-provider.h> 19#include <linux/clk-provider.h>
20#include <linux/phy.h>
20 21
21#include <asm/setup.h> 22#include <asm/setup.h>
22#include <asm/irq.h> 23#include <asm/irq.h>
@@ -26,8 +27,25 @@
26 27
27#include "generic.h" 28#include "generic.h"
28 29
30static int ksz8081_phy_fixup(struct phy_device *phy)
31{
32 int value;
33
34 value = phy_read(phy, 0x16);
35 value &= ~0x20;
36 phy_write(phy, 0x16, value);
37
38 return 0;
39}
40
29static void __init sama5_dt_device_init(void) 41static void __init sama5_dt_device_init(void)
30{ 42{
43 if (of_machine_is_compatible("atmel,sama5d4ek") &&
44 IS_ENABLED(CONFIG_PHYLIB)) {
45 phy_register_fixup_for_id("fc028000.etherne:00",
46 ksz8081_phy_fixup);
47 }
48
31 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 49 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
32} 50}
33 51
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 5951660d1bd2..2daef619d053 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -144,7 +144,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
144 post_div_table[1].div = 1; 144 post_div_table[1].div = 1;
145 post_div_table[2].div = 1; 145 post_div_table[2].div = 1;
146 video_div_table[1].div = 1; 146 video_div_table[1].div = 1;
147 video_div_table[2].div = 1; 147 video_div_table[3].div = 1;
148 } 148 }
149 149
150 clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); 150 clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/arch/arm/mach-imx/clk-imx6sx.c b/arch/arm/mach-imx/clk-imx6sx.c
index 17354a11356f..5a3e5a159e70 100644
--- a/arch/arm/mach-imx/clk-imx6sx.c
+++ b/arch/arm/mach-imx/clk-imx6sx.c
@@ -558,6 +558,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
558 clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]); 558 clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
559 clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]); 559 clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
560 560
561 clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
562 clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
563
561 /* Set initial power mode */ 564 /* Set initial power mode */
562 imx6q_set_lpm(WAIT_CLOCKED); 565 imx6q_set_lpm(WAIT_CLOCKED);
563} 566}
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 608079a1aba6..b61c049f92d6 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -77,6 +77,24 @@ MACHINE_END
77#endif 77#endif
78 78
79#ifdef CONFIG_ARCH_OMAP3 79#ifdef CONFIG_ARCH_OMAP3
80/* Some boards need board name for legacy userspace in /proc/cpuinfo */
81static const char *const n900_boards_compat[] __initconst = {
82 "nokia,omap3-n900",
83 NULL,
84};
85
86DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
87 .reserve = omap_reserve,
88 .map_io = omap3_map_io,
89 .init_early = omap3430_init_early,
90 .init_machine = omap_generic_init,
91 .init_late = omap3_init_late,
92 .init_time = omap3_sync32k_timer_init,
93 .dt_compat = n900_boards_compat,
94 .restart = omap3xxx_restart,
95MACHINE_END
96
97/* Generic omap3 boards, most boards can use these */
80static const char *const omap3_boards_compat[] __initconst = { 98static const char *const omap3_boards_compat[] __initconst = {
81 "ti,omap3430", 99 "ti,omap3430",
82 "ti,omap3", 100 "ti,omap3",
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 377eea849e7b..db57741c9c8a 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -249,6 +249,7 @@ extern void omap4_cpu_die(unsigned int cpu);
249extern struct smp_operations omap4_smp_ops; 249extern struct smp_operations omap4_smp_ops;
250 250
251extern void omap5_secondary_startup(void); 251extern void omap5_secondary_startup(void);
252extern void omap5_secondary_hyp_startup(void);
252#endif 253#endif
253 254
254#if defined(CONFIG_SMP) && defined(CONFIG_PM) 255#if defined(CONFIG_SMP) && defined(CONFIG_PM)
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index a3c013345c45..a80ac2d70bb1 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -286,6 +286,10 @@
286#define OMAP5XXX_CONTROL_STATUS 0x134 286#define OMAP5XXX_CONTROL_STATUS 0x134
287#define OMAP5_DEVICETYPE_MASK (0x7 << 6) 287#define OMAP5_DEVICETYPE_MASK (0x7 << 6)
288 288
289/* DRA7XX CONTROL CORE BOOTSTRAP */
290#define DRA7_CTRL_CORE_BOOTSTRAP 0x6c4
291#define DRA7_SPEEDSELECT_MASK (0x3 << 8)
292
289/* 293/*
290 * REVISIT: This list of registers is not comprehensive - there are more 294 * REVISIT: This list of registers is not comprehensive - there are more
291 * that should be added. 295 * that should be added.
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4993d4bfe9b2..6d1dffca6c7b 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -22,6 +22,7 @@
22 22
23/* Physical address needed since MMU not enabled yet on secondary core */ 23/* Physical address needed since MMU not enabled yet on secondary core */
24#define AUX_CORE_BOOT0_PA 0x48281800 24#define AUX_CORE_BOOT0_PA 0x48281800
25#define API_HYP_ENTRY 0x102
25 26
26/* 27/*
27 * OMAP5 specific entry point for secondary CPU to jump from ROM 28 * OMAP5 specific entry point for secondary CPU to jump from ROM
@@ -41,6 +42,26 @@ wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
41 b secondary_startup 42 b secondary_startup
42ENDPROC(omap5_secondary_startup) 43ENDPROC(omap5_secondary_startup)
43/* 44/*
45 * Same as omap5_secondary_startup except we call into the ROM to
46 * enable HYP mode first. This is called instead of
47 * omap5_secondary_startup if the primary CPU was put into HYP mode by
48 * the boot loader.
49 */
50ENTRY(omap5_secondary_hyp_startup)
51wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
52 ldr r0, [r2]
53 mov r0, r0, lsr #5
54 mrc p15, 0, r4, c0, c0, 5
55 and r4, r4, #0x0f
56 cmp r0, r4
57 bne wait_2
58 ldr r12, =API_HYP_ENTRY
59 adr r0, hyp_boot
60 smc #0
61hyp_boot:
62 b secondary_startup
63ENDPROC(omap5_secondary_hyp_startup)
64/*
44 * OMAP4 specific entry point for secondary CPU to jump from ROM 65 * OMAP4 specific entry point for secondary CPU to jump from ROM
45 * code. This routine also provides a holding flag into which 66 * code. This routine also provides a holding flag into which
46 * secondary core is held until we're ready for it to initialise. 67 * secondary core is held until we're ready for it to initialise.
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 256e84ef0f67..5305ec7341ec 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -22,6 +22,7 @@
22#include <linux/irqchip/arm-gic.h> 22#include <linux/irqchip/arm-gic.h>
23 23
24#include <asm/smp_scu.h> 24#include <asm/smp_scu.h>
25#include <asm/virt.h>
25 26
26#include "omap-secure.h" 27#include "omap-secure.h"
27#include "omap-wakeupgen.h" 28#include "omap-wakeupgen.h"
@@ -227,8 +228,16 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
227 if (omap_secure_apis_support()) 228 if (omap_secure_apis_support())
228 omap_auxcoreboot_addr(virt_to_phys(startup_addr)); 229 omap_auxcoreboot_addr(virt_to_phys(startup_addr));
229 else 230 else
230 writel_relaxed(virt_to_phys(omap5_secondary_startup), 231 /*
231 base + OMAP_AUX_CORE_BOOT_1); 232 * If the boot CPU is in HYP mode then start secondary
233 * CPU in HYP mode as well.
234 */
235 if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
236 writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
237 base + OMAP_AUX_CORE_BOOT_1);
238 else
239 writel_relaxed(virt_to_phys(omap5_secondary_startup),
240 base + OMAP_AUX_CORE_BOOT_1);
232 241
233} 242}
234 243
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 4f61148ec168..7d45c84c69ba 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -54,6 +54,7 @@
54 54
55#include "soc.h" 55#include "soc.h"
56#include "common.h" 56#include "common.h"
57#include "control.h"
57#include "powerdomain.h" 58#include "powerdomain.h"
58#include "omap-secure.h" 59#include "omap-secure.h"
59 60
@@ -496,7 +497,8 @@ static void __init realtime_counter_init(void)
496 void __iomem *base; 497 void __iomem *base;
497 static struct clk *sys_clk; 498 static struct clk *sys_clk;
498 unsigned long rate; 499 unsigned long rate;
499 unsigned int reg, num, den; 500 unsigned int reg;
501 unsigned long long num, den;
500 502
501 base = ioremap(REALTIME_COUNTER_BASE, SZ_32); 503 base = ioremap(REALTIME_COUNTER_BASE, SZ_32);
502 if (!base) { 504 if (!base) {
@@ -511,13 +513,42 @@ static void __init realtime_counter_init(void)
511 } 513 }
512 514
513 rate = clk_get_rate(sys_clk); 515 rate = clk_get_rate(sys_clk);
516
517 if (soc_is_dra7xx()) {
518 /*
519 * Errata i856 says the 32.768KHz crystal does not start at
520 * power on, so the CPU falls back to an emulated 32KHz clock
521 * based on sysclk / 610 instead. This causes the master counter
522 * frequency to not be 6.144MHz but at sysclk / 610 * 375 / 2
523 * (OR sysclk * 75 / 244)
524 *
525 * This affects at least the DRA7/AM572x 1.0, 1.1 revisions.
526 * Of course any board built without a populated 32.768KHz
527 * crystal would also need this fix even if the CPU is fixed
528 * later.
529 *
530 * Either case can be detected by using the two speedselect bits
531 * If they are not 0, then the 32.768KHz clock driving the
532 * coarse counter that corrects the fine counter every time it
533 * ticks is actually rate/610 rather than 32.768KHz and we
534 * should compensate to avoid the 570ppm (at 20MHz, much worse
535 * at other rates) too fast system time.
536 */
537 reg = omap_ctrl_readl(DRA7_CTRL_CORE_BOOTSTRAP);
538 if (reg & DRA7_SPEEDSELECT_MASK) {
539 num = 75;
540 den = 244;
541 goto sysclk1_based;
542 }
543 }
544
514 /* Numerator/denumerator values refer TRM Realtime Counter section */ 545 /* Numerator/denumerator values refer TRM Realtime Counter section */
515 switch (rate) { 546 switch (rate) {
516 case 1200000: 547 case 12000000:
517 num = 64; 548 num = 64;
518 den = 125; 549 den = 125;
519 break; 550 break;
520 case 1300000: 551 case 13000000:
521 num = 768; 552 num = 768;
522 den = 1625; 553 den = 1625;
523 break; 554 break;
@@ -529,11 +560,11 @@ static void __init realtime_counter_init(void)
529 num = 192; 560 num = 192;
530 den = 625; 561 den = 625;
531 break; 562 break;
532 case 2600000: 563 case 26000000:
533 num = 384; 564 num = 384;
534 den = 1625; 565 den = 1625;
535 break; 566 break;
536 case 2700000: 567 case 27000000:
537 num = 256; 568 num = 256;
538 den = 1125; 569 den = 1125;
539 break; 570 break;
@@ -545,6 +576,7 @@ static void __init realtime_counter_init(void)
545 break; 576 break;
546 } 577 }
547 578
579sysclk1_based:
548 /* Program numerator and denumerator registers */ 580 /* Program numerator and denumerator registers */
549 reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) & 581 reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) &
550 NUMERATOR_DENUMERATOR_MASK; 582 NUMERATOR_DENUMERATOR_MASK;
@@ -556,7 +588,7 @@ static void __init realtime_counter_init(void)
556 reg |= den; 588 reg |= den;
557 writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET); 589 writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);
558 590
559 arch_timer_freq = (rate / den) * num; 591 arch_timer_freq = DIV_ROUND_UP_ULL(rate * num, den);
560 set_cntfreq(); 592 set_cntfreq();
561 593
562 iounmap(base); 594 iounmap(base);
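The errata path above programs num = 75, den = 244 and switches the final computation to DIV_ROUND_UP_ULL over the full product. A stand-alone sketch of that arithmetic (the 20 MHz sysclk is only an example value), also showing how the old truncating form from the left-hand column would drift:

#include <stdio.h>
#include <stdint.h>

static uint64_t div_round_up_ull(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;		/* same rounding as DIV_ROUND_UP_ULL */
}

int main(void)
{
	uint64_t rate = 20000000ULL;	/* example sysclk */
	uint64_t num = 75, den = 244;	/* errata i856 ratio from the hunk */

	uint64_t freq_new = div_round_up_ull(rate * num, den);
	uint64_t freq_old = (rate / den) * num;	/* old truncating formula */

	printf("rounded  : %llu Hz\n", (unsigned long long)freq_new);
	printf("truncated: %llu Hz\n", (unsigned long long)freq_old);
	return 0;
}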
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index d226b71d21d5..a611f4852582 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -19,11 +19,37 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/of_platform.h> 20#include <linux/of_platform.h>
21#include <linux/irqchip.h> 21#include <linux/irqchip.h>
22#include <linux/clk-provider.h>
23#include <linux/clocksource.h>
24#include <linux/mfd/syscon.h>
25#include <linux/regmap.h>
22#include <asm/mach/arch.h> 26#include <asm/mach/arch.h>
23#include <asm/mach/map.h> 27#include <asm/mach/map.h>
24#include <asm/hardware/cache-l2x0.h> 28#include <asm/hardware/cache-l2x0.h>
25#include "core.h" 29#include "core.h"
26 30
31#define RK3288_GRF_SOC_CON0 0x244
32
33static void __init rockchip_timer_init(void)
34{
35 if (of_machine_is_compatible("rockchip,rk3288")) {
36 struct regmap *grf;
37
38 /*
39 * Disable auto jtag/sdmmc switching that causes issues
40 * with the mmc controllers making them unreliable
41 */
42 grf = syscon_regmap_lookup_by_compatible("rockchip,rk3288-grf");
43 if (!IS_ERR(grf))
44 regmap_write(grf, RK3288_GRF_SOC_CON0, 0x10000000);
45 else
46 pr_err("rockchip: could not get grf syscon\n");
47 }
48
49 of_clk_init(NULL);
50 clocksource_of_init();
51}
52
27static void __init rockchip_dt_init(void) 53static void __init rockchip_dt_init(void)
28{ 54{
29 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 55 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
@@ -42,6 +68,7 @@ static const char * const rockchip_board_dt_compat[] = {
42DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)") 68DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
43 .l2c_aux_val = 0, 69 .l2c_aux_val = 0,
44 .l2c_aux_mask = ~0, 70 .l2c_aux_mask = ~0,
71 .init_time = rockchip_timer_init,
45 .dt_compat = rockchip_board_dt_compat, 72 .dt_compat = rockchip_board_dt_compat,
46 .init_machine = rockchip_dt_init, 73 .init_machine = rockchip_dt_init,
47MACHINE_END 74MACHINE_END
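The 0x10000000 written above follows the usual Rockchip GRF convention in which the upper 16 bits of the word act as a write-enable mask for the lower 16 bits, so this write clears a single bit (bit 12) of GRF_SOC_CON0; reading that bit as the jtag/sdmmc auto-switch control is an assumption based on the comment in the hunk. A sketch of the encoding, modelled on the HIWORD_UPDATE pattern used elsewhere in Rockchip drivers:

#include <stdio.h>
#include <stdint.h>

/* illustrative helper: value goes in the low half-word, the write-enable
 * mask in the high half-word */
static uint32_t hiword_update(uint32_t val, uint32_t mask, unsigned int shift)
{
	return ((val << shift) & 0xffff) | (mask << (shift + 16));
}

int main(void)
{
	/* clear bit 12: value 0, mask 1 */
	printf("%#010x\n", (unsigned int)hiword_update(0, 1, 12));	/* 0x10000000 */
	return 0;
}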
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 79ad93dfdae4..d191cf419731 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -800,7 +800,14 @@ void __init r8a7740_init_irq_of(void)
800 void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10); 800 void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
801 void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4); 801 void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
802 802
803#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
804 void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000);
805 void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000);
806
807 gic_init(0, 29, gic_dist_base, gic_cpu_base);
808#else
803 irqchip_init(); 809 irqchip_init();
810#endif
804 811
805 /* route signals to GIC */ 812 /* route signals to GIC */
806 iowrite32(0x0, pfc_inta_ctrl); 813 iowrite32(0x0, pfc_inta_ctrl);
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 93ebe3430bfe..fb5e1bb34be8 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -595,6 +595,7 @@ static struct platform_device ipmmu_device = {
595 595
596static struct renesas_intc_irqpin_config irqpin0_platform_data = { 596static struct renesas_intc_irqpin_config irqpin0_platform_data = {
597 .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */ 597 .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
598 .control_parent = true,
598}; 599};
599 600
600static struct resource irqpin0_resources[] = { 601static struct resource irqpin0_resources[] = {
@@ -656,6 +657,7 @@ static struct platform_device irqpin1_device = {
656 657
657static struct renesas_intc_irqpin_config irqpin2_platform_data = { 658static struct renesas_intc_irqpin_config irqpin2_platform_data = {
658 .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */ 659 .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
660 .control_parent = true,
659}; 661};
660 662
661static struct resource irqpin2_resources[] = { 663static struct resource irqpin2_resources[] = {
@@ -686,6 +688,7 @@ static struct platform_device irqpin2_device = {
686 688
687static struct renesas_intc_irqpin_config irqpin3_platform_data = { 689static struct renesas_intc_irqpin_config irqpin3_platform_data = {
688 .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */ 690 .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
691 .control_parent = true,
689}; 692};
690 693
691static struct resource irqpin3_resources[] = { 694static struct resource irqpin3_resources[] = {
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 59424937e52b..9fe8e241335c 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
220 static const char units[] = "KMGTPE"; 220 static const char units[] = "KMGTPE";
221 u64 prot = val & pg_level[level].mask; 221 u64 prot = val & pg_level[level].mask;
222 222
223 if (addr < USER_PGTABLES_CEILING)
224 return;
225
226 if (!st->level) { 223 if (!st->level) {
227 st->level = level; 224 st->level = level;
228 st->current_prot = prot; 225 st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
308 pgd_t *pgd = swapper_pg_dir; 305 pgd_t *pgd = swapper_pg_dir;
309 struct pg_state st; 306 struct pg_state st;
310 unsigned long addr; 307 unsigned long addr;
311 unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE; 308 unsigned i;
312 309
313 memset(&st, 0, sizeof(st)); 310 memset(&st, 0, sizeof(st));
314 st.seq = m; 311 st.seq = m;
315 st.marker = address_markers; 312 st.marker = address_markers;
316 313
317 pgd += pgdoff; 314 for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
318
319 for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
320 addr = i * PGDIR_SIZE; 315 addr = i * PGDIR_SIZE;
321 if (!pgd_none(*pgd)) { 316 if (!pgd_none(*pgd)) {
322 walk_pud(&st, pgd, addr); 317 walk_pud(&st, pgd, addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 98ad9c79ea0e..2495c8cb47ba 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
658 .start = (unsigned long)_stext, 658 .start = (unsigned long)_stext,
659 .end = (unsigned long)__init_begin, 659 .end = (unsigned long)__init_begin,
660#ifdef CONFIG_ARM_LPAE 660#ifdef CONFIG_ARM_LPAE
661 .mask = ~PMD_SECT_RDONLY, 661 .mask = ~L_PMD_SECT_RDONLY,
662 .prot = PMD_SECT_RDONLY, 662 .prot = L_PMD_SECT_RDONLY,
663#else 663#else
664 .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), 664 .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
665 .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, 665 .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cda7c40999b6..4e6ef896c619 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
1329static void __init map_lowmem(void) 1329static void __init map_lowmem(void)
1330{ 1330{
1331 struct memblock_region *reg; 1331 struct memblock_region *reg;
1332 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 1332 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
1333 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1333 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
1334 1334
1335 /* Map all the lowmem memory banks. */ 1335 /* Map all the lowmem memory banks. */
1336 for_each_memblock(memory, reg) { 1336 for_each_memblock(memory, reg) {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index dd301be89ecc..5376d908eabe 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,6 +1,7 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_POSIX_MQUEUE=y 3CONFIG_POSIX_MQUEUE=y
4CONFIG_FHANDLE=y
4CONFIG_AUDIT=y 5CONFIG_AUDIT=y
5CONFIG_NO_HZ_IDLE=y 6CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y 7CONFIG_HIGH_RES_TIMERS=y
@@ -13,14 +14,12 @@ CONFIG_TASK_IO_ACCOUNTING=y
13CONFIG_IKCONFIG=y 14CONFIG_IKCONFIG=y
14CONFIG_IKCONFIG_PROC=y 15CONFIG_IKCONFIG_PROC=y
15CONFIG_LOG_BUF_SHIFT=14 16CONFIG_LOG_BUF_SHIFT=14
16CONFIG_RESOURCE_COUNTERS=y
17CONFIG_MEMCG=y 17CONFIG_MEMCG=y
18CONFIG_MEMCG_SWAP=y 18CONFIG_MEMCG_SWAP=y
19CONFIG_MEMCG_KMEM=y 19CONFIG_MEMCG_KMEM=y
20CONFIG_CGROUP_HUGETLB=y 20CONFIG_CGROUP_HUGETLB=y
21# CONFIG_UTS_NS is not set 21# CONFIG_UTS_NS is not set
22# CONFIG_IPC_NS is not set 22# CONFIG_IPC_NS is not set
23# CONFIG_PID_NS is not set
24# CONFIG_NET_NS is not set 23# CONFIG_NET_NS is not set
25CONFIG_SCHED_AUTOGROUP=y 24CONFIG_SCHED_AUTOGROUP=y
26CONFIG_BLK_DEV_INITRD=y 25CONFIG_BLK_DEV_INITRD=y
@@ -92,7 +91,6 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
92CONFIG_SERIAL_OF_PLATFORM=y 91CONFIG_SERIAL_OF_PLATFORM=y
93CONFIG_VIRTIO_CONSOLE=y 92CONFIG_VIRTIO_CONSOLE=y
94# CONFIG_HW_RANDOM is not set 93# CONFIG_HW_RANDOM is not set
95# CONFIG_HMC_DRV is not set
96CONFIG_SPI=y 94CONFIG_SPI=y
97CONFIG_SPI_PL022=y 95CONFIG_SPI_PL022=y
98CONFIG_GPIO_PL061=y 96CONFIG_GPIO_PL061=y
@@ -133,6 +131,8 @@ CONFIG_EXT3_FS=y
133CONFIG_EXT4_FS=y 131CONFIG_EXT4_FS=y
134CONFIG_FANOTIFY=y 132CONFIG_FANOTIFY=y
135CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y 133CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
134CONFIG_QUOTA=y
135CONFIG_AUTOFS4_FS=y
136CONFIG_FUSE_FS=y 136CONFIG_FUSE_FS=y
137CONFIG_CUSE=y 137CONFIG_CUSE=y
138CONFIG_VFAT_FS=y 138CONFIG_VFAT_FS=y
@@ -152,14 +152,15 @@ CONFIG_MAGIC_SYSRQ=y
152CONFIG_DEBUG_KERNEL=y 152CONFIG_DEBUG_KERNEL=y
153CONFIG_LOCKUP_DETECTOR=y 153CONFIG_LOCKUP_DETECTOR=y
154# CONFIG_SCHED_DEBUG is not set 154# CONFIG_SCHED_DEBUG is not set
155# CONFIG_DEBUG_PREEMPT is not set
155# CONFIG_FTRACE is not set 156# CONFIG_FTRACE is not set
157CONFIG_KEYS=y
156CONFIG_SECURITY=y 158CONFIG_SECURITY=y
157CONFIG_CRYPTO_ANSI_CPRNG=y 159CONFIG_CRYPTO_ANSI_CPRNG=y
158CONFIG_ARM64_CRYPTO=y 160CONFIG_ARM64_CRYPTO=y
159CONFIG_CRYPTO_SHA1_ARM64_CE=y 161CONFIG_CRYPTO_SHA1_ARM64_CE=y
160CONFIG_CRYPTO_SHA2_ARM64_CE=y 162CONFIG_CRYPTO_SHA2_ARM64_CE=y
161CONFIG_CRYPTO_GHASH_ARM64_CE=y 163CONFIG_CRYPTO_GHASH_ARM64_CE=y
162CONFIG_CRYPTO_AES_ARM64_CE=y
163CONFIG_CRYPTO_AES_ARM64_CE_CCM=y 164CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
164CONFIG_CRYPTO_AES_ARM64_CE_BLK=y 165CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
165CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y 166CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b1fa4e614718..fbe0ca31a99c 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -21,6 +21,7 @@
21 21
22#include <asm/barrier.h> 22#include <asm/barrier.h>
23 23
24#include <linux/bug.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/types.h> 26#include <linux/types.h>
26 27
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index ace70682499b..8e797b2fcc01 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
39 u64 reg_id_aa64pfr0; 39 u64 reg_id_aa64pfr0;
40 u64 reg_id_aa64pfr1; 40 u64 reg_id_aa64pfr1;
41 41
42 u32 reg_id_dfr0;
42 u32 reg_id_isar0; 43 u32 reg_id_isar0;
43 u32 reg_id_isar1; 44 u32 reg_id_isar1;
44 u32 reg_id_isar2; 45 u32 reg_id_isar2;
@@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
51 u32 reg_id_mmfr3; 52 u32 reg_id_mmfr3;
52 u32 reg_id_pfr0; 53 u32 reg_id_pfr0;
53 u32 reg_id_pfr1; 54 u32 reg_id_pfr1;
55
56 u32 reg_mvfr0;
57 u32 reg_mvfr1;
58 u32 reg_mvfr2;
54}; 59};
55 60
56DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data); 61DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index d34189bceff7..9ce3e680ae1c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,13 +52,14 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
52 dev->archdata.dma_ops = ops; 52 dev->archdata.dma_ops = ops;
53} 53}
54 54
55static inline int set_arch_dma_coherent_ops(struct device *dev) 55static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
56 struct iommu_ops *iommu, bool coherent)
56{ 57{
57 dev->archdata.dma_coherent = true; 58 dev->archdata.dma_coherent = coherent;
58 set_dma_ops(dev, &coherent_swiotlb_dma_ops); 59 if (coherent)
59 return 0; 60 set_dma_ops(dev, &coherent_swiotlb_dma_ops);
60} 61}
61#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops 62#define arch_setup_dma_ops arch_setup_dma_ops
62 63
63/* do not use this function in a driver */ 64/* do not use this function in a driver */
64static inline bool is_device_dma_coherent(struct device *dev) 65static inline bool is_device_dma_coherent(struct device *dev)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 8127e45e2637..865a7e28ea2d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
41static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) 41static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
42{ 42{
43 vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; 43 vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
44 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
45 vcpu->arch.hcr_el2 &= ~HCR_RW;
44} 46}
45 47
46static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) 48static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index df22314f57cf..210d632aa5ad 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -298,7 +298,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
298#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) 298#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
299#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) 299#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
300 300
301#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
302#define pud_write(pud) pte_write(pud_pte(pud)) 301#define pud_write(pud) pte_write(pud_pte(pud))
303#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT) 302#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
304 303
@@ -401,7 +400,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
401 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); 400 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
402} 401}
403 402
404#define pud_page(pud) pmd_page(pud_pmd(pud)) 403#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
405 404
406#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */ 405#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
407 406
@@ -437,6 +436,8 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
437 return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr); 436 return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
438} 437}
439 438
439#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
440
440#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */ 441#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
441 442
442#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) 443#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 286b1bec547c..f9be30ea1cbd 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -31,6 +31,7 @@
31 31
32#include <asm/fpsimd.h> 32#include <asm/fpsimd.h>
33#include <asm/hw_breakpoint.h> 33#include <asm/hw_breakpoint.h>
34#include <asm/pgtable-hwdef.h>
34#include <asm/ptrace.h> 35#include <asm/ptrace.h>
35#include <asm/types.h> 36#include <asm/types.h>
36 37
@@ -123,9 +124,6 @@ struct task_struct;
123/* Free all resources held by a thread. */ 124/* Free all resources held by a thread. */
124extern void release_thread(struct task_struct *); 125extern void release_thread(struct task_struct *);
125 126
126/* Prepare to copy thread state - unlazy all lazy status */
127#define prepare_to_copy(tsk) do { } while (0)
128
129unsigned long get_wchan(struct task_struct *p); 127unsigned long get_wchan(struct task_struct *p);
130 128
131#define cpu_relax() barrier() 129#define cpu_relax() barrier()
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 49c9aefd24a5..23e9432ac112 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
44#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) 44#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
45#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) 45#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
46 46
47#define __NR_compat_syscalls 386 47#define __NR_compat_syscalls 388
48#endif 48#endif
49 49
50#define __ARCH_WANT_SYS_CLONE 50#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 8893cebcea5b..27224426e0bf 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -795,3 +795,5 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
795__SYSCALL(__NR_memfd_create, sys_memfd_create) 795__SYSCALL(__NR_memfd_create, sys_memfd_create)
796#define __NR_bpf 386 796#define __NR_bpf 386
797__SYSCALL(__NR_bpf, sys_bpf) 797__SYSCALL(__NR_bpf, sys_bpf)
798#define __NR_execveat 387
799__SYSCALL(__NR_execveat, compat_sys_execveat)
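With __NR_execveat wired up as compat syscall 387 above, the __NR_compat_syscalls bump to 388 earlier in this patch is exactly "highest number + 1". A trivial stand-alone check of that relationship (values copied from the hunks, not new definitions):

#include <assert.h>

#define __NR_execveat		387	/* from unistd32.h above */
#define __NR_compat_syscalls	388	/* from unistd.h above */

static_assert(__NR_compat_syscalls == __NR_execveat + 1,
	      "syscall table must hold entries 0..__NR_execveat");

int main(void)
{
	return 0;
}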
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 57b641747534..07d435cf2eea 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
147 * If we have AArch32, we care about 32-bit features for compat. These 147 * If we have AArch32, we care about 32-bit features for compat. These
148 * registers should be RES0 otherwise. 148 * registers should be RES0 otherwise.
149 */ 149 */
150 diff |= CHECK(id_dfr0, boot, cur, cpu);
150 diff |= CHECK(id_isar0, boot, cur, cpu); 151 diff |= CHECK(id_isar0, boot, cur, cpu);
151 diff |= CHECK(id_isar1, boot, cur, cpu); 152 diff |= CHECK(id_isar1, boot, cur, cpu);
152 diff |= CHECK(id_isar2, boot, cur, cpu); 153 diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
165 diff |= CHECK(id_pfr0, boot, cur, cpu); 166 diff |= CHECK(id_pfr0, boot, cur, cpu);
166 diff |= CHECK(id_pfr1, boot, cur, cpu); 167 diff |= CHECK(id_pfr1, boot, cur, cpu);
167 168
169 diff |= CHECK(mvfr0, boot, cur, cpu);
170 diff |= CHECK(mvfr1, boot, cur, cpu);
171 diff |= CHECK(mvfr2, boot, cur, cpu);
172
168 /* 173 /*
169 * Mismatched CPU features are a recipe for disaster. Don't even 174 * Mismatched CPU features are a recipe for disaster. Don't even
170 * pretend to support them. 175 * pretend to support them.
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
189 info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); 194 info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
190 info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); 195 info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
191 196
197 info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
192 info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); 198 info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
193 info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); 199 info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
194 info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); 200 info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
202 info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); 208 info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
203 info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); 209 info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
204 210
211 info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
212 info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
213 info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
214
205 cpuinfo_detect_icache_policy(info); 215 cpuinfo_detect_icache_policy(info);
206 216
207 check_local_cpu_errata(); 217 check_local_cpu_errata();
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 6fac253bc783..2bb4347d0edf 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
326 326
327 /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ 327 /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
328 efi_setup_idmap(); 328 efi_setup_idmap();
329 early_memunmap(memmap.map, memmap.map_end - memmap.map);
329} 330}
330 331
331static int __init remap_region(efi_memory_desc_t *md, void **new) 332static int __init remap_region(efi_memory_desc_t *md, void **new)
@@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
380 } 381 }
381 382
382 mapsize = memmap.map_end - memmap.map; 383 mapsize = memmap.map_end - memmap.map;
383 early_memunmap(memmap.map, mapsize);
384 384
385 if (efi_runtime_disabled()) { 385 if (efi_runtime_disabled()) {
386 pr_info("EFI runtime services will be disabled.\n"); 386 pr_info("EFI runtime services will be disabled.\n");
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index fd027b101de5..9b6f71db2709 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,7 @@
25#include <linux/mm.h> 25#include <linux/mm.h>
26#include <linux/moduleloader.h> 26#include <linux/moduleloader.h>
27#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
28#include <asm/alternative.h>
28#include <asm/insn.h> 29#include <asm/insn.h>
29#include <asm/sections.h> 30#include <asm/sections.h>
30 31
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 6762ad705587..3f62b35fb6f1 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
50 else 50 else
51 return PERF_SAMPLE_REGS_ABI_64; 51 return PERF_SAMPLE_REGS_ABI_64;
52} 52}
53
54void perf_get_regs_user(struct perf_regs *regs_user,
55 struct pt_regs *regs,
56 struct pt_regs *regs_user_copy)
57{
58 regs_user->regs = task_pt_regs(current);
59 regs_user->abi = perf_reg_abi(current);
60}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b80991166754..20fe2932ad0c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
402 request_standard_resources(); 402 request_standard_resources();
403 403
404 efi_idmap_init(); 404 efi_idmap_init();
405 early_ioremap_reset();
405 406
406 unflatten_device_tree(); 407 unflatten_device_tree();
407 408
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 4f93c67e63de..14944e5b28da 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -25,6 +25,7 @@
25#include <asm/cacheflush.h> 25#include <asm/cacheflush.h>
26#include <asm/cpu_ops.h> 26#include <asm/cpu_ops.h>
27#include <asm/cputype.h> 27#include <asm/cputype.h>
28#include <asm/io.h>
28#include <asm/smp_plat.h> 29#include <asm/smp_plat.h>
29 30
30extern void secondary_holding_pen(void); 31extern void secondary_holding_pen(void);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 3771b72b6569..2d6b6065fe7f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -5,6 +5,7 @@
5#include <asm/debug-monitors.h> 5#include <asm/debug-monitors.h>
6#include <asm/pgtable.h> 6#include <asm/pgtable.h>
7#include <asm/memory.h> 7#include <asm/memory.h>
8#include <asm/mmu_context.h>
8#include <asm/smp_plat.h> 9#include <asm/smp_plat.h>
9#include <asm/suspend.h> 10#include <asm/suspend.h>
10#include <asm/tlbflush.h> 11#include <asm/tlbflush.h>
@@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
98 */ 99 */
99 ret = __cpu_suspend_enter(arg, fn); 100 ret = __cpu_suspend_enter(arg, fn);
100 if (ret == 0) { 101 if (ret == 0) {
101 cpu_switch_mm(mm->pgd, mm); 102 /*
103 * We are resuming from reset with TTBR0_EL1 set to the
104 * idmap to enable the MMU; restore the active_mm mappings in
105 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
106 * the thread entered __cpu_suspend with TTBR0_EL1 set to
107 * reserved TTBR0 page tables and should be restored as such.
108 */
109 if (mm == &init_mm)
110 cpu_set_reserved_ttbr0();
111 else
112 cpu_switch_mm(mm->pgd, mm);
113
102 flush_tlb_all(); 114 flush_tlb_all();
103 115
104 /* 116 /*
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909fb0a1a..c3ca89c27c6b 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
1014 * Instead, we invalidate Stage-2 for this IPA, and the 1014 * Instead, we invalidate Stage-2 for this IPA, and the
1015 * whole of Stage-1. Weep... 1015 * whole of Stage-1. Weep...
1016 */ 1016 */
1017 lsr x1, x1, #12
1017 tlbi ipas2e1is, x1 1018 tlbi ipas2e1is, x1
1018 /* 1019 /*
1019 * We have to ensure completion of the invalidation at Stage-2, 1020 * We have to ensure completion of the invalidation at Stage-2,
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 70a7816535cd..0b4326578985 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
90 if (!cpu_has_32bit_el1()) 90 if (!cpu_has_32bit_el1())
91 return -EINVAL; 91 return -EINVAL;
92 cpu_reset = &default_regs_reset32; 92 cpu_reset = &default_regs_reset32;
93 vcpu->arch.hcr_el2 &= ~HCR_RW;
94 } else { 93 } else {
95 cpu_reset = &default_regs_reset; 94 cpu_reset = &default_regs_reset;
96 } 95 }
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index bac492c12fcc..c95464a33f36 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -335,14 +335,8 @@ static int keep_initrd;
335 335
336void free_initrd_mem(unsigned long start, unsigned long end) 336void free_initrd_mem(unsigned long start, unsigned long end)
337{ 337{
338 if (!keep_initrd) { 338 if (!keep_initrd)
339 if (start == initrd_start)
340 start = round_down(start, PAGE_SIZE);
341 if (end == initrd_end)
342 end = round_up(end, PAGE_SIZE);
343
344 free_reserved_area((void *)start, (void *)end, 0, "initrd"); 339 free_reserved_area((void *)start, (void *)end, 0, "initrd");
345 }
346} 340}
347 341
348static int __init keepinitrd_setup(char *__unused) 342static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 6f4bac969bf7..23eada79439c 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/delay.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
11#include <linux/mtd/mtd.h> 12#include <linux/mtd/mtd.h>
12#include <linux/mtd/partitions.h> 13#include <linux/mtd/partitions.h>
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index f3b51b57740a..95c39b95e97e 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
11 11
12 12
13 13
14#define NR_syscalls 318 /* length of syscall table */ 14#define NR_syscalls 319 /* length of syscall table */
15 15
16/* 16/*
17 * The following defines stop scripts/checksyscalls.sh from complaining about 17 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 4c2240c1b0cb..461079560c78 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -331,5 +331,6 @@
331#define __NR_getrandom 1339 331#define __NR_getrandom 1339
332#define __NR_memfd_create 1340 332#define __NR_memfd_create 1340
333#define __NR_bpf 1341 333#define __NR_bpf 1341
334#define __NR_execveat 1342
334 335
335#endif /* _UAPI_ASM_IA64_UNISTD_H */ 336#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 615ef81def49..e795cb848154 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
893} 893}
894 894
895/* wrapper to silence section mismatch warning */ 895/* wrapper to silence section mismatch warning */
896int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) 896int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
897{ 897{
898 return _acpi_map_lsapic(handle, physid, pcpu); 898 return _acpi_map_lsapic(handle, physid, pcpu);
899} 899}
900EXPORT_SYMBOL(acpi_map_lsapic); 900EXPORT_SYMBOL(acpi_map_cpu);
901 901
902int acpi_unmap_lsapic(int cpu) 902int acpi_unmap_cpu(int cpu)
903{ 903{
904 ia64_cpu_to_sapicid[cpu] = -1; 904 ia64_cpu_to_sapicid[cpu] = -1;
905 set_cpu_present(cpu, false); 905 set_cpu_present(cpu, false);
@@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
910 910
911 return (0); 911 return (0);
912} 912}
913 913EXPORT_SYMBOL(acpi_unmap_cpu);
914EXPORT_SYMBOL(acpi_unmap_lsapic);
915#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 914#endif /* CONFIG_ACPI_HOTPLUG_CPU */
916 915
917#ifdef CONFIG_ACPI_NUMA 916#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index f5e96dffc63c..fcf8b8cbca0b 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1779,6 +1779,7 @@ sys_call_table:
1779 data8 sys_getrandom 1779 data8 sys_getrandom
1780 data8 sys_memfd_create // 1340 1780 data8 sys_memfd_create // 1340
1781 data8 sys_bpf 1781 data8 sys_bpf
1782 data8 sys_execveat
1782 1783
1783 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1784 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1784#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ 1785#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 75e75d7b1702..244e0dbe45db 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 355 7#define NR_syscalls 356
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 2c1bec9a14b6..61fb6cb9d2ae 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -360,5 +360,6 @@
360#define __NR_getrandom 352 360#define __NR_getrandom 352
361#define __NR_memfd_create 353 361#define __NR_memfd_create 353
362#define __NR_bpf 354 362#define __NR_bpf 354
363#define __NR_execveat 355
363 364
364#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 365#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 2ca219e184cd..a0ec4303f2c8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
375 .long sys_getrandom 375 .long sys_getrandom
376 .long sys_memfd_create 376 .long sys_memfd_create
377 .long sys_bpf 377 .long sys_bpf
378 .long sys_execveat /* 355 */
378 379
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 51d5bb90d3e5..a223691dff4f 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
72 cpuinfo.has_div = fcpu_has(cpu, "altr,has-div"); 72 cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
73 cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul"); 73 cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
74 cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx"); 74 cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
75 cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
75 76
76 if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div) 77 if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
77 err_cpu("DIV"); 78 err_cpu("DIV");
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 83bca17d1008..0bdfd13ff98b 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
365 GET_THREAD_INFO r1 365 GET_THREAD_INFO r1
366 ldw r4, TI_PREEMPT_COUNT(r1) 366 ldw r4, TI_PREEMPT_COUNT(r1)
367 bne r4, r0, restore_all 367 bne r4, r0, restore_all
368
369need_resched:
370 ldw r4, TI_FLAGS(r1) /* ? Need resched set */ 368 ldw r4, TI_FLAGS(r1) /* ? Need resched set */
371 BTBZ r10, r4, TIF_NEED_RESCHED, restore_all 369 BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
372 ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */ 370 ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
373 andi r10, r4, ESTATUS_EPIE 371 andi r10, r4, ESTATUS_EPIE
374 beq r10, r0, restore_all 372 beq r10, r0, restore_all
375 movia r4, PREEMPT_ACTIVE 373 call preempt_schedule_irq
376 stw r4, TI_PREEMPT_COUNT(r1)
377 rdctl r10, status /* enable intrs again */
378 ori r10, r10 ,STATUS_PIE
379 wrctl status, r10
380 PUSH r1
381 call schedule
382 POP r1
383 mov r4, r0
384 stw r4, TI_PREEMPT_COUNT(r1)
385 rdctl r10, status /* disable intrs */
386 andi r10, r10, %lo(~STATUS_PIE)
387 wrctl status, r10
388 br need_resched
389#else
390 br restore_all
391#endif 374#endif
375 br restore_all
392 376
393/*********************************************************************** 377/***********************************************************************
394 * A few syscall wrappers 378 * A few syscall wrappers
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index d2d11b7055ba..8121aa6db2ff 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -33,11 +33,18 @@
33 33
34#endif /*!CONFIG_PA20*/ 34#endif /*!CONFIG_PA20*/
35 35
36/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */ 36/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
 37 We don't explicitly expose that "*a" may be written, because reload
 38 then fails to find a register in class R1_REGS when "a" needs to be
 39 reloaded while generating 64-bit PIC code. Instead, we clobber
40 memory to indicate to the compiler that the assembly code reads
41 or writes to items other than those listed in the input and output
42 operands. This may pessimize the code somewhat but __ldcw is
 43 usually used within code blocks surrounded by memory barriers. */
37#define __ldcw(a) ({ \ 44#define __ldcw(a) ({ \
38 unsigned __ret; \ 45 unsigned __ret; \
39 __asm__ __volatile__(__LDCW " 0(%2),%0" \ 46 __asm__ __volatile__(__LDCW " 0(%1),%0" \
40 : "=r" (__ret), "+m" (*(a)) : "r" (a)); \ 47 : "=r" (__ret) : "r" (a) : "memory"); \
41 __ret; \ 48 __ret; \
42}) 49})
43 50
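
For readers unfamiliar with the trade-off in the __ldcw() change above: dropping the "+m" output and adding a "memory" clobber turns the asm into a compiler-level barrier for all memory, rather than just the lock word. The following is an architecture-neutral sketch of what a GCC "memory" clobber buys; it is not the PA-RISC code, only an illustration of the inline-asm semantics the new __ldcw() relies on:

/* The classic compiler barrier: an empty asm with a "memory" clobber.
 * The compiler must assume the asm may read or write any memory, so it
 * cannot keep values cached in registers across it or reorder memory
 * accesses around it. (It does not order the CPU - that still needs
 * real hardware barriers.) */
#define barrier()       asm volatile("" ::: "memory")

static int payload;
static int ready;

static void publish(int v)
{
        payload = v;
        barrier();      /* the payload store may not be sunk below this */
        ready = 1;
}

Listing "+m" (*(a)) would be the more precise contract, but as the new comment explains it trips up reload for 64-bit PIC, so the coarser clobber is used instead.
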
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index d3feba5a275f..c154cebc1041 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
154MODULE_LICENSE("GPL"); 154MODULE_LICENSE("GPL");
155MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); 155MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
156 156
157MODULE_ALIAS_CRYPTO("sha1");
157MODULE_ALIAS_CRYPTO("sha1-powerpc"); 158MODULE_ALIAS_CRYPTO("sha1-powerpc");
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 19c36cba37c4..a46f5f45570c 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
86extern void reserve_crashkernel(void); 86extern void reserve_crashkernel(void);
87extern void machine_kexec_mask_interrupts(void); 87extern void machine_kexec_mask_interrupts(void);
88 88
89static inline bool kdump_in_progress(void)
90{
91 return crashing_cpu >= 0;
92}
93
89#else /* !CONFIG_KEXEC */ 94#else /* !CONFIG_KEXEC */
90static inline void crash_kexec_secondary(struct pt_regs *regs) { } 95static inline void crash_kexec_secondary(struct pt_regs *regs) { }
91 96
@@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
106 return 0; 111 return 0;
107} 112}
108 113
114static inline bool kdump_in_progress(void)
115{
116 return false;
117}
118
109#endif /* CONFIG_KEXEC */ 119#endif /* CONFIG_KEXEC */
110#endif /* ! __ASSEMBLY__ */ 120#endif /* ! __ASSEMBLY__ */
111#endif /* __KERNEL__ */ 121#endif /* __KERNEL__ */
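
kdump_in_progress() is added to both halves of the #ifdef so that callers (such as the machine_kexec_64.c and lpar.c hunks later in this series) never need their own CONFIG_KEXEC guards. A small, generic sketch of that stub-header pattern follows; CONFIG_FOO and the helper names are hypothetical, chosen only to show the shape:

/* foo.h: real declaration when the feature is built in, a constant
 * inline stub otherwise. Call sites stay ifdef-free and the compiler
 * folds the disabled branch away entirely. */
#ifdef CONFIG_FOO
bool foo_in_progress(void);             /* defined in foo.c */
#else
static inline bool foo_in_progress(void)
{
        return false;
}
#endif

void prepare_cpus(void);                /* hypothetical helper */

/* caller.c */
static void maybe_prepare_cpus(void)
{
        if (!foo_in_progress())
                prepare_cpus();
}
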
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ce9577d693be..91062eef582f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
366SYSCALL_SPU(getrandom) 366SYSCALL_SPU(getrandom)
367SYSCALL_SPU(memfd_create) 367SYSCALL_SPU(memfd_create)
368SYSCALL_SPU(bpf) 368SYSCALL_SPU(bpf)
369COMPAT_SYS(execveat)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ebc4f165690a..0be6c681cab1 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -23,9 +23,9 @@
23#define THREAD_SIZE (1 << THREAD_SHIFT) 23#define THREAD_SIZE (1 << THREAD_SHIFT)
24 24
25#ifdef CONFIG_PPC64 25#ifdef CONFIG_PPC64
26#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT 26#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
27#else 27#else
28#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT 28#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
29#endif 29#endif
30 30
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
71#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) 71#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
72 72
73/* how to get the thread information struct from C */ 73/* how to get the thread information struct from C */
74register unsigned long __current_r1 asm("r1");
75static inline struct thread_info *current_thread_info(void) 74static inline struct thread_info *current_thread_info(void)
76{ 75{
77 /* gcc4, at least, is smart enough to turn this into a single 76 unsigned long val;
78 * rlwinm for ppc32 and clrrdi for ppc64 */ 77
79 return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1)); 78 asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
79
80 return (struct thread_info *)val;
80} 81}
81 82
82#endif /* __ASSEMBLY__ */ 83#endif /* __ASSEMBLY__ */
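
The thread_info change works because CURRENT_THREAD_INFO() is now wrapped in stringify_in_c(), so the same macro expands to raw assembly text in .S files and to a quoted string usable inside asm() in C. Below is a hedged sketch of that dual-use idiom on PowerPC; the GET_STACK() helper is illustrative, not kernel code:

#ifdef __ASSEMBLY__
/* In assembly files the tokens are emitted as-is. */
#define stringify_in_c(...)     __VA_ARGS__
#else
/* In C they become a string literal that can be pasted into asm(). */
#define __stringify_in_c(...)   #__VA_ARGS__
#define stringify_in_c(...)     __stringify_in_c(__VA_ARGS__) " "
#endif

/* Usable both as "GET_STACK r3" in a .S file and inside asm() below. */
#define GET_STACK(dest)         stringify_in_c(mr dest, 1)

#ifndef __ASSEMBLY__
static inline unsigned long current_stack_pointer_sketch(void)
{
        unsigned long sp;

        asm (GET_STACK(%0) : "=r" (sp));        /* copy r1 into sp */
        return sp;
}
#endif

Compared with the old global register variable on r1, asking for the value through an asm output lets the compiler pick any register for the result, which is what the patch is after.
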
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index e0da021caa00..36b79c31eedd 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 362 15#define __NR_syscalls 363
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index f55351f2e66e..ef5b5b1f3123 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
384#define __NR_getrandom 359 384#define __NR_getrandom 359
385#define __NR_memfd_create 360 385#define __NR_memfd_create 360
386#define __NR_bpf 361 386#define __NR_bpf 361
387#define __NR_execveat 362
387 388
388#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 389#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 879b3aacac32..f96d1ec24189 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
330 * using debugger IPI. 330 * using debugger IPI.
331 */ 331 */
332 332
333 if (crashing_cpu == -1) 333 if (!kdump_in_progress())
334 kexec_prepare_cpus(); 334 kexec_prepare_cpus();
335 335
336 pr_debug("kexec: Starting switchover sequence.\n"); 336 pr_debug("kexec: Starting switchover sequence.\n");
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ec017cb4446..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,6 +700,7 @@ void start_secondary(void *unused)
700 smp_store_cpu_info(cpu); 700 smp_store_cpu_info(cpu);
701 set_dec(tb_ticks_per_jiffy); 701 set_dec(tb_ticks_per_jiffy);
702 preempt_disable(); 702 preempt_disable();
703 cpu_callin_map[cpu] = 1;
703 704
704 if (smp_ops->setup_cpu) 705 if (smp_ops->setup_cpu)
705 smp_ops->setup_cpu(cpu); 706 smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@ void start_secondary(void *unused)
738 notify_cpu_starting(cpu); 739 notify_cpu_starting(cpu);
739 set_cpu_online(cpu, true); 740 set_cpu_online(cpu, true);
740 741
741 /*
742 * CPU must be marked active and online before we signal back to the
743 * master, because the scheduler needs to see the cpu_online and
744 * cpu_active bits set.
745 */
746 smp_wmb();
747 cpu_callin_map[cpu] = 1;
748
749 local_irq_enable(); 742 local_irq_enable();
750 743
751 cpu_startup_entry(CPUHP_ONLINE); 744 cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 54eca8b3b288..0509bca5e830 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
40 b 1f; \ 40 b 1f; \
41END_FTR_SECTION(0, 1); \ 41END_FTR_SECTION(0, 1); \
42 ld r12,opal_tracepoint_refcount@toc(r2); \ 42 ld r12,opal_tracepoint_refcount@toc(r2); \
43 std r12,32(r1); \
44 cmpdi r12,0; \ 43 cmpdi r12,0; \
45 bne- LABEL; \ 44 bne- LABEL; \
461: 451:
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 469751d92004..b5682fd6c984 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
43#include <asm/trace.h> 43#include <asm/trace.h>
44#include <asm/firmware.h> 44#include <asm/firmware.h>
45#include <asm/plpar_wrappers.h> 45#include <asm/plpar_wrappers.h>
46#include <asm/kexec.h>
46#include <asm/fadump.h> 47#include <asm/fadump.h>
47 48
48#include "pseries.h" 49#include "pseries.h"
@@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
267 * out to the user, but at least this will stop us from 268 * out to the user, but at least this will stop us from
268 * continuing on further and creating an even more 269 * continuing on further and creating an even more
269 * difficult to debug situation. 270 * difficult to debug situation.
271 *
 272 * There is a known problem when kdump'ing: if cpus are offline,
 273 * the above call will fail. Rather than panicking again, keep
274 * going and hope the kdump kernel is also little endian, which
275 * it usually is.
270 */ 276 */
271 if (rc) 277 if (rc && !kdump_in_progress())
272 panic("Could not enable big endian exceptions"); 278 panic("Could not enable big endian exceptions");
273 } 279 }
274#endif 280#endif
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 32040ace00ea..afbe07907c10 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -231,7 +231,7 @@ failed:
231struct dbfs_d2fc_hdr { 231struct dbfs_d2fc_hdr {
232 u64 len; /* Length of d2fc buffer without header */ 232 u64 len; /* Length of d2fc buffer without header */
233 u16 version; /* Version of header */ 233 u16 version; /* Version of header */
234 char tod_ext[16]; /* TOD clock for d2fc */ 234 char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
235 u64 count; /* Number of VM guests in d2fc buffer */ 235 u64 count; /* Number of VM guests in d2fc buffer */
236 char reserved[30]; 236 char reserved[30];
237} __attribute__ ((packed)); 237} __attribute__ ((packed));
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 37b9091ab8c0..16aa0c779e07 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
36 36
37static inline notrace unsigned long arch_local_save_flags(void) 37static inline notrace unsigned long arch_local_save_flags(void)
38{ 38{
39 return __arch_local_irq_stosm(0x00); 39 return __arch_local_irq_stnsm(0xff);
40} 40}
41 41
42static inline notrace unsigned long arch_local_irq_save(void) 42static inline notrace unsigned long arch_local_irq_save(void)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8beee1cceba4..98eb2a579223 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
67 set_clock_comparator(S390_lowcore.clock_comparator); 67 set_clock_comparator(S390_lowcore.clock_comparator);
68} 68}
69 69
70#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 70#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
71#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
71 72
72typedef unsigned long long cycles_t; 73typedef unsigned long long cycles_t;
73 74
74static inline void get_tod_clock_ext(char clk[16]) 75static inline void get_tod_clock_ext(char *clk)
75{ 76{
76 typedef struct { char _[sizeof(clk)]; } addrtype; 77 typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
77 78
78 asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc"); 79 asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
79} 80}
80 81
81static inline unsigned long long get_tod_clock(void) 82static inline unsigned long long get_tod_clock(void)
82{ 83{
83 unsigned char clk[16]; 84 unsigned char clk[STORE_CLOCK_EXT_SIZE];
85
84 get_tod_clock_ext(clk); 86 get_tod_clock_ext(clk);
85 return *((unsigned long long *)&clk[1]); 87 return *((unsigned long long *)&clk[1]);
86} 88}
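
The STORE_CLOCK_EXT_SIZE change above also quietly fixes a classic C pitfall: in the old get_tod_clock_ext(char clk[16]), the parameter decays to char *, so sizeof(clk) inside the function is the size of a pointer, not 16, and the asm constraint therefore covered only 8 of the 16 bytes stcke writes. A small standalone program (illustrative only) showing the decay:

#include <stdio.h>

/* "char clk[16]" in a parameter list is really "char *clk". */
static void in_callee(char clk[16])
{
        printf("sizeof in callee : %zu\n", sizeof(clk));  /* 8 on LP64 */
}

int main(void)
{
        char clk[16];

        printf("sizeof in caller : %zu\n", sizeof(clk));  /* 16 */
        in_callee(clk);
        return 0;
}
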
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 2b446cf0cc65..67878af257a0 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -289,7 +289,8 @@
289#define __NR_bpf 351 289#define __NR_bpf 351
290#define __NR_s390_pci_mmio_write 352 290#define __NR_s390_pci_mmio_write 352
291#define __NR_s390_pci_mmio_read 353 291#define __NR_s390_pci_mmio_read 353
292#define NR_syscalls 354 292#define __NR_execveat 354
293#define NR_syscalls 355
293 294
294/* 295/*
295 * There are some system calls that are not present on 64 bit, some 296 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a2987243bc76..939ec474b1dd 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
362SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) 362SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
363SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) 363SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
364SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) 364SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
365SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index f6b3cd056ec2..cc7328080b60 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
48 return false; 48 return false;
49} 49}
50 50
51static int check_per_event(unsigned short cause, unsigned long control,
52 struct pt_regs *regs)
53{
54 if (!(regs->psw.mask & PSW_MASK_PER))
55 return 0;
56 /* user space single step */
57 if (control == 0)
58 return 1;
59 /* over indication for storage alteration */
60 if ((control & 0x20200000) && (cause & 0x2000))
61 return 1;
62 if (cause & 0x8000) {
63 /* all branches */
64 if ((control & 0x80800000) == 0x80000000)
65 return 1;
66 /* branch into selected range */
67 if (((control & 0x80800000) == 0x80800000) &&
68 regs->psw.addr >= current->thread.per_user.start &&
69 regs->psw.addr <= current->thread.per_user.end)
70 return 1;
71 }
72 return 0;
73}
74
51int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) 75int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
52{ 76{
53 int fixup = probe_get_fixup_type(auprobe->insn); 77 int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
71 if (regs->psw.addr - utask->xol_vaddr == ilen) 95 if (regs->psw.addr - utask->xol_vaddr == ilen)
72 regs->psw.addr = utask->vaddr + ilen; 96 regs->psw.addr = utask->vaddr + ilen;
73 } 97 }
74 /* If per tracing was active generate trap */ 98 if (check_per_event(current->thread.per_event.cause,
75 if (regs->psw.mask & PSW_MASK_PER) 99 current->thread.per_user.control, regs)) {
76 do_per_trap(regs); 100 /* fix per address */
101 current->thread.per_event.address = utask->vaddr;
102 /* trigger per event */
103 set_pt_regs_flag(regs, PIF_PER_TRAP);
104 }
77 return 0; 105 return 0;
78} 106}
79 107
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
106 clear_thread_flag(TIF_UPROBE_SINGLESTEP); 134 clear_thread_flag(TIF_UPROBE_SINGLESTEP);
107 regs->int_code = auprobe->saved_int_code; 135 regs->int_code = auprobe->saved_int_code;
108 regs->psw.addr = current->utask->vaddr; 136 regs->psw.addr = current->utask->vaddr;
137 current->thread.per_event.address = current->utask->vaddr;
109} 138}
110 139
111unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, 140unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
146 __rc; \ 175 __rc; \
147}) 176})
148 177
149#define emu_store_ril(ptr, input) \ 178#define emu_store_ril(regs, ptr, input) \
150({ \ 179({ \
151 unsigned int mask = sizeof(*(ptr)) - 1; \ 180 unsigned int mask = sizeof(*(ptr)) - 1; \
181 __typeof__(ptr) __ptr = (ptr); \
152 int __rc = 0; \ 182 int __rc = 0; \
153 \ 183 \
154 if (!test_facility(34)) \ 184 if (!test_facility(34)) \
155 __rc = EMU_ILLEGAL_OP; \ 185 __rc = EMU_ILLEGAL_OP; \
156 else if ((u64 __force)ptr & mask) \ 186 else if ((u64 __force)__ptr & mask) \
157 __rc = EMU_SPECIFICATION; \ 187 __rc = EMU_SPECIFICATION; \
158 else if (put_user(*(input), ptr)) \ 188 else if (put_user(*(input), __ptr)) \
159 __rc = EMU_ADDRESSING; \ 189 __rc = EMU_ADDRESSING; \
190 if (__rc == 0) \
191 sim_stor_event(regs, __ptr, mask + 1); \
160 __rc; \ 192 __rc; \
161}) 193})
162 194
@@ -198,6 +230,25 @@ union split_register {
198}; 230};
199 231
200/* 232/*
 233 * If the user PER registers are set up to trace storage alterations and an
 234 * emulated store took place on a fitting address, a user trap is generated.
235 */
236static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
237{
238 if (!(regs->psw.mask & PSW_MASK_PER))
239 return;
240 if (!(current->thread.per_user.control & PER_EVENT_STORE))
241 return;
242 if ((void *)current->thread.per_user.start > (addr + len))
243 return;
244 if ((void *)current->thread.per_user.end < addr)
245 return;
246 current->thread.per_event.address = regs->psw.addr;
247 current->thread.per_event.cause = PER_EVENT_STORE >> 16;
248 set_pt_regs_flag(regs, PIF_PER_TRAP);
249}
250
251/*
201 * pc relative instructions are emulated, since parameters may not be 252 * pc relative instructions are emulated, since parameters may not be
202 * accessible from the xol area due to range limitations. 253 * accessible from the xol area due to range limitations.
203 */ 254 */
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
249 rc = emu_load_ril((u32 __user *)uptr, &rx->u64); 300 rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
250 break; 301 break;
251 case 0x07: /* sthrl */ 302 case 0x07: /* sthrl */
252 rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]); 303 rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
253 break; 304 break;
254 case 0x0b: /* stgrl */ 305 case 0x0b: /* stgrl */
255 rc = emu_store_ril((u64 __user *)uptr, &rx->u64); 306 rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
256 break; 307 break;
257 case 0x0f: /* strl */ 308 case 0x0f: /* strl */
258 rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]); 309 rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
259 break; 310 break;
260 } 311 }
261 break; 312 break;
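
Both new helpers above decide whether an emulated instruction should raise a PER event by testing control bits and an address range. Stripped of the s390 specifics, the range part of sim_stor_event() is a plain interval-overlap test; the following hedged restatement uses hypothetical names and simply mirrors the two early returns in the patch:

#include <stdbool.h>

/* Does the store [addr, addr + len) touch the watched range [start, end]? */
static bool store_hits_per_range(unsigned long addr, unsigned long len,
                                 unsigned long start, unsigned long end)
{
        if (start > addr + len)
                return false;   /* store ends before the watched range */
        if (end < addr)
                return false;   /* store begins after the watched range */
        return true;
}
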
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7f0089d9a4aa..e34122e539a1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
128 struct thread_info *ti = task_thread_info(tsk); 128 struct thread_info *ti = task_thread_info(tsk);
129 u64 timer, system; 129 u64 timer, system;
130 130
131 WARN_ON_ONCE(!irqs_disabled());
132
133 timer = S390_lowcore.last_update_timer; 131 timer = S390_lowcore.last_update_timer;
134 S390_lowcore.last_update_timer = get_vtimer(); 132 S390_lowcore.last_update_timer = get_vtimer();
135 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 133 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index be99357d238c..3cf8cc03fff6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
322static unsigned long __gmap_segment_gaddr(unsigned long *entry) 322static unsigned long __gmap_segment_gaddr(unsigned long *entry)
323{ 323{
324 struct page *page; 324 struct page *page;
325 unsigned long offset; 325 unsigned long offset, mask;
326 326
327 offset = (unsigned long) entry / sizeof(unsigned long); 327 offset = (unsigned long) entry / sizeof(unsigned long);
328 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE; 328 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
329 page = pmd_to_page((pmd_t *) entry); 329 mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
330 page = virt_to_page((void *)((unsigned long) entry & mask));
330 return page->index + offset; 331 return page->index + offset;
331} 332}
332 333
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index c52ac77408ca..524496d47ef5 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
431 EMIT4_DISP(0x88500000, K); 431 EMIT4_DISP(0x88500000, K);
432 break; 432 break;
433 case BPF_ALU | BPF_NEG: /* A = -A */ 433 case BPF_ALU | BPF_NEG: /* A = -A */
434 /* lnr %r5,%r5 */ 434 /* lcr %r5,%r5 */
435 EMIT2(0x1155); 435 EMIT2(0x1355);
436 break; 436 break;
437 case BPF_JMP | BPF_JA: /* ip += K */ 437 case BPF_JMP | BPF_JA: /* ip += K */
438 offset = addrs[i + K] + jit->start - jit->prg; 438 offset = addrs[i + K] + jit->start - jit->prg;
@@ -502,8 +502,8 @@ branch: if (filter->jt == filter->jf) {
502xbranch: /* Emit compare if the branch targets are different */ 502xbranch: /* Emit compare if the branch targets are different */
503 if (filter->jt != filter->jf) { 503 if (filter->jt != filter->jf) {
504 jit->seen |= SEEN_XREG; 504 jit->seen |= SEEN_XREG;
505 /* cr %r5,%r12 */ 505 /* clr %r5,%r12 */
506 EMIT2(0x195c); 506 EMIT2(0x155c);
507 } 507 }
508 goto branch; 508 goto branch;
509 case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */ 509 case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
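
The two opcode fixes above matter because classic BPF defines A, X and K as unsigned 32-bit values: negation must be a plain two's-complement negate (lcr, LOAD COMPLEMENT, rather than lnr, LOAD NEGATIVE, which always yields a non-positive result), and conditional jumps must use an unsigned compare (clr, COMPARE LOGICAL, rather than the signed cr). A tiny C illustration of the comparison half:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t a = 0x80000000u;       /* large unsigned accumulator value */
        uint32_t k = 1;

        /* BPF_JGE must see 2147483648 >= 1 -> true. */
        printf("unsigned: %d\n", a >= k);
        /* A signed compare (what 'cr' does) sees -2147483648 >= 1 -> false. */
        printf("signed  : %d\n", (int32_t)a >= (int32_t)k);
        return 0;
}
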
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 87bc86821bc9..d195a87ca542 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,6 +3,7 @@ config UML
3 default y 3 default y
4 select HAVE_ARCH_AUDITSYSCALL 4 select HAVE_ARCH_AUDITSYSCALL
5 select HAVE_UID16 5 select HAVE_UID16
6 select HAVE_FUTEX_CMPXCHG if FUTEX
6 select GENERIC_IRQ_SHOW 7 select GENERIC_IRQ_SHOW
7 select GENERIC_CPU_DEVICES 8 select GENERIC_CPU_DEVICES
8 select GENERIC_IO 9 select GENERIC_IO
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5b016e2498f3..3db07f30636f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -51,6 +51,7 @@ targets += cpustr.h
51$(obj)/cpustr.h: $(obj)/mkcpustr FORCE 51$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
52 $(call if_changed,cpustr) 52 $(call if_changed,cpustr)
53endif 53endif
54clean-files += cpustr.h
54 55
55# --------------------------------------------------------------------------- 56# ---------------------------------------------------------------------------
56 57
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index fd0f848938cc..5a4a089e8b1f 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
26 26
27obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o 27obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
28obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o 28obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
29obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
30obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o 29obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
31obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o 30obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
32obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o 31obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
@@ -46,6 +45,7 @@ endif
46ifeq ($(avx2_supported),yes) 45ifeq ($(avx2_supported),yes)
47 obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o 46 obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
48 obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o 47 obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
48 obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
49endif 49endif
50 50
51aes-i586-y := aes-i586-asm_32.o aes_glue.o 51aes-i586-y := aes-i586-asm_32.o aes_glue.o
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index 2df2a0298f5a..a916c4a61165 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -208,7 +208,7 @@ ddq_add_8:
208 208
209 .if (klen == KEY_128) 209 .if (klen == KEY_128)
210 .if (load_keys) 210 .if (load_keys)
211 vmovdqa 3*16(p_keys), xkeyA 211 vmovdqa 3*16(p_keys), xkey4
212 .endif 212 .endif
213 .else 213 .else
214 vmovdqa 3*16(p_keys), xkeyA 214 vmovdqa 3*16(p_keys), xkeyA
@@ -224,7 +224,7 @@ ddq_add_8:
224 add $(16*by), p_in 224 add $(16*by), p_in
225 225
226 .if (klen == KEY_128) 226 .if (klen == KEY_128)
227 vmovdqa 4*16(p_keys), xkey4 227 vmovdqa 4*16(p_keys), xkeyB
228 .else 228 .else
229 .if (load_keys) 229 .if (load_keys)
230 vmovdqa 4*16(p_keys), xkey4 230 vmovdqa 4*16(p_keys), xkey4
@@ -234,7 +234,12 @@ ddq_add_8:
234 .set i, 0 234 .set i, 0
235 .rept by 235 .rept by
236 club XDATA, i 236 club XDATA, i
237 vaesenc xkeyA, var_xdata, var_xdata /* key 3 */ 237 /* key 3 */
238 .if (klen == KEY_128)
239 vaesenc xkey4, var_xdata, var_xdata
240 .else
241 vaesenc xkeyA, var_xdata, var_xdata
242 .endif
238 .set i, (i +1) 243 .set i, (i +1)
239 .endr 244 .endr
240 245
@@ -243,13 +248,18 @@ ddq_add_8:
243 .set i, 0 248 .set i, 0
244 .rept by 249 .rept by
245 club XDATA, i 250 club XDATA, i
246 vaesenc xkey4, var_xdata, var_xdata /* key 4 */ 251 /* key 4 */
252 .if (klen == KEY_128)
253 vaesenc xkeyB, var_xdata, var_xdata
254 .else
255 vaesenc xkey4, var_xdata, var_xdata
256 .endif
247 .set i, (i +1) 257 .set i, (i +1)
248 .endr 258 .endr
249 259
250 .if (klen == KEY_128) 260 .if (klen == KEY_128)
251 .if (load_keys) 261 .if (load_keys)
252 vmovdqa 6*16(p_keys), xkeyB 262 vmovdqa 6*16(p_keys), xkey8
253 .endif 263 .endif
254 .else 264 .else
255 vmovdqa 6*16(p_keys), xkeyB 265 vmovdqa 6*16(p_keys), xkeyB
@@ -267,12 +277,17 @@ ddq_add_8:
267 .set i, 0 277 .set i, 0
268 .rept by 278 .rept by
269 club XDATA, i 279 club XDATA, i
270 vaesenc xkeyB, var_xdata, var_xdata /* key 6 */ 280 /* key 6 */
281 .if (klen == KEY_128)
282 vaesenc xkey8, var_xdata, var_xdata
283 .else
284 vaesenc xkeyB, var_xdata, var_xdata
285 .endif
271 .set i, (i +1) 286 .set i, (i +1)
272 .endr 287 .endr
273 288
274 .if (klen == KEY_128) 289 .if (klen == KEY_128)
275 vmovdqa 8*16(p_keys), xkey8 290 vmovdqa 8*16(p_keys), xkeyB
276 .else 291 .else
277 .if (load_keys) 292 .if (load_keys)
278 vmovdqa 8*16(p_keys), xkey8 293 vmovdqa 8*16(p_keys), xkey8
@@ -288,7 +303,7 @@ ddq_add_8:
288 303
289 .if (klen == KEY_128) 304 .if (klen == KEY_128)
290 .if (load_keys) 305 .if (load_keys)
291 vmovdqa 9*16(p_keys), xkeyA 306 vmovdqa 9*16(p_keys), xkey12
292 .endif 307 .endif
293 .else 308 .else
294 vmovdqa 9*16(p_keys), xkeyA 309 vmovdqa 9*16(p_keys), xkeyA
@@ -297,7 +312,12 @@ ddq_add_8:
297 .set i, 0 312 .set i, 0
298 .rept by 313 .rept by
299 club XDATA, i 314 club XDATA, i
300 vaesenc xkey8, var_xdata, var_xdata /* key 8 */ 315 /* key 8 */
316 .if (klen == KEY_128)
317 vaesenc xkeyB, var_xdata, var_xdata
318 .else
319 vaesenc xkey8, var_xdata, var_xdata
320 .endif
301 .set i, (i +1) 321 .set i, (i +1)
302 .endr 322 .endr
303 323
@@ -306,7 +326,12 @@ ddq_add_8:
306 .set i, 0 326 .set i, 0
307 .rept by 327 .rept by
308 club XDATA, i 328 club XDATA, i
309 vaesenc xkeyA, var_xdata, var_xdata /* key 9 */ 329 /* key 9 */
330 .if (klen == KEY_128)
331 vaesenc xkey12, var_xdata, var_xdata
332 .else
333 vaesenc xkeyA, var_xdata, var_xdata
334 .endif
310 .set i, (i +1) 335 .set i, (i +1)
311 .endr 336 .endr
312 337
@@ -412,7 +437,6 @@ ddq_add_8:
412/* main body of aes ctr load */ 437/* main body of aes ctr load */
413 438
414.macro do_aes_ctrmain key_len 439.macro do_aes_ctrmain key_len
415
416 cmp $16, num_bytes 440 cmp $16, num_bytes
417 jb .Ldo_return2\key_len 441 jb .Ldo_return2\key_len
418 442
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a225a5ca1037..fd9f6b035b16 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -931,4 +931,4 @@ module_exit(sha1_mb_mod_fini);
931MODULE_LICENSE("GPL"); 931MODULE_LICENSE("GPL");
932MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated"); 932MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
933 933
934MODULE_ALIAS("sha1"); 934MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index e7e9682a33e9..f556c4843aa1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
80 80
81 /* 81 /*
82 * Load per CPU data from GDT. LSL is faster than RDTSCP and 82 * Load per CPU data from GDT. LSL is faster than RDTSCP and
83 * works on all CPUs. 83 * works on all CPUs. This is volatile so that it orders
84 * correctly wrt barrier() and to keep gcc from cleverly
85 * hoisting it out of the calling function.
84 */ 86 */
85 asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); 87 asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
86 88
87 return p; 89 return p;
88} 90}
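
The __getcpu() change relies on the rule that a non-volatile asm with outputs is treated like a pure function of its inputs: the compiler may CSE it or hoist it out of the calling function, which is exactly what must not happen to a per-CPU read that can race with migration. A small standalone x86 example of the same rule (this is not the vdso code, just an illustration of why "volatile" is needed):

#include <stdio.h>

/* Without "volatile", two calls with identical (empty) inputs could
 * legally be folded into a single RDTSC; volatile pins one execution
 * per use, in program order relative to barrier(). */
static inline unsigned long long rdtsc_once(void)
{
        unsigned int lo, hi;

        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
        unsigned long long t0 = rdtsc_once();
        unsigned long long t1 = rdtsc_once();

        printf("delta = %llu cycles\n", t1 - t0);
        return 0;
}
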
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4433a4be8171..d1626364a28a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
750} 750}
751 751
752/* wrapper to silence section mismatch warning */ 752/* wrapper to silence section mismatch warning */
753int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) 753int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
754{ 754{
755 return _acpi_map_lsapic(handle, physid, pcpu); 755 return _acpi_map_lsapic(handle, physid, pcpu);
756} 756}
757EXPORT_SYMBOL(acpi_map_lsapic); 757EXPORT_SYMBOL(acpi_map_cpu);
758 758
759int acpi_unmap_lsapic(int cpu) 759int acpi_unmap_cpu(int cpu)
760{ 760{
761#ifdef CONFIG_ACPI_NUMA 761#ifdef CONFIG_ACPI_NUMA
762 set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE); 762 set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
@@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
768 768
769 return (0); 769 return (0);
770} 770}
771 771EXPORT_SYMBOL(acpi_unmap_cpu);
772EXPORT_SYMBOL(acpi_unmap_lsapic);
773#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 772#endif /* CONFIG_ACPI_HOTPLUG_CPU */
774 773
775int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) 774int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index e27b49d7c922..80091ae54c2b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -66,3 +66,4 @@ targets += capflags.c
66$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE 66$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
67 $(call if_changed,mkcapflags) 67 $(call if_changed,mkcapflags)
68endif 68endif
69clean-files += capflags.c
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index e2b22df964cd..36d99a337b49 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -28,7 +28,7 @@ function dump_array()
28 # If the /* comment */ starts with a quote string, grab that. 28 # If the /* comment */ starts with a quote string, grab that.
29 VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')" 29 VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
30 [ -z "$VALUE" ] && VALUE="\"$NAME\"" 30 [ -z "$VALUE" ] && VALUE="\"$NAME\""
31 [ "$VALUE" == '""' ] && continue 31 [ "$VALUE" = '""' ] && continue
32 32
33 # Name is uppercase, VALUE is all lowercase 33 # Name is uppercase, VALUE is all lowercase
34 VALUE="$(echo "$VALUE" | tr A-Z a-z)" 34 VALUE="$(echo "$VALUE" | tr A-Z a-z)"
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3c895d480cd7..073983398364 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -568,8 +568,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
568}; 568};
569 569
570struct event_constraint intel_slm_pebs_event_constraints[] = { 570struct event_constraint intel_slm_pebs_event_constraints[] = {
571 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ 571 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
572 INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), 572 INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
573 /* Allow all events as PEBS with no flags */ 573 /* Allow all events as PEBS with no flags */
574 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), 574 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
575 EVENT_CONSTRAINT_END 575 EVENT_CONSTRAINT_END
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 673f930c700f..6e434f8e5fc8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -103,6 +103,13 @@ static struct kobj_attribute format_attr_##_var = \
103 103
104#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ 104#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
105 105
106#define RAPL_EVENT_ATTR_STR(_name, v, str) \
107static struct perf_pmu_events_attr event_attr_##v = { \
108 .attr = __ATTR(_name, 0444, rapl_sysfs_show, NULL), \
109 .id = 0, \
110 .event_str = str, \
111};
112
106struct rapl_pmu { 113struct rapl_pmu {
107 spinlock_t lock; 114 spinlock_t lock;
108 int hw_unit; /* 1/2^hw_unit Joule */ 115 int hw_unit; /* 1/2^hw_unit Joule */
@@ -379,23 +386,36 @@ static struct attribute_group rapl_pmu_attr_group = {
379 .attrs = rapl_pmu_attrs, 386 .attrs = rapl_pmu_attrs,
380}; 387};
381 388
382EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); 389static ssize_t rapl_sysfs_show(struct device *dev,
383EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); 390 struct device_attribute *attr,
384EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); 391 char *page)
385EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); 392{
393 struct perf_pmu_events_attr *pmu_attr = \
394 container_of(attr, struct perf_pmu_events_attr, attr);
395
396 if (pmu_attr->event_str)
397 return sprintf(page, "%s", pmu_attr->event_str);
398
399 return 0;
400}
401
402RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
403RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
404RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
405RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
386 406
387EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); 407RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
388EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); 408RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
389EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); 409RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
390EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); 410RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
391 411
392/* 412/*
393 * we compute in 0.23 nJ increments regardless of MSR 413 * we compute in 0.23 nJ increments regardless of MSR
394 */ 414 */
395EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); 415RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
396EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); 416RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
397EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); 417RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
398EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); 418RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
399 419
400static struct attribute *rapl_events_srv_attr[] = { 420static struct attribute *rapl_events_srv_attr[] = {
401 EVENT_PTR(rapl_cores), 421 EVENT_PTR(rapl_cores),
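
RAPL_EVENT_ATTR_STR() exists because these events need a show routine defined in this file; the routine itself is the usual sysfs pattern of recovering per-attribute data with container_of(). The following is a hedged, kernel-style sketch of that pattern; the struct, macro and attribute names are illustrative and it assumes the usual <linux/device.h> definitions when built as part of a driver:

#include <linux/device.h>
#include <linux/kernel.h>

/* One show() callback serves many attributes: each attribute embeds a
 * struct device_attribute, and container_of() walks back to the wrapper
 * that carries the attribute-specific string. */
struct event_str_attr {
        struct device_attribute attr;
        const char *event_str;
};

static ssize_t event_str_show(struct device *dev,
                              struct device_attribute *attr, char *page)
{
        struct event_str_attr *ea =
                container_of(attr, struct event_str_attr, attr);

        return sprintf(page, "%s\n", ea->event_str);
}

#define EVENT_STR_ATTR(_name, _var, _str)                               \
        static struct event_str_attr event_attr_##_var = {              \
                .attr = __ATTR(_name, 0444, event_str_show, NULL),      \
                .event_str = _str,                                      \
        }

EVENT_STR_ATTR(energy-cores, cores, "event=0x01");
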
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 18eb78bbdd10..863d9b02563e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -17,7 +17,7 @@
17#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) 17#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
18#define UNCORE_PCI_DEV_IDX(data) (data & 0xff) 18#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
19#define UNCORE_EXTRA_PCI_DEV 0xff 19#define UNCORE_EXTRA_PCI_DEV 0xff
20#define UNCORE_EXTRA_PCI_DEV_MAX 2 20#define UNCORE_EXTRA_PCI_DEV_MAX 3
21 21
22/* support up to 8 sockets */ 22/* support up to 8 sockets */
23#define UNCORE_SOCKET_MAX 8 23#define UNCORE_SOCKET_MAX 8
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 745b158e9a65..21af6149edf2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
891enum { 891enum {
892 SNBEP_PCI_QPI_PORT0_FILTER, 892 SNBEP_PCI_QPI_PORT0_FILTER,
893 SNBEP_PCI_QPI_PORT1_FILTER, 893 SNBEP_PCI_QPI_PORT1_FILTER,
894 HSWEP_PCI_PCU_3,
894}; 895};
895 896
896static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) 897static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
2026{ 2027{
2027 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 2028 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2028 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 2029 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2030
2031 /* Detect 6-8 core systems with only two SBOXes */
2032 if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
2033 u32 capid4;
2034
2035 pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
2036 0x94, &capid4);
2037 if (((capid4 >> 6) & 0x3) == 0)
2038 hswep_uncore_sbox.num_boxes = 2;
2039 }
2040
2029 uncore_msr_uncores = hswep_msr_uncores; 2041 uncore_msr_uncores = hswep_msr_uncores;
2030} 2042}
2031 2043
@@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
2287 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2299 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2288 SNBEP_PCI_QPI_PORT1_FILTER), 2300 SNBEP_PCI_QPI_PORT1_FILTER),
2289 }, 2301 },
2302 { /* PCU.3 (for Capability registers) */
2303 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
2304 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2305 HSWEP_PCI_PCU_3),
2306 },
2290 { /* end: all zeroes */ } 2307 { /* end: all zeroes */ }
2291}; 2308};
2292 2309
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f7e3cd50ece0..98f654d466e5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
1020 regs->flags &= ~X86_EFLAGS_IF; 1020 regs->flags &= ~X86_EFLAGS_IF;
1021 trace_hardirqs_off(); 1021 trace_hardirqs_off();
1022 regs->ip = (unsigned long)(jp->entry); 1022 regs->ip = (unsigned long)(jp->entry);
1023
1024 /*
1025 * jprobes use jprobe_return() which skips the normal return
1026 * path of the function, and this messes up the accounting of the
1027 * function graph tracer to get messed up.
1028 *
1029 * Pause function graph tracing while performing the jprobe function.
1030 */
1031 pause_graph_tracing();
1023 return 1; 1032 return 1;
1024} 1033}
1025NOKPROBE_SYMBOL(setjmp_pre_handler); 1034NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1048 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1057 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1049 u8 *addr = (u8 *) (regs->ip - 1); 1058 u8 *addr = (u8 *) (regs->ip - 1);
1050 struct jprobe *jp = container_of(p, struct jprobe, kp); 1059 struct jprobe *jp = container_of(p, struct jprobe, kp);
1060 void *saved_sp = kcb->jprobe_saved_sp;
1051 1061
1052 if ((addr > (u8 *) jprobe_return) && 1062 if ((addr > (u8 *) jprobe_return) &&
1053 (addr < (u8 *) jprobe_return_end)) { 1063 (addr < (u8 *) jprobe_return_end)) {
1054 if (stack_addr(regs) != kcb->jprobe_saved_sp) { 1064 if (stack_addr(regs) != saved_sp) {
1055 struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; 1065 struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
1056 printk(KERN_ERR 1066 printk(KERN_ERR
1057 "current sp %p does not match saved sp %p\n", 1067 "current sp %p does not match saved sp %p\n",
1058 stack_addr(regs), kcb->jprobe_saved_sp); 1068 stack_addr(regs), saved_sp);
1059 printk(KERN_ERR "Saved registers for jprobe %p\n", jp); 1069 printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
1060 show_regs(saved_regs); 1070 show_regs(saved_regs);
1061 printk(KERN_ERR "Current registers\n"); 1071 printk(KERN_ERR "Current registers\n");
1062 show_regs(regs); 1072 show_regs(regs);
1063 BUG(); 1073 BUG();
1064 } 1074 }
1075 /* It's OK to start function graph tracing again */
1076 unpause_graph_tracing();
1065 *regs = kcb->jprobe_saved_regs; 1077 *regs = kcb->jprobe_saved_regs;
1066 memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), 1078 memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
1067 kcb->jprobes_stack,
1068 MIN_STACK_SIZE(kcb->jprobe_saved_sp));
1069 preempt_enable_no_resched(); 1079 preempt_enable_no_resched();
1070 return 1; 1080 return 1;
1071 } 1081 }
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index e309cc5c276e..781861cc5ee8 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
78{ 78{
79 return PERF_SAMPLE_REGS_ABI_32; 79 return PERF_SAMPLE_REGS_ABI_32;
80} 80}
81
82void perf_get_regs_user(struct perf_regs *regs_user,
83 struct pt_regs *regs,
84 struct pt_regs *regs_user_copy)
85{
86 regs_user->regs = task_pt_regs(current);
87 regs_user->abi = perf_reg_abi(current);
88}
81#else /* CONFIG_X86_64 */ 89#else /* CONFIG_X86_64 */
82#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ 90#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
83 (1ULL << PERF_REG_X86_ES) | \ 91 (1ULL << PERF_REG_X86_ES) | \
@@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
102 else 110 else
103 return PERF_SAMPLE_REGS_ABI_64; 111 return PERF_SAMPLE_REGS_ABI_64;
104} 112}
113
114void perf_get_regs_user(struct perf_regs *regs_user,
115 struct pt_regs *regs,
116 struct pt_regs *regs_user_copy)
117{
118 struct pt_regs *user_regs = task_pt_regs(current);
119
120 /*
121 * If we're in an NMI that interrupted task_pt_regs setup, then
122 * we can't sample user regs at all. This check isn't really
123 * sufficient, though, as we could be in an NMI inside an interrupt
124 * that happened during task_pt_regs setup.
125 */
126 if (regs->sp > (unsigned long)&user_regs->r11 &&
127 regs->sp <= (unsigned long)(user_regs + 1)) {
128 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
129 regs_user->regs = NULL;
130 return;
131 }
132
133 /*
134 * RIP, flags, and the argument registers are usually saved.
135 * orig_ax is probably okay, too.
136 */
137 regs_user_copy->ip = user_regs->ip;
138 regs_user_copy->cx = user_regs->cx;
139 regs_user_copy->dx = user_regs->dx;
140 regs_user_copy->si = user_regs->si;
141 regs_user_copy->di = user_regs->di;
142 regs_user_copy->r8 = user_regs->r8;
143 regs_user_copy->r9 = user_regs->r9;
144 regs_user_copy->r10 = user_regs->r10;
145 regs_user_copy->r11 = user_regs->r11;
146 regs_user_copy->orig_ax = user_regs->orig_ax;
147 regs_user_copy->flags = user_regs->flags;
148
149 /*
150 * Don't even try to report the "rest" regs.
151 */
152 regs_user_copy->bx = -1;
153 regs_user_copy->bp = -1;
154 regs_user_copy->r12 = -1;
155 regs_user_copy->r13 = -1;
156 regs_user_copy->r14 = -1;
157 regs_user_copy->r15 = -1;
158
159 /*
160 * For this to be at all useful, we need a reasonable guess for
161 * sp and the ABI. Be careful: we're in NMI context, and we're
162 * considering current to be the current task, so we should
163 * be careful not to look at any other percpu variables that might
164 * change during context switches.
165 */
166 if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
167 task_thread_info(current)->status & TS_COMPAT) {
168 /* Easy case: we're in a compat syscall. */
169 regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
170 regs_user_copy->sp = user_regs->sp;
171 regs_user_copy->cs = user_regs->cs;
172 regs_user_copy->ss = user_regs->ss;
173 } else if (user_regs->orig_ax != -1) {
174 /*
175 * We're probably in a 64-bit syscall.
176 * Warning: this code is severely racy. At least it's better
177 * than just blindly copying user_regs.
178 */
179 regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
180 regs_user_copy->sp = this_cpu_read(old_rsp);
181 regs_user_copy->cs = __USER_CS;
182 regs_user_copy->ss = __USER_DS;
183 regs_user_copy->cx = -1; /* usually contains garbage */
184 } else {
185 /* We're probably in an interrupt or exception. */
186 regs_user->abi = user_64bit_mode(user_regs) ?
187 PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
188 regs_user_copy->sp = user_regs->sp;
189 regs_user_copy->cs = user_regs->cs;
190 regs_user_copy->ss = user_regs->ss;
191 }
192
193 regs_user->regs = regs_user_copy;
194}
105#endif /* CONFIG_X86_32 */ 195#endif /* CONFIG_X86_32 */
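The new 64-bit perf_get_regs_user() above only reports registers the syscall entry path is known to have saved, and refuses to sample at all if the NMI arrived while the entry code was still constructing pt_regs. A minimal standalone sketch of that range test, using a hypothetical structure that merely stands in for the real pt_regs layout:

    #include <stdint.h>

    /* Stand-in for pt_regs; field names and order are illustrative only. */
    struct fake_pt_regs { uint64_t r11, bx, bp, sp, ss; };

    /* Mirrors the check above: if the interrupted stack pointer lies in
     * (&regs->r11, regs + 1], the saved frame may be half-built, so the
     * sampler must give up rather than report garbage.
     */
    static int regs_setup_in_progress(uintptr_t nmi_sp, struct fake_pt_regs *regs)
    {
            return nmi_sp > (uintptr_t)&regs->r11 &&
                   nmi_sp <= (uintptr_t)(regs + 1);
    }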
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 10fbed126b11..f83fc6c5e0ba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4448,7 +4448,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
4448 * zap all shadow pages. 4448 * zap all shadow pages.
4449 */ 4449 */
4450 if (unlikely(kvm_current_mmio_generation(kvm) == 0)) { 4450 if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
4451 printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n"); 4451 printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
4452 kvm_mmu_invalidate_zap_all_pages(kvm); 4452 kvm_mmu_invalidate_zap_all_pages(kvm);
4453 } 4453 }
4454} 4454}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index feb852b04598..d4c58d884838 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5840,53 +5840,10 @@ static __init int hardware_setup(void)
5840 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); 5840 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
5841 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); 5841 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
5842 5842
5843 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
5844 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
5845 vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
5846 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
5847 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
5848 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
5849 vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
5850
5851 memcpy(vmx_msr_bitmap_legacy_x2apic,
5852 vmx_msr_bitmap_legacy, PAGE_SIZE);
5853 memcpy(vmx_msr_bitmap_longmode_x2apic,
5854 vmx_msr_bitmap_longmode, PAGE_SIZE);
5855
5856 if (enable_apicv) {
5857 for (msr = 0x800; msr <= 0x8ff; msr++)
5858 vmx_disable_intercept_msr_read_x2apic(msr);
5859
5860 /* According SDM, in x2apic mode, the whole id reg is used.
5861 * But in KVM, it only use the highest eight bits. Need to
5862 * intercept it */
5863 vmx_enable_intercept_msr_read_x2apic(0x802);
5864 /* TMCCT */
5865 vmx_enable_intercept_msr_read_x2apic(0x839);
5866 /* TPR */
5867 vmx_disable_intercept_msr_write_x2apic(0x808);
5868 /* EOI */
5869 vmx_disable_intercept_msr_write_x2apic(0x80b);
5870 /* SELF-IPI */
5871 vmx_disable_intercept_msr_write_x2apic(0x83f);
5872 }
5873
5874 if (enable_ept) {
5875 kvm_mmu_set_mask_ptes(0ull,
5876 (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
5877 (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
5878 0ull, VMX_EPT_EXECUTABLE_MASK);
5879 ept_set_mmio_spte_mask();
5880 kvm_enable_tdp();
5881 } else
5882 kvm_disable_tdp();
5883
5884 update_ple_window_actual_max();
5885
5886 if (setup_vmcs_config(&vmcs_config) < 0) { 5843 if (setup_vmcs_config(&vmcs_config) < 0) {
5887 r = -EIO; 5844 r = -EIO;
5888 goto out7; 5845 goto out7;
5889 } 5846 }
5890 5847
5891 if (boot_cpu_has(X86_FEATURE_NX)) 5848 if (boot_cpu_has(X86_FEATURE_NX))
5892 kvm_enable_efer_bits(EFER_NX); 5849 kvm_enable_efer_bits(EFER_NX);
@@ -5945,6 +5902,49 @@ static __init int hardware_setup(void)
5945 if (nested) 5902 if (nested)
5946 nested_vmx_setup_ctls_msrs(); 5903 nested_vmx_setup_ctls_msrs();
5947 5904
5905 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
5906 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
5907 vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
5908 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
5909 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
5910 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
5911 vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
5912
5913 memcpy(vmx_msr_bitmap_legacy_x2apic,
5914 vmx_msr_bitmap_legacy, PAGE_SIZE);
5915 memcpy(vmx_msr_bitmap_longmode_x2apic,
5916 vmx_msr_bitmap_longmode, PAGE_SIZE);
5917
5918 if (enable_apicv) {
5919 for (msr = 0x800; msr <= 0x8ff; msr++)
5920 vmx_disable_intercept_msr_read_x2apic(msr);
5921
5922 /* According SDM, in x2apic mode, the whole id reg is used.
5923 * But in KVM, it only use the highest eight bits. Need to
5924 * intercept it */
5925 vmx_enable_intercept_msr_read_x2apic(0x802);
5926 /* TMCCT */
5927 vmx_enable_intercept_msr_read_x2apic(0x839);
5928 /* TPR */
5929 vmx_disable_intercept_msr_write_x2apic(0x808);
5930 /* EOI */
5931 vmx_disable_intercept_msr_write_x2apic(0x80b);
5932 /* SELF-IPI */
5933 vmx_disable_intercept_msr_write_x2apic(0x83f);
5934 }
5935
5936 if (enable_ept) {
5937 kvm_mmu_set_mask_ptes(0ull,
5938 (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
5939 (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
5940 0ull, VMX_EPT_EXECUTABLE_MASK);
5941 ept_set_mmio_spte_mask();
5942 kvm_enable_tdp();
5943 } else
5944 kvm_disable_tdp();
5945
5946 update_ple_window_actual_max();
5947
5948 return alloc_kvm_area(); 5948 return alloc_kvm_area();
5949 5949
5950out7: 5950out7:
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 2480978b31cc..1313ae6b478b 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -28,7 +28,7 @@
28 28
29/* Verify next sizeof(t) bytes can be on the same instruction */ 29/* Verify next sizeof(t) bytes can be on the same instruction */
30#define validate_next(t, insn, n) \ 30#define validate_next(t, insn, n) \
31 ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr) 31 ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
32 32
33#define __get_next(t, insn) \ 33#define __get_next(t, insn) \
34 ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) 34 ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
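The validate_next() change is a one-byte off-by-one fix: end_kaddr points just past the instruction buffer (an assumption about the decoder's convention), so the old '<' rejected any field that ended exactly at the buffer boundary. A tiny worked check:

    #include <assert.h>

    int main(void)
    {
            unsigned long kaddr = 0x1000, end_kaddr = kaddr + 16; /* one past end */
            unsigned long next_byte = 0x100f;  /* last valid byte of the buffer */

            /* Reading one byte that ends exactly at the boundary: */
            assert(!(next_byte + 1 + 0 <  end_kaddr)); /* old test: wrongly rejected */
            assert( (next_byte + 1 + 0 <= end_kaddr)); /* new test: accepted */
            return 0;
    }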
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a97ee0801475..08a7d313538a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
438static unsigned long __init get_new_step_size(unsigned long step_size) 438static unsigned long __init get_new_step_size(unsigned long step_size)
439{ 439{
440 /* 440 /*
441 * Explain why we shift by 5 and why we don't have to worry about 441 * Initial mapped size is PMD_SIZE (2M).
442 * 'step_size << 5' overflowing:
443 *
444 * initial mapped size is PMD_SIZE (2M).
445 * We can not set step_size to be PUD_SIZE (1G) yet. 442 * We can not set step_size to be PUD_SIZE (1G) yet.
446 * In worse case, when we cross the 1G boundary, and 443 * In worse case, when we cross the 1G boundary, and
447 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k) 444 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
448 * to map 1G range with PTE. Use 5 as shift for now. 445 * to map 1G range with PTE. Hence we use one less than the
446 * difference of page table level shifts.
449 * 447 *
450 * Don't need to worry about overflow, on 32bit, when step_size 448 * Don't need to worry about overflow in the top-down case, on 32bit,
451 * is 0, round_down() returns 0 for start, and that turns it 449 * when step_size is 0, round_down() returns 0 for start, and that
452 * into 0x100000000ULL. 450 * turns it into 0x100000000ULL.
451 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
452 * needs to be taken into consideration by the code below.
453 */ 453 */
454 return step_size << 5; 454 return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
455} 455}
456 456
457/** 457/**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
471 unsigned long step_size; 471 unsigned long step_size;
472 unsigned long addr; 472 unsigned long addr;
473 unsigned long mapped_ram_size = 0; 473 unsigned long mapped_ram_size = 0;
474 unsigned long new_mapped_ram_size;
475 474
476 /* xen has big range in reserved near end of ram, skip it at first.*/ 475 /* xen has big range in reserved near end of ram, skip it at first.*/
477 addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE); 476 addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
496 start = map_start; 495 start = map_start;
497 } else 496 } else
498 start = map_start; 497 start = map_start;
499 new_mapped_ram_size = init_range_memory_mapping(start, 498 mapped_ram_size += init_range_memory_mapping(start,
500 last_start); 499 last_start);
501 last_start = start; 500 last_start = start;
502 min_pfn_mapped = last_start >> PAGE_SHIFT; 501 min_pfn_mapped = last_start >> PAGE_SHIFT;
503 /* only increase step_size after big range get mapped */ 502 if (mapped_ram_size >= step_size)
504 if (new_mapped_ram_size > mapped_ram_size)
505 step_size = get_new_step_size(step_size); 503 step_size = get_new_step_size(step_size);
506 mapped_ram_size += new_mapped_ram_size;
507 } 504 }
508 505
509 if (real_end < map_end) 506 if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
524static void __init memory_map_bottom_up(unsigned long map_start, 521static void __init memory_map_bottom_up(unsigned long map_start,
525 unsigned long map_end) 522 unsigned long map_end)
526{ 523{
527 unsigned long next, new_mapped_ram_size, start; 524 unsigned long next, start;
528 unsigned long mapped_ram_size = 0; 525 unsigned long mapped_ram_size = 0;
529 /* step_size need to be small so pgt_buf from BRK could cover it */ 526 /* step_size need to be small so pgt_buf from BRK could cover it */
530 unsigned long step_size = PMD_SIZE; 527 unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
539 * for page table. 536 * for page table.
540 */ 537 */
541 while (start < map_end) { 538 while (start < map_end) {
542 if (map_end - start > step_size) { 539 if (step_size && map_end - start > step_size) {
543 next = round_up(start + 1, step_size); 540 next = round_up(start + 1, step_size);
544 if (next > map_end) 541 if (next > map_end)
545 next = map_end; 542 next = map_end;
546 } else 543 } else {
547 next = map_end; 544 next = map_end;
545 }
548 546
549 new_mapped_ram_size = init_range_memory_mapping(start, next); 547 mapped_ram_size += init_range_memory_mapping(start, next);
550 start = next; 548 start = next;
551 549
552 if (new_mapped_ram_size > mapped_ram_size) 550 if (mapped_ram_size >= step_size)
553 step_size = get_new_step_size(step_size); 551 step_size = get_new_step_size(step_size);
554 mapped_ram_size += new_mapped_ram_size;
555 } 552 }
556} 553}
557 554
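On x86 with 4 KiB pages the new shift in get_new_step_size() evaluates to PMD_SHIFT - PAGE_SHIFT - 1 = 21 - 12 - 1 = 8, so each call grows the mapping window by a factor of 256: 2 MiB, then 512 MiB, then 128 GiB, and so on. A one-liner with those constants hard-coded for illustration:

    /* Illustration only; the kernel uses PMD_SHIFT and PAGE_SHIFT. */
    static unsigned long next_step(unsigned long step_size)
    {
            return step_size << (21 - 12 - 1);   /* e.g. 2 MiB -> 512 MiB */
    }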
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 531d4269e2e3..bd16d6c370ec 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
34 34
35extern asmlinkage void sys_ni_syscall(void); 35extern asmlinkage void sys_ni_syscall(void);
36 36
37const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { 37const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
38 /* 38 /*
39 * Smells like a compiler bug -- it doesn't work 39 * Smells like a compiler bug -- it doesn't work
40 * when the & below is removed. 40 * when the & below is removed.
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 20c3649d0691..5cdfa9db2217 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
47 47
48extern void sys_ni_syscall(void); 48extern void sys_ni_syscall(void);
49 49
50const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { 50const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
51 /* 51 /*
52 * Smells like a compiler bug -- it doesn't work 52 * Smells like a compiler bug -- it doesn't work
53 * when the & below is removed. 53 * when the & below is removed.
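The extra underscore in the two UML syscall tables is not cosmetic: __cacheline_aligned also forces the object into the writable .data..cacheline_aligned section, which defeats the const on the table, while ____cacheline_aligned is the bare alignment attribute. Approximately (simplified from include/linux/cache.h; treat the exact spelling as an assumption):

    #define ____cacheline_aligned \
            __attribute__((__aligned__(SMP_CACHE_BYTES)))

    #define __cacheline_aligned \
            __attribute__((__aligned__(SMP_CACHE_BYTES), \
                           __section__(".data..cacheline_aligned")))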
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 009495b9ab4b..1c9f750c3859 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
41 41
42struct linux_binprm; 42struct linux_binprm;
43 43
44/* Put the vdso above the (randomized) stack with another randomized offset. 44/*
45 This way there is no hole in the middle of address space. 45 * Put the vdso above the (randomized) stack with another randomized
46 To save memory make sure it is still in the same PTE as the stack top. 46 * offset. This way there is no hole in the middle of address space.
47 This doesn't give that many random bits. 47 * To save memory make sure it is still in the same PTE as the stack
48 48 * top. This doesn't give that many random bits.
49 Only used for the 64-bit and x32 vdsos. */ 49 *
50 * Note that this algorithm is imperfect: the distribution of the vdso
51 * start address within a PMD is biased toward the end.
52 *
53 * Only used for the 64-bit and x32 vdsos.
54 */
50static unsigned long vdso_addr(unsigned long start, unsigned len) 55static unsigned long vdso_addr(unsigned long start, unsigned len)
51{ 56{
52#ifdef CONFIG_X86_32 57#ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
54#else 59#else
55 unsigned long addr, end; 60 unsigned long addr, end;
56 unsigned offset; 61 unsigned offset;
57 end = (start + PMD_SIZE - 1) & PMD_MASK; 62
63 /*
64 * Round up the start address. It can start out unaligned as a result
65 * of stack start randomization.
66 */
67 start = PAGE_ALIGN(start);
68
69 /* Round the lowest possible end address up to a PMD boundary. */
70 end = (start + len + PMD_SIZE - 1) & PMD_MASK;
58 if (end >= TASK_SIZE_MAX) 71 if (end >= TASK_SIZE_MAX)
59 end = TASK_SIZE_MAX; 72 end = TASK_SIZE_MAX;
60 end -= len; 73 end -= len;
61 /* This loses some more bits than a modulo, but is cheaper */ 74
62 offset = get_random_int() & (PTRS_PER_PTE - 1); 75 if (end > start) {
63 addr = start + (offset << PAGE_SHIFT); 76 offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
64 if (addr >= end) 77 addr = start + (offset << PAGE_SHIFT);
65 addr = end; 78 } else {
79 addr = start;
80 }
66 81
67 /* 82 /*
68 * page-align it here so that get_unmapped_area doesn't 83 * Forcibly align the final address in case we have a hardware
69 * align it wrongfully again to the next page. addr can come in 4K 84 * issue that requires alignment for performance reasons.
70 * unaligned here as a result of stack start randomization.
71 */ 85 */
72 addr = PAGE_ALIGN(addr);
73 addr = align_vdso_addr(addr); 86 addr = align_vdso_addr(addr);
74 87
75 return addr; 88 return addr;
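The rewritten vdso_addr() now draws the load address uniformly from every page in [start, end] rather than masking a random value with PTRS_PER_PTE - 1. A standalone sketch of the selection step, with rand() and a hard-coded 4 KiB page standing in for get_random_int() and PAGE_SHIFT:

    #include <stdlib.h>

    #define PAGE_SHIFT 12

    /* start and end are page-aligned and end >= start; every page in the
     * window is equally likely to be chosen.
     */
    static unsigned long pick_vdso_addr(unsigned long start, unsigned long end)
    {
            unsigned long pages  = ((end - start) >> PAGE_SHIFT) + 1;
            unsigned long offset = (unsigned long)rand() % pages;

            return start + (offset << PAGE_SHIFT);
    }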
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6bf3a13e3e0f..78a881b7fc41 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -40,6 +40,7 @@
40#include <xen/interface/physdev.h> 40#include <xen/interface/physdev.h>
41#include <xen/interface/vcpu.h> 41#include <xen/interface/vcpu.h>
42#include <xen/interface/memory.h> 42#include <xen/interface/memory.h>
43#include <xen/interface/nmi.h>
43#include <xen/interface/xen-mca.h> 44#include <xen/interface/xen-mca.h>
44#include <xen/features.h> 45#include <xen/features.h>
45#include <xen/page.h> 46#include <xen/page.h>
@@ -66,6 +67,7 @@
66#include <asm/reboot.h> 67#include <asm/reboot.h>
67#include <asm/stackprotector.h> 68#include <asm/stackprotector.h>
68#include <asm/hypervisor.h> 69#include <asm/hypervisor.h>
70#include <asm/mach_traps.h>
69#include <asm/mwait.h> 71#include <asm/mwait.h>
70#include <asm/pci_x86.h> 72#include <asm/pci_x86.h>
71#include <asm/pat.h> 73#include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
1351 .emergency_restart = xen_emergency_restart, 1353 .emergency_restart = xen_emergency_restart,
1352}; 1354};
1353 1355
1356static unsigned char xen_get_nmi_reason(void)
1357{
1358 unsigned char reason = 0;
1359
1360 /* Construct a value which looks like it came from port 0x61. */
1361 if (test_bit(_XEN_NMIREASON_io_error,
1362 &HYPERVISOR_shared_info->arch.nmi_reason))
1363 reason |= NMI_REASON_IOCHK;
1364 if (test_bit(_XEN_NMIREASON_pci_serr,
1365 &HYPERVISOR_shared_info->arch.nmi_reason))
1366 reason |= NMI_REASON_SERR;
1367
1368 return reason;
1369}
1370
1354static void __init xen_boot_params_init_edd(void) 1371static void __init xen_boot_params_init_edd(void)
1355{ 1372{
1356#if IS_ENABLED(CONFIG_EDD) 1373#if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
1535 pv_info = xen_info; 1552 pv_info = xen_info;
1536 pv_init_ops = xen_init_ops; 1553 pv_init_ops = xen_init_ops;
1537 pv_apic_ops = xen_apic_ops; 1554 pv_apic_ops = xen_apic_ops;
1538 if (!xen_pvh_domain()) 1555 if (!xen_pvh_domain()) {
1539 pv_cpu_ops = xen_cpu_ops; 1556 pv_cpu_ops = xen_cpu_ops;
1540 1557
1558 x86_platform.get_nmi_reason = xen_get_nmi_reason;
1559 }
1560
1541 if (xen_feature(XENFEAT_auto_translated_physmap)) 1561 if (xen_feature(XENFEAT_auto_translated_physmap))
1542 x86_init.resources.memory_setup = xen_auto_xlated_memory_setup; 1562 x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
1543 else 1563 else
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index edbc7a63fd73..70fb5075c901 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
167 return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); 167 return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
168} 168}
169 169
170/* Only to be called in case of a race for a page just allocated! */ 170static void __ref free_p2m_page(void *p)
171static void free_p2m_page(void *p)
172{ 171{
173 BUG_ON(!slab_is_available()); 172 if (unlikely(!slab_is_available())) {
173 free_bootmem((unsigned long)p, PAGE_SIZE);
174 return;
175 }
176
174 free_page((unsigned long)p); 177 free_page((unsigned long)p);
175} 178}
176 179
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
375 p2m_missing_pte : p2m_identity_pte; 378 p2m_missing_pte : p2m_identity_pte;
376 for (i = 0; i < PMDS_PER_MID_PAGE; i++) { 379 for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
377 pmdp = populate_extra_pmd( 380 pmdp = populate_extra_pmd(
378 (unsigned long)(p2m + pfn + i * PTRS_PER_PTE)); 381 (unsigned long)(p2m + pfn) + i * PMD_SIZE);
379 set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE)); 382 set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
380 } 383 }
381 } 384 }
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
436 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual 439 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
437 * pmd. In case of PAE/x86-32 there are multiple pmds to allocate! 440 * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
438 */ 441 */
439static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) 442static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
440{ 443{
441 pte_t *ptechk; 444 pte_t *ptechk;
442 pte_t *pteret = ptep;
443 pte_t *pte_newpg[PMDS_PER_MID_PAGE]; 445 pte_t *pte_newpg[PMDS_PER_MID_PAGE];
444 pmd_t *pmdp; 446 pmd_t *pmdp;
445 unsigned int level; 447 unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
473 if (ptechk == pte_pg) { 475 if (ptechk == pte_pg) {
474 set_pmd(pmdp, 476 set_pmd(pmdp,
475 __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE)); 477 __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
476 if (vaddr == (addr & ~(PMD_SIZE - 1)))
477 pteret = pte_offset_kernel(pmdp, addr);
478 pte_newpg[i] = NULL; 478 pte_newpg[i] = NULL;
479 } 479 }
480 480
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
488 vaddr += PMD_SIZE; 488 vaddr += PMD_SIZE;
489 } 489 }
490 490
491 return pteret; 491 return lookup_address(addr, &level);
492} 492}
493 493
494/* 494/*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
517 517
518 if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) { 518 if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
519 /* PMD level is missing, allocate a new one */ 519 /* PMD level is missing, allocate a new one */
520 ptep = alloc_p2m_pmd(addr, ptep, pte_pg); 520 ptep = alloc_p2m_pmd(addr, pte_pg);
521 if (!ptep) 521 if (!ptep)
522 return false; 522 return false;
523 } 523 }
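The populate_extra_pmd() hunk is a pointer-arithmetic units fix: p2m is an unsigned long *, so the old `p2m + pfn + i * PTRS_PER_PTE` advanced by i * 512 entries, i.e. only i * 4 KiB of virtual address on 64-bit, while each installed PTE page actually covers a whole PMD (2 MiB), hence the byte-based `(unsigned long)(p2m + pfn) + i * PMD_SIZE`. A quick check of the old stride, assuming 8-byte entries:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned long *p2m = (unsigned long *)(uintptr_t)0x100000; /* pretend base */

            /* For i == 1 the old expression moved 512 entries of 8 bytes,
             * i.e. 4 KiB, not the 2 MiB (PMD_SIZE) stride that was intended.
             */
            assert((uintptr_t)(p2m + 512) - (uintptr_t)p2m == 4096);
            return 0;
    }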
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dfd77dec8e2b..865e56cea7a0 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
140unsigned long __ref xen_chk_extra_mem(unsigned long pfn) 140unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
141{ 141{
142 int i; 142 int i;
143 unsigned long addr = PFN_PHYS(pfn); 143 phys_addr_t addr = PFN_PHYS(pfn);
144 144
145 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { 145 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
146 if (addr >= xen_extra_mem[i].start && 146 if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
160 int i; 160 int i;
161 161
162 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { 162 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
163 if (!xen_extra_mem[i].size)
164 continue;
163 pfn_s = PFN_DOWN(xen_extra_mem[i].start); 165 pfn_s = PFN_DOWN(xen_extra_mem[i].start);
164 pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size); 166 pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
165 for (pfn = pfn_s; pfn < pfn_e; pfn++) 167 for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
229 * as a fallback if the remapping fails. 231 * as a fallback if the remapping fails.
230 */ 232 */
231static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, 233static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
232 unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity, 234 unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
233 unsigned long *released)
234{ 235{
235 unsigned long len = 0;
236 unsigned long pfn, end; 236 unsigned long pfn, end;
237 int ret; 237 int ret;
238 238
239 WARN_ON(start_pfn > end_pfn); 239 WARN_ON(start_pfn > end_pfn);
240 240
241 /* Release pages first. */
241 end = min(end_pfn, nr_pages); 242 end = min(end_pfn, nr_pages);
242 for (pfn = start_pfn; pfn < end; pfn++) { 243 for (pfn = start_pfn; pfn < end; pfn++) {
243 unsigned long mfn = pfn_to_mfn(pfn); 244 unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
250 WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); 251 WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
251 252
252 if (ret == 1) { 253 if (ret == 1) {
254 (*released)++;
253 if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY)) 255 if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
254 break; 256 break;
255 len++;
256 } else 257 } else
257 break; 258 break;
258 } 259 }
259 260
260 /* Need to release pages first */ 261 set_phys_range_identity(start_pfn, end_pfn);
261 *released += len;
262 *identity += set_phys_range_identity(start_pfn, end_pfn);
263} 262}
264 263
265/* 264/*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
287 } 286 }
288 287
289 /* Update kernel mapping, but not for highmem. */ 288 /* Update kernel mapping, but not for highmem. */
290 if ((pfn << PAGE_SHIFT) >= __pa(high_memory)) 289 if (pfn >= PFN_UP(__pa(high_memory - 1)))
291 return; 290 return;
292 291
293 if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), 292 if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
318 unsigned long ident_pfn_iter, remap_pfn_iter; 317 unsigned long ident_pfn_iter, remap_pfn_iter;
319 unsigned long ident_end_pfn = start_pfn + size; 318 unsigned long ident_end_pfn = start_pfn + size;
320 unsigned long left = size; 319 unsigned long left = size;
321 unsigned long ident_cnt = 0;
322 unsigned int i, chunk; 320 unsigned int i, chunk;
323 321
324 WARN_ON(size == 0); 322 WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
347 xen_remap_mfn = mfn; 345 xen_remap_mfn = mfn;
348 346
349 /* Set identity map */ 347 /* Set identity map */
350 ident_cnt += set_phys_range_identity(ident_pfn_iter, 348 set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
351 ident_pfn_iter + chunk);
352 349
353 left -= chunk; 350 left -= chunk;
354 } 351 }
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
371static unsigned long __init xen_set_identity_and_remap_chunk( 368static unsigned long __init xen_set_identity_and_remap_chunk(
372 const struct e820entry *list, size_t map_size, unsigned long start_pfn, 369 const struct e820entry *list, size_t map_size, unsigned long start_pfn,
373 unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn, 370 unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
374 unsigned long *identity, unsigned long *released) 371 unsigned long *released, unsigned long *remapped)
375{ 372{
376 unsigned long pfn; 373 unsigned long pfn;
377 unsigned long i = 0; 374 unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
386 /* Do not remap pages beyond the current allocation */ 383 /* Do not remap pages beyond the current allocation */
387 if (cur_pfn >= nr_pages) { 384 if (cur_pfn >= nr_pages) {
388 /* Identity map remaining pages */ 385 /* Identity map remaining pages */
389 *identity += set_phys_range_identity(cur_pfn, 386 set_phys_range_identity(cur_pfn, cur_pfn + size);
390 cur_pfn + size);
391 break; 387 break;
392 } 388 }
393 if (cur_pfn + size > nr_pages) 389 if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
398 if (!remap_range_size) { 394 if (!remap_range_size) {
399 pr_warning("Unable to find available pfn range, not remapping identity pages\n"); 395 pr_warning("Unable to find available pfn range, not remapping identity pages\n");
400 xen_set_identity_and_release_chunk(cur_pfn, 396 xen_set_identity_and_release_chunk(cur_pfn,
401 cur_pfn + left, nr_pages, identity, released); 397 cur_pfn + left, nr_pages, released);
402 break; 398 break;
403 } 399 }
404 /* Adjust size to fit in current e820 RAM region */ 400 /* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
410 /* Update variables to reflect new mappings. */ 406 /* Update variables to reflect new mappings. */
411 i += size; 407 i += size;
412 remap_pfn += size; 408 remap_pfn += size;
413 *identity += size; 409 *remapped += size;
414 } 410 }
415 411
416 /* 412 /*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
427 423
428static void __init xen_set_identity_and_remap( 424static void __init xen_set_identity_and_remap(
429 const struct e820entry *list, size_t map_size, unsigned long nr_pages, 425 const struct e820entry *list, size_t map_size, unsigned long nr_pages,
430 unsigned long *released) 426 unsigned long *released, unsigned long *remapped)
431{ 427{
432 phys_addr_t start = 0; 428 phys_addr_t start = 0;
433 unsigned long identity = 0;
434 unsigned long last_pfn = nr_pages; 429 unsigned long last_pfn = nr_pages;
435 const struct e820entry *entry; 430 const struct e820entry *entry;
436 unsigned long num_released = 0; 431 unsigned long num_released = 0;
432 unsigned long num_remapped = 0;
437 int i; 433 int i;
438 434
439 /* 435 /*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
460 last_pfn = xen_set_identity_and_remap_chunk( 456 last_pfn = xen_set_identity_and_remap_chunk(
461 list, map_size, start_pfn, 457 list, map_size, start_pfn,
462 end_pfn, nr_pages, last_pfn, 458 end_pfn, nr_pages, last_pfn,
463 &identity, &num_released); 459 &num_released, &num_remapped);
464 start = end; 460 start = end;
465 } 461 }
466 } 462 }
467 463
468 *released = num_released; 464 *released = num_released;
465 *remapped = num_remapped;
469 466
470 pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
471 pr_info("Released %ld page(s)\n", num_released); 467 pr_info("Released %ld page(s)\n", num_released);
472} 468}
473 469
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
586 struct xen_memory_map memmap; 582 struct xen_memory_map memmap;
587 unsigned long max_pages; 583 unsigned long max_pages;
588 unsigned long extra_pages = 0; 584 unsigned long extra_pages = 0;
585 unsigned long remapped_pages;
589 int i; 586 int i;
590 int op; 587 int op;
591 588
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
635 * underlying RAM. 632 * underlying RAM.
636 */ 633 */
637 xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn, 634 xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
638 &xen_released_pages); 635 &xen_released_pages, &remapped_pages);
639 636
640 extra_pages += xen_released_pages; 637 extra_pages += xen_released_pages;
638 extra_pages += remapped_pages;
641 639
642 /* 640 /*
643 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO 641 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index f473d268d387..69087341d9ae 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
391 391
392struct xen_clock_event_device { 392struct xen_clock_event_device {
393 struct clock_event_device evt; 393 struct clock_event_device evt;
394 char *name; 394 char name[16];
395}; 395};
396static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 }; 396static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
397 397
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
420 if (evt->irq >= 0) { 420 if (evt->irq >= 0) {
421 unbind_from_irqhandler(evt->irq, NULL); 421 unbind_from_irqhandler(evt->irq, NULL);
422 evt->irq = -1; 422 evt->irq = -1;
423 kfree(per_cpu(xen_clock_events, cpu).name);
424 per_cpu(xen_clock_events, cpu).name = NULL;
425 } 423 }
426} 424}
427 425
428void xen_setup_timer(int cpu) 426void xen_setup_timer(int cpu)
429{ 427{
430 char *name; 428 struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
431 struct clock_event_device *evt; 429 struct clock_event_device *evt = &xevt->evt;
432 int irq; 430 int irq;
433 431
434 evt = &per_cpu(xen_clock_events, cpu).evt;
435 WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); 432 WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
436 if (evt->irq >= 0) 433 if (evt->irq >= 0)
437 xen_teardown_timer(cpu); 434 xen_teardown_timer(cpu);
438 435
439 printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); 436 printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
440 437
441 name = kasprintf(GFP_KERNEL, "timer%d", cpu); 438 snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
442 if (!name)
443 name = "<timer kasprintf failed>";
444 439
445 irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, 440 irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
446 IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| 441 IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
447 IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, 442 IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
448 name, NULL); 443 xevt->name, NULL);
449 (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX); 444 (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
450 445
451 memcpy(evt, xen_clockevent, sizeof(*evt)); 446 memcpy(evt, xen_clockevent, sizeof(*evt));
452 447
453 evt->cpumask = cpumask_of(cpu); 448 evt->cpumask = cpumask_of(cpu);
454 evt->irq = irq; 449 evt->irq = irq;
455 per_cpu(xen_clock_events, cpu).name = name;
456} 450}
457 451
458 452
459void xen_setup_cpu_clockevents(void) 453void xen_setup_cpu_clockevents(void)
460{ 454{
461 BUG_ON(preemptible());
462
463 clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); 455 clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
464} 456}
465 457
diff --git a/block/blk-core.c b/block/blk-core.c
index 30f6153a40c2..3ad405571dcc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
473} 473}
474EXPORT_SYMBOL_GPL(blk_queue_bypass_end); 474EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
475 475
476void blk_set_queue_dying(struct request_queue *q)
477{
478 queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
479
480 if (q->mq_ops)
481 blk_mq_wake_waiters(q);
482 else {
483 struct request_list *rl;
484
485 blk_queue_for_each_rl(rl, q) {
486 if (rl->rq_pool) {
487 wake_up(&rl->wait[BLK_RW_SYNC]);
488 wake_up(&rl->wait[BLK_RW_ASYNC]);
489 }
490 }
491 }
492}
493EXPORT_SYMBOL_GPL(blk_set_queue_dying);
494
476/** 495/**
477 * blk_cleanup_queue - shutdown a request queue 496 * blk_cleanup_queue - shutdown a request queue
478 * @q: request queue to shutdown 497 * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
486 505
487 /* mark @q DYING, no new request or merges will be allowed afterwards */ 506 /* mark @q DYING, no new request or merges will be allowed afterwards */
488 mutex_lock(&q->sysfs_lock); 507 mutex_lock(&q->sysfs_lock);
489 queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); 508 blk_set_queue_dying(q);
490 spin_lock_irq(lock); 509 spin_lock_irq(lock);
491 510
492 /* 511 /*
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 32e8dbb9ad1c..60c9d4a93fe4 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
68} 68}
69 69
70/* 70/*
71 * Wakeup all potentially sleeping on normal (non-reserved) tags 71 * Wakeup all potentially sleeping on tags
72 */ 72 */
73static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) 73void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
74{ 74{
75 struct blk_mq_bitmap_tags *bt; 75 struct blk_mq_bitmap_tags *bt;
76 int i, wake_index; 76 int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
85 85
86 wake_index = bt_index_inc(wake_index); 86 wake_index = bt_index_inc(wake_index);
87 } 87 }
88
89 if (include_reserve) {
90 bt = &tags->breserved_tags;
91 if (waitqueue_active(&bt->bs[0].wait))
92 wake_up(&bt->bs[0].wait);
93 }
88} 94}
89 95
90/* 96/*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
100 106
101 atomic_dec(&tags->active_queues); 107 atomic_dec(&tags->active_queues);
102 108
103 blk_mq_tag_wakeup_all(tags); 109 blk_mq_tag_wakeup_all(tags, false);
104} 110}
105 111
106/* 112/*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
584 * static and should never need resizing. 590 * static and should never need resizing.
585 */ 591 */
586 bt_update_count(&tags->bitmap_tags, tdepth); 592 bt_update_count(&tags->bitmap_tags, tdepth);
587 blk_mq_tag_wakeup_all(tags); 593 blk_mq_tag_wakeup_all(tags, false);
588 return 0; 594 return 0;
589} 595}
590 596
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 6206ed17ef76..a6fa0fc9d41a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
54extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); 54extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
55extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); 55extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
56extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); 56extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
57extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
57 58
58enum { 59enum {
59 BLK_MQ_TAG_CACHE_MIN = 1, 60 BLK_MQ_TAG_CACHE_MIN = 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da1ab5641227..2f95747c287e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
107 wake_up_all(&q->mq_freeze_wq); 107 wake_up_all(&q->mq_freeze_wq);
108} 108}
109 109
110static void blk_mq_freeze_queue_start(struct request_queue *q) 110void blk_mq_freeze_queue_start(struct request_queue *q)
111{ 111{
112 bool freeze; 112 bool freeze;
113 113
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
120 blk_mq_run_queues(q, false); 120 blk_mq_run_queues(q, false);
121 } 121 }
122} 122}
123EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
123 124
124static void blk_mq_freeze_queue_wait(struct request_queue *q) 125static void blk_mq_freeze_queue_wait(struct request_queue *q)
125{ 126{
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
136 blk_mq_freeze_queue_wait(q); 137 blk_mq_freeze_queue_wait(q);
137} 138}
138 139
139static void blk_mq_unfreeze_queue(struct request_queue *q) 140void blk_mq_unfreeze_queue(struct request_queue *q)
140{ 141{
141 bool wake; 142 bool wake;
142 143
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
149 wake_up_all(&q->mq_freeze_wq); 150 wake_up_all(&q->mq_freeze_wq);
150 } 151 }
151} 152}
153EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
154
155void blk_mq_wake_waiters(struct request_queue *q)
156{
157 struct blk_mq_hw_ctx *hctx;
158 unsigned int i;
159
160 queue_for_each_hw_ctx(q, hctx, i)
161 if (blk_mq_hw_queue_mapped(hctx))
162 blk_mq_tag_wakeup_all(hctx->tags, true);
163
164 /*
165 * If we are called because the queue has now been marked as
166 * dying, we need to ensure that processes currently waiting on
167 * the queue are notified as well.
168 */
169 wake_up_all(&q->mq_freeze_wq);
170}
152 171
153bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) 172bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
154{ 173{
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
258 ctx = alloc_data.ctx; 277 ctx = alloc_data.ctx;
259 } 278 }
260 blk_mq_put_ctx(ctx); 279 blk_mq_put_ctx(ctx);
261 if (!rq) 280 if (!rq) {
281 blk_mq_queue_exit(q);
262 return ERR_PTR(-EWOULDBLOCK); 282 return ERR_PTR(-EWOULDBLOCK);
283 }
263 return rq; 284 return rq;
264} 285}
265EXPORT_SYMBOL(blk_mq_alloc_request); 286EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
383} 404}
384EXPORT_SYMBOL(blk_mq_complete_request); 405EXPORT_SYMBOL(blk_mq_complete_request);
385 406
407int blk_mq_request_started(struct request *rq)
408{
409 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
410}
411EXPORT_SYMBOL_GPL(blk_mq_request_started);
412
386void blk_mq_start_request(struct request *rq) 413void blk_mq_start_request(struct request *rq)
387{ 414{
388 struct request_queue *q = rq->q; 415 struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
500} 527}
501EXPORT_SYMBOL(blk_mq_add_to_requeue_list); 528EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
502 529
530void blk_mq_cancel_requeue_work(struct request_queue *q)
531{
532 cancel_work_sync(&q->requeue_work);
533}
534EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
535
503void blk_mq_kick_requeue_list(struct request_queue *q) 536void blk_mq_kick_requeue_list(struct request_queue *q)
504{ 537{
505 kblockd_schedule_work(&q->requeue_work); 538 kblockd_schedule_work(&q->requeue_work);
506} 539}
507EXPORT_SYMBOL(blk_mq_kick_requeue_list); 540EXPORT_SYMBOL(blk_mq_kick_requeue_list);
508 541
542void blk_mq_abort_requeue_list(struct request_queue *q)
543{
544 unsigned long flags;
545 LIST_HEAD(rq_list);
546
547 spin_lock_irqsave(&q->requeue_lock, flags);
548 list_splice_init(&q->requeue_list, &rq_list);
549 spin_unlock_irqrestore(&q->requeue_lock, flags);
550
551 while (!list_empty(&rq_list)) {
552 struct request *rq;
553
554 rq = list_first_entry(&rq_list, struct request, queuelist);
555 list_del_init(&rq->queuelist);
556 rq->errors = -EIO;
557 blk_mq_end_request(rq, rq->errors);
558 }
559}
560EXPORT_SYMBOL(blk_mq_abort_requeue_list);
561
509static inline bool is_flush_request(struct request *rq, 562static inline bool is_flush_request(struct request *rq,
510 struct blk_flush_queue *fq, unsigned int tag) 563 struct blk_flush_queue *fq, unsigned int tag)
511{ 564{
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
566 break; 619 break;
567 } 620 }
568} 621}
569 622
570static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, 623static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
571 struct request *rq, void *priv, bool reserved) 624 struct request *rq, void *priv, bool reserved)
572{ 625{
573 struct blk_mq_timeout_data *data = priv; 626 struct blk_mq_timeout_data *data = priv;
574 627
575 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) 628 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
629 /*
630 * If a request wasn't started before the queue was
631 * marked dying, kill it here or it'll go unnoticed.
632 */
633 if (unlikely(blk_queue_dying(rq->q))) {
634 rq->errors = -EIO;
635 blk_mq_complete_request(rq);
636 }
637 return;
638 }
639 if (rq->cmd_flags & REQ_NO_TIMEOUT)
576 return; 640 return;
577 641
578 if (time_after_eq(jiffies, rq->deadline)) { 642 if (time_after_eq(jiffies, rq->deadline)) {
@@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
1601 hctx->queue = q; 1665 hctx->queue = q;
1602 hctx->queue_num = hctx_idx; 1666 hctx->queue_num = hctx_idx;
1603 hctx->flags = set->flags; 1667 hctx->flags = set->flags;
1604 hctx->cmd_size = set->cmd_size;
1605 1668
1606 blk_mq_init_cpu_notifier(&hctx->cpu_notifier, 1669 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1607 blk_mq_hctx_notify, hctx); 1670 blk_mq_hctx_notify, hctx);
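Together with blk_set_queue_dying() from the blk-core.c hunk above, the helpers exported here let a driver fail a queue whose device has gone away: mark it dying so sleeping allocators and freeze waiters are woken, stop the requeue worker, then error out whatever was still parked for requeue. A hypothetical caller (not part of this patch; header locations assumed):

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /* Sketch of a surprise-removal teardown path using the exports above. */
    static void my_dev_kill_queue(struct request_queue *q)
    {
            blk_set_queue_dying(q);
            blk_mq_cancel_requeue_work(q);
            blk_mq_abort_requeue_list(q);
    }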
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 206230e64f79..4f4f943c22c3 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
32void blk_mq_clone_flush_request(struct request *flush_rq, 32void blk_mq_clone_flush_request(struct request *flush_rq,
33 struct request *orig_rq); 33 struct request *orig_rq);
34int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 34int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
35void blk_mq_wake_waiters(struct request_queue *q);
35 36
36/* 37/*
37 * CPU hotplug helpers 38 * CPU hotplug helpers
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 56c025894cdf..246dfb16c3d9 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
190 struct request_queue *q = req->q; 190 struct request_queue *q = req->q;
191 unsigned long expiry; 191 unsigned long expiry;
192 192
193 if (req->cmd_flags & REQ_NO_TIMEOUT)
194 return;
195
193 /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ 196 /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
194 if (!q->mq_ops && !q->rq_timed_out_fn) 197 if (!q->mq_ops && !q->rq_timed_out_fn)
195 return; 198 return;
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 9b3c54c1cbe8..3dd101144a58 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1475,3 +1475,4 @@ module_exit(aes_fini);
1475MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); 1475MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1476MODULE_LICENSE("Dual BSD/GPL"); 1476MODULE_LICENSE("Dual BSD/GPL");
1477MODULE_ALIAS_CRYPTO("aes"); 1477MODULE_ALIAS_CRYPTO("aes");
1478MODULE_ALIAS_CRYPTO("aes-generic");
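This and the matching additions in the crypto/*_generic.c files below register the implementation's driver name ("<alg>-generic") as a module alias alongside the algorithm name, so the generic module can still be autoloaded when a request names the driver explicitly; the crypto API resolves such requests through "crypto-"-prefixed aliases. The macro is roughly (recalled from include/linux/crypto.h, so treat the exact expansion as an assumption):

    #define MODULE_ALIAS_CRYPTO(name) \
            __MODULE_INFO(alias, alias_userspace, name); \
            __MODULE_INFO(alias, alias_crypto, "crypto-" name)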
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 1fa7bc31be63..4665b79c729a 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -455,6 +455,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
455{ 455{
456 struct af_alg_completion *completion = req->data; 456 struct af_alg_completion *completion = req->data;
457 457
458 if (err == -EINPROGRESS)
459 return;
460
458 completion->err = err; 461 completion->err = err;
459 complete(&completion->completion); 462 complete(&completion->completion);
460} 463}
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index b4485a108389..6f5bebc9bf01 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -477,3 +477,4 @@ MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
477module_init(prng_mod_init); 477module_init(prng_mod_init);
478module_exit(prng_mod_fini); 478module_exit(prng_mod_fini);
479MODULE_ALIAS_CRYPTO("stdrng"); 479MODULE_ALIAS_CRYPTO("stdrng");
480MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 7bd71f02d0dd..87b392a77a93 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -139,3 +139,4 @@ module_exit(blowfish_mod_fini);
139MODULE_LICENSE("GPL"); 139MODULE_LICENSE("GPL");
140MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); 140MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
141MODULE_ALIAS_CRYPTO("blowfish"); 141MODULE_ALIAS_CRYPTO("blowfish");
142MODULE_ALIAS_CRYPTO("blowfish-generic");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 1b74c5a3e891..a02286bf319e 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1099,3 +1099,4 @@ module_exit(camellia_fini);
1099MODULE_DESCRIPTION("Camellia Cipher Algorithm"); 1099MODULE_DESCRIPTION("Camellia Cipher Algorithm");
1100MODULE_LICENSE("GPL"); 1100MODULE_LICENSE("GPL");
1101MODULE_ALIAS_CRYPTO("camellia"); 1101MODULE_ALIAS_CRYPTO("camellia");
1102MODULE_ALIAS_CRYPTO("camellia-generic");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 84c86db67ec7..df5c72629383 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -550,3 +550,4 @@ module_exit(cast5_mod_fini);
550MODULE_LICENSE("GPL"); 550MODULE_LICENSE("GPL");
551MODULE_DESCRIPTION("Cast5 Cipher Algorithm"); 551MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
552MODULE_ALIAS_CRYPTO("cast5"); 552MODULE_ALIAS_CRYPTO("cast5");
553MODULE_ALIAS_CRYPTO("cast5-generic");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index f408f0bd8de2..058c8d755d03 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -292,3 +292,4 @@ module_exit(cast6_mod_fini);
292MODULE_LICENSE("GPL"); 292MODULE_LICENSE("GPL");
293MODULE_DESCRIPTION("Cast6 Cipher Algorithm"); 293MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
294MODULE_ALIAS_CRYPTO("cast6"); 294MODULE_ALIAS_CRYPTO("cast6");
295MODULE_ALIAS_CRYPTO("cast6-generic");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 2a062025749d..06f1b60f02b2 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -171,4 +171,5 @@ MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
171MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); 171MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
172MODULE_LICENSE("GPL"); 172MODULE_LICENSE("GPL");
173MODULE_ALIAS_CRYPTO("crc32c"); 173MODULE_ALIAS_CRYPTO("crc32c");
174MODULE_ALIAS_CRYPTO("crc32c-generic");
174MODULE_SOFTDEP("pre: crc32c"); 175MODULE_SOFTDEP("pre: crc32c");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 08bb4f504520..c1229614c7e3 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -125,3 +125,4 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
125MODULE_DESCRIPTION("T10 DIF CRC calculation."); 125MODULE_DESCRIPTION("T10 DIF CRC calculation.");
126MODULE_LICENSE("GPL"); 126MODULE_LICENSE("GPL");
127MODULE_ALIAS_CRYPTO("crct10dif"); 127MODULE_ALIAS_CRYPTO("crct10dif");
128MODULE_ALIAS_CRYPTO("crct10dif-generic");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 42912948776b..a71720544d11 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -983,8 +983,6 @@ static struct crypto_alg des_algs[2] = { {
983 .cia_decrypt = des3_ede_decrypt } } 983 .cia_decrypt = des3_ede_decrypt } }
984} }; 984} };
985 985
986MODULE_ALIAS_CRYPTO("des3_ede");
987
988static int __init des_generic_mod_init(void) 986static int __init des_generic_mod_init(void)
989{ 987{
990 return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs)); 988 return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
@@ -1001,4 +999,7 @@ module_exit(des_generic_mod_fini);
1001MODULE_LICENSE("GPL"); 999MODULE_LICENSE("GPL");
1002MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); 1000MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
1003MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>"); 1001MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>");
1004MODULE_ALIAS("des"); 1002MODULE_ALIAS_CRYPTO("des");
1003MODULE_ALIAS_CRYPTO("des-generic");
1004MODULE_ALIAS_CRYPTO("des3_ede");
1005MODULE_ALIAS_CRYPTO("des3_ede-generic");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 4e97fae9666f..bac70995e064 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -173,3 +173,4 @@ module_exit(ghash_mod_exit);
173MODULE_LICENSE("GPL"); 173MODULE_LICENSE("GPL");
174MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); 174MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
175MODULE_ALIAS_CRYPTO("ghash"); 175MODULE_ALIAS_CRYPTO("ghash");
176MODULE_ALIAS_CRYPTO("ghash-generic");
diff --git a/crypto/krng.c b/crypto/krng.c
index 67c88b331210..0224841b6579 100644
--- a/crypto/krng.c
+++ b/crypto/krng.c
@@ -63,3 +63,4 @@ module_exit(krng_mod_fini);
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64MODULE_DESCRIPTION("Kernel Random Number Generator"); 64MODULE_DESCRIPTION("Kernel Random Number Generator");
65MODULE_ALIAS_CRYPTO("stdrng"); 65MODULE_ALIAS_CRYPTO("stdrng");
66MODULE_ALIAS_CRYPTO("krng");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 3d0f9df30ac9..f550b5d94630 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -249,3 +249,4 @@ module_exit(salsa20_generic_mod_fini);
249MODULE_LICENSE("GPL"); 249MODULE_LICENSE("GPL");
250MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm"); 250MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
251MODULE_ALIAS_CRYPTO("salsa20"); 251MODULE_ALIAS_CRYPTO("salsa20");
252MODULE_ALIAS_CRYPTO("salsa20-generic");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index a53b5e2af335..94970a794975 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -667,3 +667,4 @@ MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Ci
667MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>"); 667MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
668MODULE_ALIAS_CRYPTO("tnepres"); 668MODULE_ALIAS_CRYPTO("tnepres");
669MODULE_ALIAS_CRYPTO("serpent"); 669MODULE_ALIAS_CRYPTO("serpent");
670MODULE_ALIAS_CRYPTO("serpent-generic");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 039e58cfa155..a3e50c37eb6f 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -154,3 +154,4 @@ MODULE_LICENSE("GPL");
154MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); 154MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
155 155
156MODULE_ALIAS_CRYPTO("sha1"); 156MODULE_ALIAS_CRYPTO("sha1");
157MODULE_ALIAS_CRYPTO("sha1-generic");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 5eb21b120033..b001ff5c2efc 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -385,4 +385,6 @@ MODULE_LICENSE("GPL");
385MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); 385MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
386 386
387MODULE_ALIAS_CRYPTO("sha224"); 387MODULE_ALIAS_CRYPTO("sha224");
388MODULE_ALIAS_CRYPTO("sha224-generic");
388MODULE_ALIAS_CRYPTO("sha256"); 389MODULE_ALIAS_CRYPTO("sha256");
390MODULE_ALIAS_CRYPTO("sha256-generic");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 8d0b19ed4f4b..1c3c3767e079 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -289,4 +289,6 @@ MODULE_LICENSE("GPL");
289MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms"); 289MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
290 290
291MODULE_ALIAS_CRYPTO("sha384"); 291MODULE_ALIAS_CRYPTO("sha384");
292MODULE_ALIAS_CRYPTO("sha384-generic");
292MODULE_ALIAS_CRYPTO("sha512"); 293MODULE_ALIAS_CRYPTO("sha512");
294MODULE_ALIAS_CRYPTO("sha512-generic");
diff --git a/crypto/tea.c b/crypto/tea.c
index 495be2d0077d..b70b441c7d1e 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -270,6 +270,7 @@ static void __exit tea_mod_fini(void)
270 crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs)); 270 crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
271} 271}
272 272
273MODULE_ALIAS_CRYPTO("tea");
273MODULE_ALIAS_CRYPTO("xtea"); 274MODULE_ALIAS_CRYPTO("xtea");
274MODULE_ALIAS_CRYPTO("xeta"); 275MODULE_ALIAS_CRYPTO("xeta");
275 276
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 6e5651c66cf8..321bc6ff2a9d 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -676,6 +676,7 @@ static void __exit tgr192_mod_fini(void)
676 crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs)); 676 crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
677} 677}
678 678
679MODULE_ALIAS_CRYPTO("tgr192");
679MODULE_ALIAS_CRYPTO("tgr160"); 680MODULE_ALIAS_CRYPTO("tgr160");
680MODULE_ALIAS_CRYPTO("tgr128"); 681MODULE_ALIAS_CRYPTO("tgr128");
681 682
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 523ad8c4e359..ebf7a3efb572 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
212MODULE_LICENSE("GPL"); 212MODULE_LICENSE("GPL");
213MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); 213MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
214MODULE_ALIAS_CRYPTO("twofish"); 214MODULE_ALIAS_CRYPTO("twofish");
215MODULE_ALIAS_CRYPTO("twofish-generic");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 0de42eb3d040..7ee5a043a988 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1167,6 +1167,7 @@ static void __exit wp512_mod_fini(void)
1167 crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs)); 1167 crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
1168} 1168}
1169 1169
1170MODULE_ALIAS_CRYPTO("wp512");
1170MODULE_ALIAS_CRYPTO("wp384"); 1171MODULE_ALIAS_CRYPTO("wp384");
1171MODULE_ALIAS_CRYPTO("wp256"); 1172MODULE_ALIAS_CRYPTO("wp256");
1172 1173
diff --git a/drivers/Makefile b/drivers/Makefile
index 67d2334dc41e..527a6da8d539 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/
50obj-y += tty/ 50obj-y += tty/
51obj-y += char/ 51obj-y += char/
52 52
53# gpu/ comes after char for AGP vs DRM startup 53# iommu/ comes before gpu as gpu are using iommu controllers
54obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
55
56# gpu/ comes after char for AGP vs DRM startup and after iommu
54obj-y += gpu/ 57obj-y += gpu/
55 58
56obj-$(CONFIG_CONNECTOR) += connector/ 59obj-$(CONFIG_CONNECTOR) += connector/
@@ -141,7 +144,6 @@ obj-y += clk/
141 144
142obj-$(CONFIG_MAILBOX) += mailbox/ 145obj-$(CONFIG_MAILBOX) += mailbox/
143obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ 146obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
144obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
145obj-$(CONFIG_REMOTEPROC) += remoteproc/ 147obj-$(CONFIG_REMOTEPROC) += remoteproc/
146obj-$(CONFIG_RPMSG) += rpmsg/ 148obj-$(CONFIG_RPMSG) += rpmsg/
147 149
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1fdf5e07a1c7..1020b1b53a17 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
170 acpi_status status; 170 acpi_status status;
171 int ret; 171 int ret;
172 172
173 if (pr->apic_id == -1) 173 if (pr->phys_id == -1)
174 return -ENODEV; 174 return -ENODEV;
175 175
176 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); 176 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
180 cpu_maps_update_begin(); 180 cpu_maps_update_begin();
181 cpu_hotplug_begin(); 181 cpu_hotplug_begin();
182 182
183 ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id); 183 ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
184 if (ret) 184 if (ret)
185 goto out; 185 goto out;
186 186
187 ret = arch_register_cpu(pr->id); 187 ret = arch_register_cpu(pr->id);
188 if (ret) { 188 if (ret) {
189 acpi_unmap_lsapic(pr->id); 189 acpi_unmap_cpu(pr->id);
190 goto out; 190 goto out;
191 } 191 }
192 192
@@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
215 union acpi_object object = { 0 }; 215 union acpi_object object = { 0 };
216 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 216 struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
217 struct acpi_processor *pr = acpi_driver_data(device); 217 struct acpi_processor *pr = acpi_driver_data(device);
218 int apic_id, cpu_index, device_declaration = 0; 218 int phys_id, cpu_index, device_declaration = 0;
219 acpi_status status = AE_OK; 219 acpi_status status = AE_OK;
220 static int cpu0_initialized; 220 static int cpu0_initialized;
221 unsigned long long value; 221 unsigned long long value;
@@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
262 pr->acpi_id = value; 262 pr->acpi_id = value;
263 } 263 }
264 264
265 apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); 265 phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
266 if (apic_id < 0) 266 if (phys_id < 0)
267 acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); 267 acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
268 pr->apic_id = apic_id; 268 pr->phys_id = phys_id;
269 269
270 cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); 270 cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
271 if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { 271 if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
272 cpu0_initialized = 1; 272 cpu0_initialized = 1;
273 /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 273 /*
274 * Handle UP system running SMP kernel, with no CPU
275 * entry in MADT
276 */
274 if ((cpu_index == -1) && (num_online_cpus() == 1)) 277 if ((cpu_index == -1) && (num_online_cpus() == 1))
275 cpu_index = 0; 278 cpu_index = 0;
276 } 279 }
@@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
458 461
459 /* Remove the CPU. */ 462 /* Remove the CPU. */
460 arch_unregister_cpu(pr->id); 463 arch_unregister_cpu(pr->id);
461 acpi_unmap_lsapic(pr->id); 464 acpi_unmap_cpu(pr->id);
462 465
463 cpu_hotplug_done(); 466 cpu_hotplug_done();
464 cpu_maps_update_done(); 467 cpu_maps_update_done();
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c2daa85fc9f7..c0d44d394ca3 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
257 257
258 device->power.state = ACPI_STATE_UNKNOWN; 258 device->power.state = ACPI_STATE_UNKNOWN;
259 if (!acpi_device_is_present(device)) 259 if (!acpi_device_is_present(device))
260 return 0; 260 return -ENXIO;
261 261
262 result = acpi_device_get_power(device, &state); 262 result = acpi_device_get_power(device, &state);
263 if (result) 263 if (result)
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
index a27d31d1ba24..9dcf83682e36 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/int340x_thermal.c
@@ -14,10 +14,10 @@
14 14
15#include "internal.h" 15#include "internal.h"
16 16
17#define DO_ENUMERATION 0x01 17#define INT3401_DEVICE 0X01
18static const struct acpi_device_id int340x_thermal_device_ids[] = { 18static const struct acpi_device_id int340x_thermal_device_ids[] = {
19 {"INT3400", DO_ENUMERATION }, 19 {"INT3400"},
20 {"INT3401"}, 20 {"INT3401", INT3401_DEVICE},
21 {"INT3402"}, 21 {"INT3402"},
22 {"INT3403"}, 22 {"INT3403"},
23 {"INT3404"}, 23 {"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
34 const struct acpi_device_id *id) 34 const struct acpi_device_id *id)
35{ 35{
36#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) 36#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
37 if (id->driver_data == DO_ENUMERATION) 37 acpi_create_platform_device(adev);
38#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
39 /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
40 if (id->driver_data == INT3401_DEVICE)
38 acpi_create_platform_device(adev); 41 acpi_create_platform_device(adev);
39#endif 42#endif
40 return 1; 43 return 1;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 342942f90a10..02e48394276c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
69 unsigned long madt_end, entry; 69 unsigned long madt_end, entry;
70 static struct acpi_table_madt *madt; 70 static struct acpi_table_madt *madt;
71 static int read_madt; 71 static int read_madt;
72 int apic_id = -1; 72 int phys_id = -1; /* CPU hardware ID */
73 73
74 if (!read_madt) { 74 if (!read_madt) {
75 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, 75 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
79 } 79 }
80 80
81 if (!madt) 81 if (!madt)
82 return apic_id; 82 return phys_id;
83 83
84 entry = (unsigned long)madt; 84 entry = (unsigned long)madt;
85 madt_end = entry + madt->header.length; 85 madt_end = entry + madt->header.length;
@@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
91 struct acpi_subtable_header *header = 91 struct acpi_subtable_header *header =
92 (struct acpi_subtable_header *)entry; 92 (struct acpi_subtable_header *)entry;
93 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { 93 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
94 if (!map_lapic_id(header, acpi_id, &apic_id)) 94 if (!map_lapic_id(header, acpi_id, &phys_id))
95 break; 95 break;
96 } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { 96 } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
97 if (!map_x2apic_id(header, type, acpi_id, &apic_id)) 97 if (!map_x2apic_id(header, type, acpi_id, &phys_id))
98 break; 98 break;
99 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { 99 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
100 if (!map_lsapic_id(header, type, acpi_id, &apic_id)) 100 if (!map_lsapic_id(header, type, acpi_id, &phys_id))
101 break; 101 break;
102 } 102 }
103 entry += header->length; 103 entry += header->length;
104 } 104 }
105 return apic_id; 105 return phys_id;
106} 106}
107 107
108static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) 108static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
110 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 110 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
111 union acpi_object *obj; 111 union acpi_object *obj;
112 struct acpi_subtable_header *header; 112 struct acpi_subtable_header *header;
113 int apic_id = -1; 113 int phys_id = -1;
114 114
115 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 115 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
116 goto exit; 116 goto exit;
@@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
126 126
127 header = (struct acpi_subtable_header *)obj->buffer.pointer; 127 header = (struct acpi_subtable_header *)obj->buffer.pointer;
128 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) 128 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
129 map_lapic_id(header, acpi_id, &apic_id); 129 map_lapic_id(header, acpi_id, &phys_id);
130 else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) 130 else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
131 map_lsapic_id(header, type, acpi_id, &apic_id); 131 map_lsapic_id(header, type, acpi_id, &phys_id);
132 else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) 132 else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
133 map_x2apic_id(header, type, acpi_id, &apic_id); 133 map_x2apic_id(header, type, acpi_id, &phys_id);
134 134
135exit: 135exit:
136 kfree(buffer.pointer); 136 kfree(buffer.pointer);
137 return apic_id; 137 return phys_id;
138} 138}
139 139
140int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id) 140int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
141{ 141{
142 int apic_id; 142 int phys_id;
143 143
144 apic_id = map_mat_entry(handle, type, acpi_id); 144 phys_id = map_mat_entry(handle, type, acpi_id);
145 if (apic_id == -1) 145 if (phys_id == -1)
146 apic_id = map_madt_entry(type, acpi_id); 146 phys_id = map_madt_entry(type, acpi_id);
147 147
148 return apic_id; 148 return phys_id;
149} 149}
150 150
151int acpi_map_cpuid(int apic_id, u32 acpi_id) 151int acpi_map_cpuid(int phys_id, u32 acpi_id)
152{ 152{
153#ifdef CONFIG_SMP 153#ifdef CONFIG_SMP
154 int i; 154 int i;
155#endif 155#endif
156 156
157 if (apic_id == -1) { 157 if (phys_id == -1) {
158 /* 158 /*
159 * On UP processor, there is no _MAT or MADT table. 159 * On UP processor, there is no _MAT or MADT table.
160 * So above apic_id is always set to -1. 160 * So above phys_id is always set to -1.
161 * 161 *
162 * BIOS may define multiple CPU handles even for UP processor. 162 * BIOS may define multiple CPU handles even for UP processor.
163 * For example, 163 * For example,
@@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
170 * Processor (CPU3, 0x03, 0x00000410, 0x06) {} 170 * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
171 * } 171 * }
172 * 172 *
173 * Ignores apic_id and always returns 0 for the processor 173 * Ignores phys_id and always returns 0 for the processor
174 * handle with acpi id 0 if nr_cpu_ids is 1. 174 * handle with acpi id 0 if nr_cpu_ids is 1.
175 * This should be the case if SMP tables are not found. 175 * This should be the case if SMP tables are not found.
176 * Return -1 for other CPU's handle. 176 * Return -1 for other CPU's handle.
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
178 if (nr_cpu_ids <= 1 && acpi_id == 0) 178 if (nr_cpu_ids <= 1 && acpi_id == 0)
179 return acpi_id; 179 return acpi_id;
180 else 180 else
181 return apic_id; 181 return phys_id;
182 } 182 }
183 183
184#ifdef CONFIG_SMP 184#ifdef CONFIG_SMP
185 for_each_possible_cpu(i) { 185 for_each_possible_cpu(i) {
186 if (cpu_physical_id(i) == apic_id) 186 if (cpu_physical_id(i) == phys_id)
187 return i; 187 return i;
188 } 188 }
189#else 189#else
190 /* In UP kernel, only processor 0 is valid */ 190 /* In UP kernel, only processor 0 is valid */
191 if (apic_id == 0) 191 if (phys_id == 0)
192 return apic_id; 192 return phys_id;
193#endif 193#endif
194 return -1; 194 return -1;
195} 195}
196 196
197int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) 197int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
198{ 198{
199 int apic_id; 199 int phys_id;
200 200
201 apic_id = acpi_get_apicid(handle, type, acpi_id); 201 phys_id = acpi_get_phys_id(handle, type, acpi_id);
202 202
203 return acpi_map_cpuid(apic_id, acpi_id); 203 return acpi_map_cpuid(phys_id, acpi_id);
204} 204}
205EXPORT_SYMBOL_GPL(acpi_get_cpuid); 205EXPORT_SYMBOL_GPL(acpi_get_cpuid);
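The rename above (apic_id to phys_id, acpi_get_apicid() to acpi_get_phys_id(), acpi_map_lsapic()/acpi_unmap_lsapic() to acpi_map_cpu()/acpi_unmap_cpu()) makes these helpers architecture-neutral. A hedged usage sketch, not part of the patch (demo_acpi_to_logical_cpu is invented and the header locations are assumed), showing how a caller resolves an ACPI processor object to a logical CPU with the renamed API:

#include <linux/acpi.h>
#include <linux/errno.h>
#include <acpi/processor.h>	/* assumed location of the prototypes */

static int demo_acpi_to_logical_cpu(acpi_handle handle, u32 acpi_id)
{
	int phys_id, cpu;

	/* phys_id is the CPU hardware ID (e.g. the APIC ID on x86) */
	phys_id = acpi_get_phys_id(handle, 1 /* Device() declaration */, acpi_id);
	if (phys_id == -1)
		return -ENODEV;

	cpu = acpi_map_cpuid(phys_id, acpi_id);
	if (cpu < 0)
		return -ENODEV;

	return cpu;	/* logical CPU number, usable with cpu_*() helpers */
}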
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 499536504698..87b704e41877 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
985 state->flags = 0; 985 state->flags = 0;
986 switch (cx->type) { 986 switch (cx->type) {
987 case ACPI_STATE_C1: 987 case ACPI_STATE_C1:
988 if (cx->entry_method != ACPI_CSTATE_FFH)
989 state->flags |= CPUIDLE_FLAG_TIME_INVALID;
990 988
991 state->enter = acpi_idle_enter_c1; 989 state->enter = acpi_idle_enter_c1;
992 state->enter_dead = acpi_idle_play_dead; 990 state->enter_dead = acpi_idle_play_dead;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 16914cc30882..dc4d8960684a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
1001 if (device->wakeup.flags.valid) 1001 if (device->wakeup.flags.valid)
1002 acpi_power_resources_list_free(&device->wakeup.resources); 1002 acpi_power_resources_list_free(&device->wakeup.resources);
1003 1003
1004 if (!device->flags.power_manageable) 1004 if (!device->power.flags.power_resources)
1005 return; 1005 return;
1006 1006
1007 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { 1007 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
1744 device->power.flags.power_resources) 1744 device->power.flags.power_resources)
1745 device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; 1745 device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
1746 1746
1747 if (acpi_bus_init_power(device)) { 1747 if (acpi_bus_init_power(device))
1748 acpi_free_power_resources_lists(device);
1749 device->flags.power_manageable = 0; 1748 device->flags.power_manageable = 0;
1750 }
1751} 1749}
1752 1750
1753static void acpi_bus_get_flags(struct acpi_device *device) 1751static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
2371 /* Skip devices that are not present. */ 2369 /* Skip devices that are not present. */
2372 if (!acpi_device_is_present(device)) { 2370 if (!acpi_device_is_present(device)) {
2373 device->flags.visited = false; 2371 device->flags.visited = false;
2372 device->flags.power_manageable = 0;
2374 return; 2373 return;
2375 } 2374 }
2376 if (device->handler) 2375 if (device->handler)
2377 goto ok; 2376 goto ok;
2378 2377
2379 if (!device->flags.initialized) { 2378 if (!device->flags.initialized) {
2380 acpi_bus_update_power(device, NULL); 2379 device->flags.power_manageable =
2380 device->power.states[ACPI_STATE_D0].flags.valid;
2381 if (acpi_bus_init_power(device))
2382 device->flags.power_manageable = 0;
2383
2381 device->flags.initialized = true; 2384 device->flags.initialized = true;
2382 } 2385 }
2383 device->flags.visited = false; 2386 device->flags.visited = false;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1eaadff2e198..032db459370f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -505,6 +505,33 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
505 DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), 505 DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
506 }, 506 },
507 }, 507 },
508
509 {
510 .callback = video_disable_native_backlight,
511 .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
512 .matches = {
513 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
514 DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
515 },
516 },
517 {
518 .callback = video_disable_native_backlight,
519 .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
520 .matches = {
521 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
522 DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
523 },
524 },
525
526 {
527 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
528 .callback = video_disable_native_backlight,
529 .ident = "Dell XPS15 L521X",
530 .matches = {
531 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
532 DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
533 },
534 },
508 {} 535 {}
509}; 536};
510 537
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6a103a35ea9b..0d8780c04a5e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2088 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2088 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2089 * on failure. 2089 * on failure.
2090 */ 2090 */
2091static struct generic_pm_domain *of_genpd_get_from_provider( 2091struct generic_pm_domain *of_genpd_get_from_provider(
2092 struct of_phandle_args *genpdspec) 2092 struct of_phandle_args *genpdspec)
2093{ 2093{
2094 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2094 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
2108 2108
2109 return genpd; 2109 return genpd;
2110} 2110}
2111EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
2111 2112
2112/** 2113/**
2113 * genpd_dev_pm_detach - Detach a device from its PM domain. 2114 * genpd_dev_pm_detach - Detach a device from its PM domain.
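of_genpd_get_from_provider() is un-staticed and exported above so code outside the genpd core (for example SoC-specific power-domain drivers) can translate a "power-domains" phandle into a generic_pm_domain. A hedged sketch of that lookup; demo_lookup_genpd and the property names follow the common genpd binding but are assumptions here, not taken from this patch:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain *demo_lookup_genpd(struct device_node *np)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(np, "power-domains",
					 "#power-domain-cells", 0, &pd_args);
	if (ret < 0)
		return ERR_PTR(ret);

	pd = of_genpd_get_from_provider(&pd_args);	/* newly exported above */
	of_node_put(pd_args.np);
	return pd;
}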
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d24dd614a0bd..106c69359306 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
108/* Lock to allow exclusive modification to the device and opp lists */ 108/* Lock to allow exclusive modification to the device and opp lists */
109static DEFINE_MUTEX(dev_opp_list_lock); 109static DEFINE_MUTEX(dev_opp_list_lock);
110 110
111#define opp_rcu_lockdep_assert() \
112do { \
113 rcu_lockdep_assert(rcu_read_lock_held() || \
114 lockdep_is_held(&dev_opp_list_lock), \
115 "Missing rcu_read_lock() or " \
116 "dev_opp_list_lock protection"); \
117} while (0)
118
111/** 119/**
112 * find_device_opp() - find device_opp struct using device pointer 120 * find_device_opp() - find device_opp struct using device pointer
113 * @dev: device pointer used to lookup device OPPs 121 * @dev: device pointer used to lookup device OPPs
@@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
208 * This function returns the number of available opps if there are any, 216 * This function returns the number of available opps if there are any,
209 * else returns 0 if none or the corresponding error value. 217 * else returns 0 if none or the corresponding error value.
210 * 218 *
211 * Locking: This function must be called under rcu_read_lock(). This function 219 * Locking: This function takes rcu_read_lock().
212 * internally references two RCU protected structures: device_opp and opp which
213 * are safe as long as we are under a common RCU locked section.
214 */ 220 */
215int dev_pm_opp_get_opp_count(struct device *dev) 221int dev_pm_opp_get_opp_count(struct device *dev)
216{ 222{
@@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
218 struct dev_pm_opp *temp_opp; 224 struct dev_pm_opp *temp_opp;
219 int count = 0; 225 int count = 0;
220 226
227 rcu_read_lock();
228
221 dev_opp = find_device_opp(dev); 229 dev_opp = find_device_opp(dev);
222 if (IS_ERR(dev_opp)) { 230 if (IS_ERR(dev_opp)) {
223 int r = PTR_ERR(dev_opp); 231 count = PTR_ERR(dev_opp);
224 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); 232 dev_err(dev, "%s: device OPP not found (%d)\n",
225 return r; 233 __func__, count);
234 goto out_unlock;
226 } 235 }
227 236
228 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { 237 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
230 count++; 239 count++;
231 } 240 }
232 241
242out_unlock:
243 rcu_read_unlock();
233 return count; 244 return count;
234} 245}
235EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); 246EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
267 struct device_opp *dev_opp; 278 struct device_opp *dev_opp;
268 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 279 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
269 280
281 opp_rcu_lockdep_assert();
282
270 dev_opp = find_device_opp(dev); 283 dev_opp = find_device_opp(dev);
271 if (IS_ERR(dev_opp)) { 284 if (IS_ERR(dev_opp)) {
272 int r = PTR_ERR(dev_opp); 285 int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
313 struct device_opp *dev_opp; 326 struct device_opp *dev_opp;
314 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 327 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
315 328
329 opp_rcu_lockdep_assert();
330
316 if (!dev || !freq) { 331 if (!dev || !freq) {
317 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 332 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
318 return ERR_PTR(-EINVAL); 333 return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
361 struct device_opp *dev_opp; 376 struct device_opp *dev_opp;
362 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 377 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
363 378
379 opp_rcu_lockdep_assert();
380
364 if (!dev || !freq) { 381 if (!dev || !freq) {
365 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 382 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
366 return ERR_PTR(-EINVAL); 383 return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
783 800
784 /* Check for existing list for 'dev' */ 801 /* Check for existing list for 'dev' */
785 dev_opp = find_device_opp(dev); 802 dev_opp = find_device_opp(dev);
786 if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev), 803 if (IS_ERR(dev_opp)) {
787 PTR_ERR(dev_opp))) 804 int error = PTR_ERR(dev_opp);
805 if (error != -ENODEV)
806 WARN(1, "%s: dev_opp: %d\n",
807 IS_ERR_OR_NULL(dev) ?
808 "Invalid device" : dev_name(dev),
809 error);
788 return; 810 return;
811 }
789 812
790 /* Hold our list modification lock here */ 813 /* Hold our list modification lock here */
791 mutex_lock(&dev_opp_list_lock); 814 mutex_lock(&dev_opp_list_lock);
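The opp_rcu_lockdep_assert() added above makes the locking contract explicit: dev_pm_opp_find_freq_exact/ceil/floor() must run under rcu_read_lock() (or with dev_opp_list_lock held), while dev_pm_opp_get_opp_count() now takes the RCU read lock itself. A hedged caller sketch (demo_pick_rate is an invented name):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static unsigned long demo_pick_rate(struct device *dev, unsigned long hz)
{
	struct dev_pm_opp *opp;
	unsigned long rate = 0;

	rcu_read_lock();	/* satisfies the lockdep assertion above */
	opp = dev_pm_opp_find_freq_ceil(dev, &hz);
	if (!IS_ERR(opp))
		rate = dev_pm_opp_get_freq(opp);
	rcu_read_unlock();	/* opp must not be dereferenced after this */

	return rate;
}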
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ae9f615382f6..aa2224aa7caa 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -530,7 +530,7 @@ static int null_add_dev(void)
530 goto out_cleanup_queues; 530 goto out_cleanup_queues;
531 531
532 nullb->q = blk_mq_init_queue(&nullb->tag_set); 532 nullb->q = blk_mq_init_queue(&nullb->tag_set);
533 if (!nullb->q) { 533 if (IS_ERR(nullb->q)) {
534 rv = -ENOMEM; 534 rv = -ENOMEM;
535 goto out_cleanup_tags; 535 goto out_cleanup_tags;
536 } 536 }
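This null_blk change, like the virtio_blk and NVMe hunks further down, adapts callers to blk_mq_init_queue() returning an ERR_PTR() value instead of NULL on failure. A hedged sketch of the check now required (the demo_ names are invented):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct request_queue *demo_init_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *q = blk_mq_init_queue(set);

	/* A plain NULL test would now treat every failure as success. */
	if (IS_ERR(q))
		return NULL;

	return q;
}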
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b1d5d8797315..cb529e9a82dd 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
215 cmd->fn = handler; 215 cmd->fn = handler;
216 cmd->ctx = ctx; 216 cmd->ctx = ctx;
217 cmd->aborted = 0; 217 cmd->aborted = 0;
218 blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
218} 219}
219 220
220/* Special values must be less than 0x1000 */ 221/* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
431 if (unlikely(status)) { 432 if (unlikely(status)) {
432 if (!(status & NVME_SC_DNR || blk_noretry_request(req)) 433 if (!(status & NVME_SC_DNR || blk_noretry_request(req))
433 && (jiffies - req->start_time) < req->timeout) { 434 && (jiffies - req->start_time) < req->timeout) {
435 unsigned long flags;
436
434 blk_mq_requeue_request(req); 437 blk_mq_requeue_request(req);
435 blk_mq_kick_requeue_list(req->q); 438 spin_lock_irqsave(req->q->queue_lock, flags);
439 if (!blk_queue_stopped(req->q))
440 blk_mq_kick_requeue_list(req->q);
441 spin_unlock_irqrestore(req->q->queue_lock, flags);
436 return; 442 return;
437 } 443 }
438 req->errors = nvme_error_status(status); 444 req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
664 } 670 }
665 } 671 }
666 672
667 blk_mq_start_request(req);
668
669 nvme_set_info(cmd, iod, req_completion); 673 nvme_set_info(cmd, iod, req_completion);
670 spin_lock_irq(&nvmeq->q_lock); 674 spin_lock_irq(&nvmeq->q_lock);
671 if (req->cmd_flags & REQ_DISCARD) 675 if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
835 if (IS_ERR(req)) 839 if (IS_ERR(req))
836 return PTR_ERR(req); 840 return PTR_ERR(req);
837 841
842 req->cmd_flags |= REQ_NO_TIMEOUT;
838 cmd_info = blk_mq_rq_to_pdu(req); 843 cmd_info = blk_mq_rq_to_pdu(req);
839 nvme_set_info(cmd_info, req, async_req_completion); 844 nvme_set_info(cmd_info, req, async_req_completion);
840 845
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
1016 struct nvme_command cmd; 1021 struct nvme_command cmd;
1017 1022
1018 if (!nvmeq->qid || cmd_rq->aborted) { 1023 if (!nvmeq->qid || cmd_rq->aborted) {
1024 unsigned long flags;
1025
1026 spin_lock_irqsave(&dev_list_lock, flags);
1019 if (work_busy(&dev->reset_work)) 1027 if (work_busy(&dev->reset_work))
1020 return; 1028 goto out;
1021 list_del_init(&dev->node); 1029 list_del_init(&dev->node);
1022 dev_warn(&dev->pci_dev->dev, 1030 dev_warn(&dev->pci_dev->dev,
1023 "I/O %d QID %d timeout, reset controller\n", 1031 "I/O %d QID %d timeout, reset controller\n",
1024 req->tag, nvmeq->qid); 1032 req->tag, nvmeq->qid);
1025 dev->reset_workfn = nvme_reset_failed_dev; 1033 dev->reset_workfn = nvme_reset_failed_dev;
1026 queue_work(nvme_workq, &dev->reset_work); 1034 queue_work(nvme_workq, &dev->reset_work);
1035 out:
1036 spin_unlock_irqrestore(&dev_list_lock, flags);
1027 return; 1037 return;
1028 } 1038 }
1029 1039
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
1064 void *ctx; 1074 void *ctx;
1065 nvme_completion_fn fn; 1075 nvme_completion_fn fn;
1066 struct nvme_cmd_info *cmd; 1076 struct nvme_cmd_info *cmd;
1067 static struct nvme_completion cqe = { 1077 struct nvme_completion cqe;
1068 .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), 1078
1069 }; 1079 if (!blk_mq_request_started(req))
1080 return;
1070 1081
1071 cmd = blk_mq_rq_to_pdu(req); 1082 cmd = blk_mq_rq_to_pdu(req);
1072 1083
1073 if (cmd->ctx == CMD_CTX_CANCELLED) 1084 if (cmd->ctx == CMD_CTX_CANCELLED)
1074 return; 1085 return;
1075 1086
1087 if (blk_queue_dying(req->q))
1088 cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
1089 else
1090 cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1091
1092
1076 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", 1093 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
1077 req->tag, nvmeq->qid); 1094 req->tag, nvmeq->qid);
1078 ctx = cancel_cmd_info(cmd, &fn); 1095 ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1084 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); 1101 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
1085 struct nvme_queue *nvmeq = cmd->nvmeq; 1102 struct nvme_queue *nvmeq = cmd->nvmeq;
1086 1103
1087 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
1088 nvmeq->qid);
1089 if (nvmeq->dev->initialized)
1090 nvme_abort_req(req);
1091
1092 /* 1104 /*
1093 * The aborted req will be completed on receiving the abort req. 1105 * The aborted req will be completed on receiving the abort req.
1094 * We enable the timer again. If hit twice, it'll cause a device reset, 1106 * We enable the timer again. If hit twice, it'll cause a device reset,
1095 * as the device then is in a faulty state. 1107 * as the device then is in a faulty state.
1096 */ 1108 */
1097 return BLK_EH_RESET_TIMER; 1109 int ret = BLK_EH_RESET_TIMER;
1110
1111 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
1112 nvmeq->qid);
1113
1114 spin_lock_irq(&nvmeq->q_lock);
1115 if (!nvmeq->dev->initialized) {
1116 /*
1117 * Force cancelled command frees the request, which requires we
1118 * return BLK_EH_NOT_HANDLED.
1119 */
1120 nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
1121 ret = BLK_EH_NOT_HANDLED;
1122 } else
1123 nvme_abort_req(req);
1124 spin_unlock_irq(&nvmeq->q_lock);
1125
1126 return ret;
1098} 1127}
1099 1128
1100static void nvme_free_queue(struct nvme_queue *nvmeq) 1129static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1131 */ 1160 */
1132static int nvme_suspend_queue(struct nvme_queue *nvmeq) 1161static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1133{ 1162{
1134 int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; 1163 int vector;
1135 1164
1136 spin_lock_irq(&nvmeq->q_lock); 1165 spin_lock_irq(&nvmeq->q_lock);
1166 if (nvmeq->cq_vector == -1) {
1167 spin_unlock_irq(&nvmeq->q_lock);
1168 return 1;
1169 }
1170 vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
1137 nvmeq->dev->online_queues--; 1171 nvmeq->dev->online_queues--;
1172 nvmeq->cq_vector = -1;
1138 spin_unlock_irq(&nvmeq->q_lock); 1173 spin_unlock_irq(&nvmeq->q_lock);
1139 1174
1140 irq_set_affinity_hint(vector, NULL); 1175 irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1169 adapter_delete_sq(dev, qid); 1204 adapter_delete_sq(dev, qid);
1170 adapter_delete_cq(dev, qid); 1205 adapter_delete_cq(dev, qid);
1171 } 1206 }
1207 if (!qid && dev->admin_q)
1208 blk_mq_freeze_queue_start(dev->admin_q);
1172 nvme_clear_queue(nvmeq); 1209 nvme_clear_queue(nvmeq);
1173} 1210}
1174 1211
1175static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1212static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1176 int depth, int vector) 1213 int depth)
1177{ 1214{
1178 struct device *dmadev = &dev->pci_dev->dev; 1215 struct device *dmadev = &dev->pci_dev->dev;
1179 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); 1216 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1199 nvmeq->cq_phase = 1; 1236 nvmeq->cq_phase = 1;
1200 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1237 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1201 nvmeq->q_depth = depth; 1238 nvmeq->q_depth = depth;
1202 nvmeq->cq_vector = vector;
1203 nvmeq->qid = qid; 1239 nvmeq->qid = qid;
1204 dev->queue_count++; 1240 dev->queue_count++;
1205 dev->queues[qid] = nvmeq; 1241 dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1244 struct nvme_dev *dev = nvmeq->dev; 1280 struct nvme_dev *dev = nvmeq->dev;
1245 int result; 1281 int result;
1246 1282
1283 nvmeq->cq_vector = qid - 1;
1247 result = adapter_alloc_cq(dev, qid, nvmeq); 1284 result = adapter_alloc_cq(dev, qid, nvmeq);
1248 if (result < 0) 1285 if (result < 0)
1249 return result; 1286 return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
1355 .timeout = nvme_timeout, 1392 .timeout = nvme_timeout,
1356}; 1393};
1357 1394
1395static void nvme_dev_remove_admin(struct nvme_dev *dev)
1396{
1397 if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
1398 blk_cleanup_queue(dev->admin_q);
1399 blk_mq_free_tag_set(&dev->admin_tagset);
1400 }
1401}
1402
1358static int nvme_alloc_admin_tags(struct nvme_dev *dev) 1403static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1359{ 1404{
1360 if (!dev->admin_q) { 1405 if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1370 return -ENOMEM; 1415 return -ENOMEM;
1371 1416
1372 dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); 1417 dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
1373 if (!dev->admin_q) { 1418 if (IS_ERR(dev->admin_q)) {
1374 blk_mq_free_tag_set(&dev->admin_tagset); 1419 blk_mq_free_tag_set(&dev->admin_tagset);
1375 return -ENOMEM; 1420 return -ENOMEM;
1376 } 1421 }
1377 } 1422 if (!blk_get_queue(dev->admin_q)) {
1423 nvme_dev_remove_admin(dev);
1424 return -ENODEV;
1425 }
1426 } else
1427 blk_mq_unfreeze_queue(dev->admin_q);
1378 1428
1379 return 0; 1429 return 0;
1380} 1430}
1381 1431
1382static void nvme_free_admin_tags(struct nvme_dev *dev)
1383{
1384 if (dev->admin_q)
1385 blk_mq_free_tag_set(&dev->admin_tagset);
1386}
1387
1388static int nvme_configure_admin_queue(struct nvme_dev *dev) 1432static int nvme_configure_admin_queue(struct nvme_dev *dev)
1389{ 1433{
1390 int result; 1434 int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1416 1460
1417 nvmeq = dev->queues[0]; 1461 nvmeq = dev->queues[0];
1418 if (!nvmeq) { 1462 if (!nvmeq) {
1419 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0); 1463 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
1420 if (!nvmeq) 1464 if (!nvmeq)
1421 return -ENOMEM; 1465 return -ENOMEM;
1422 } 1466 }
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1439 if (result) 1483 if (result)
1440 goto free_nvmeq; 1484 goto free_nvmeq;
1441 1485
1442 result = nvme_alloc_admin_tags(dev); 1486 nvmeq->cq_vector = 0;
1443 if (result)
1444 goto free_nvmeq;
1445
1446 result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 1487 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1447 if (result) 1488 if (result)
1448 goto free_tags; 1489 goto free_nvmeq;
1449 1490
1450 return result; 1491 return result;
1451 1492
1452 free_tags:
1453 nvme_free_admin_tags(dev);
1454 free_nvmeq: 1493 free_nvmeq:
1455 nvme_free_queues(dev, 0); 1494 nvme_free_queues(dev, 0);
1456 return result; 1495 return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
1944 unsigned i; 1983 unsigned i;
1945 1984
1946 for (i = dev->queue_count; i <= dev->max_qid; i++) 1985 for (i = dev->queue_count; i <= dev->max_qid; i++)
1947 if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1)) 1986 if (!nvme_alloc_queue(dev, i, dev->q_depth))
1948 break; 1987 break;
1949 1988
1950 for (i = dev->online_queues; i <= dev->queue_count - 1; i++) 1989 for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
2235 break; 2274 break;
2236 if (!schedule_timeout(ADMIN_TIMEOUT) || 2275 if (!schedule_timeout(ADMIN_TIMEOUT) ||
2237 fatal_signal_pending(current)) { 2276 fatal_signal_pending(current)) {
2277 /*
2278 * Disable the controller first since we can't trust it
2279 * at this point, but leave the admin queue enabled
2280 * until all queue deletion requests are flushed.
2281 * FIXME: This may take a while if there are more h/w
2282 * queues than admin tags.
2283 */
2238 set_current_state(TASK_RUNNING); 2284 set_current_state(TASK_RUNNING);
2239
2240 nvme_disable_ctrl(dev, readq(&dev->bar->cap)); 2285 nvme_disable_ctrl(dev, readq(&dev->bar->cap));
2241 nvme_disable_queue(dev, 0); 2286 nvme_clear_queue(dev->queues[0]);
2242
2243 send_sig(SIGKILL, dq->worker->task, 1);
2244 flush_kthread_worker(dq->worker); 2287 flush_kthread_worker(dq->worker);
2288 nvme_disable_queue(dev, 0);
2245 return; 2289 return;
2246 } 2290 }
2247 } 2291 }
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
2318{ 2362{
2319 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 2363 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2320 cmdinfo.work); 2364 cmdinfo.work);
2321 allow_signal(SIGKILL);
2322 if (nvme_delete_sq(nvmeq)) 2365 if (nvme_delete_sq(nvmeq))
2323 nvme_del_queue_end(nvmeq); 2366 nvme_del_queue_end(nvmeq);
2324} 2367}
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
2376 kthread_stop(tmp); 2419 kthread_stop(tmp);
2377} 2420}
2378 2421
2422static void nvme_freeze_queues(struct nvme_dev *dev)
2423{
2424 struct nvme_ns *ns;
2425
2426 list_for_each_entry(ns, &dev->namespaces, list) {
2427 blk_mq_freeze_queue_start(ns->queue);
2428
2429 spin_lock(ns->queue->queue_lock);
2430 queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
2431 spin_unlock(ns->queue->queue_lock);
2432
2433 blk_mq_cancel_requeue_work(ns->queue);
2434 blk_mq_stop_hw_queues(ns->queue);
2435 }
2436}
2437
2438static void nvme_unfreeze_queues(struct nvme_dev *dev)
2439{
2440 struct nvme_ns *ns;
2441
2442 list_for_each_entry(ns, &dev->namespaces, list) {
2443 queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
2444 blk_mq_unfreeze_queue(ns->queue);
2445 blk_mq_start_stopped_hw_queues(ns->queue, true);
2446 blk_mq_kick_requeue_list(ns->queue);
2447 }
2448}
2449
2379static void nvme_dev_shutdown(struct nvme_dev *dev) 2450static void nvme_dev_shutdown(struct nvme_dev *dev)
2380{ 2451{
2381 int i; 2452 int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2384 dev->initialized = 0; 2455 dev->initialized = 0;
2385 nvme_dev_list_remove(dev); 2456 nvme_dev_list_remove(dev);
2386 2457
2387 if (dev->bar) 2458 if (dev->bar) {
2459 nvme_freeze_queues(dev);
2388 csts = readl(&dev->bar->csts); 2460 csts = readl(&dev->bar->csts);
2461 }
2389 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { 2462 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
2390 for (i = dev->queue_count - 1; i >= 0; i--) { 2463 for (i = dev->queue_count - 1; i >= 0; i--) {
2391 struct nvme_queue *nvmeq = dev->queues[i]; 2464 struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
2400 nvme_dev_unmap(dev); 2473 nvme_dev_unmap(dev);
2401} 2474}
2402 2475
2403static void nvme_dev_remove_admin(struct nvme_dev *dev)
2404{
2405 if (dev->admin_q && !blk_queue_dying(dev->admin_q))
2406 blk_cleanup_queue(dev->admin_q);
2407}
2408
2409static void nvme_dev_remove(struct nvme_dev *dev) 2476static void nvme_dev_remove(struct nvme_dev *dev)
2410{ 2477{
2411 struct nvme_ns *ns; 2478 struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
2413 list_for_each_entry(ns, &dev->namespaces, list) { 2480 list_for_each_entry(ns, &dev->namespaces, list) {
2414 if (ns->disk->flags & GENHD_FL_UP) 2481 if (ns->disk->flags & GENHD_FL_UP)
2415 del_gendisk(ns->disk); 2482 del_gendisk(ns->disk);
2416 if (!blk_queue_dying(ns->queue)) 2483 if (!blk_queue_dying(ns->queue)) {
2484 blk_mq_abort_requeue_list(ns->queue);
2417 blk_cleanup_queue(ns->queue); 2485 blk_cleanup_queue(ns->queue);
2486 }
2418 } 2487 }
2419} 2488}
2420 2489
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
2495 nvme_free_namespaces(dev); 2564 nvme_free_namespaces(dev);
2496 nvme_release_instance(dev); 2565 nvme_release_instance(dev);
2497 blk_mq_free_tag_set(&dev->tagset); 2566 blk_mq_free_tag_set(&dev->tagset);
2567 blk_put_queue(dev->admin_q);
2498 kfree(dev->queues); 2568 kfree(dev->queues);
2499 kfree(dev->entry); 2569 kfree(dev->entry);
2500 kfree(dev); 2570 kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
2591 } 2661 }
2592 2662
2593 nvme_init_queue(dev->queues[0], 0); 2663 nvme_init_queue(dev->queues[0], 0);
2664 result = nvme_alloc_admin_tags(dev);
2665 if (result)
2666 goto disable;
2594 2667
2595 result = nvme_setup_io_queues(dev); 2668 result = nvme_setup_io_queues(dev);
2596 if (result) 2669 if (result)
2597 goto disable; 2670 goto free_tags;
2598 2671
2599 nvme_set_irq_hints(dev); 2672 nvme_set_irq_hints(dev);
2600 2673
2601 return result; 2674 return result;
2602 2675
2676 free_tags:
2677 nvme_dev_remove_admin(dev);
2603 disable: 2678 disable:
2604 nvme_disable_queue(dev, 0); 2679 nvme_disable_queue(dev, 0);
2605 nvme_dev_list_remove(dev); 2680 nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
2639 dev->reset_workfn = nvme_remove_disks; 2714 dev->reset_workfn = nvme_remove_disks;
2640 queue_work(nvme_workq, &dev->reset_work); 2715 queue_work(nvme_workq, &dev->reset_work);
2641 spin_unlock(&dev_list_lock); 2716 spin_unlock(&dev_list_lock);
2717 } else {
2718 nvme_unfreeze_queues(dev);
2719 nvme_set_irq_hints(dev);
2642 } 2720 }
2643 dev->initialized = 1; 2721 dev->initialized = 1;
2644 return 0; 2722 return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
2776 pci_set_drvdata(pdev, NULL); 2854 pci_set_drvdata(pdev, NULL);
2777 flush_work(&dev->reset_work); 2855 flush_work(&dev->reset_work);
2778 misc_deregister(&dev->miscdev); 2856 misc_deregister(&dev->miscdev);
2779 nvme_dev_remove(dev);
2780 nvme_dev_shutdown(dev); 2857 nvme_dev_shutdown(dev);
2858 nvme_dev_remove(dev);
2781 nvme_dev_remove_admin(dev); 2859 nvme_dev_remove_admin(dev);
2782 nvme_free_queues(dev, 0); 2860 nvme_free_queues(dev, 0);
2783 nvme_free_admin_tags(dev);
2784 nvme_release_prp_pools(dev); 2861 nvme_release_prp_pools(dev);
2785 kref_put(&dev->kref, nvme_free_dev); 2862 kref_put(&dev->kref, nvme_free_dev);
2786} 2863}
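nvme_freeze_queues()/nvme_unfreeze_queues() above pair queue freezing with stopping the hardware queues across controller shutdown and resume. A hedged, generic blk-mq sketch of that pairing for a single request queue (demo_quiesce/demo_resume are invented; the NVMe code additionally toggles QUEUE_FLAG_STOPPED under queue_lock):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static void demo_quiesce(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);	/* new requests wait until unfreeze */
	blk_mq_cancel_requeue_work(q);	/* stop pending requeue processing */
	blk_mq_stop_hw_queues(q);	/* stop dispatching queued requests */
}

static void demo_resume(struct request_queue *q)
{
	blk_mq_unfreeze_queue(q);
	blk_mq_start_stopped_hw_queues(q, true);
	blk_mq_kick_requeue_list(q);	/* re-run anything left on the requeue list */
}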
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ef7c098708f..cdfbd21e3597 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
638 goto out_put_disk; 638 goto out_put_disk;
639 639
640 q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); 640 q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
641 if (!q) { 641 if (IS_ERR(q)) {
642 err = -ENOMEM; 642 err = -ENOMEM;
643 goto out_free_tags; 643 goto out_free_tags;
644 } 644 }
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 860da40b78ef..0ce5e2d65a06 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1312,6 +1312,9 @@ static int cci_probe(void)
1312 if (!np) 1312 if (!np)
1313 return -ENODEV; 1313 return -ENODEV;
1314 1314
1315 if (!of_device_is_available(np))
1316 return -ENODEV;
1317
1315 cci_config = of_match_node(arm_cci_matches, np)->data; 1318 cci_config = of_match_node(arm_cci_matches, np)->data;
1316 if (!cci_config) 1319 if (!cci_config)
1317 return -ENODEV; 1320 return -ENODEV;
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 19db03667650..dcbbb4ea3cc1 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
417module_init(agp_ali_init); 417module_init(agp_ali_init);
418module_exit(agp_ali_cleanup); 418module_exit(agp_ali_cleanup);
419 419
420MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 420MODULE_AUTHOR("Dave Jones");
421MODULE_LICENSE("GPL and additional rights"); 421MODULE_LICENSE("GPL and additional rights");
422 422
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 3b47ed0310e1..0ef350010766 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
813module_init(agp_amd64_mod_init); 813module_init(agp_amd64_mod_init);
814module_exit(agp_amd64_cleanup); 814module_exit(agp_amd64_cleanup);
815 815
816MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen"); 816MODULE_AUTHOR("Dave Jones, Andi Kleen");
817module_param(agp_try_unsupported, bool, 0); 817module_param(agp_try_unsupported, bool, 0);
818MODULE_LICENSE("GPL"); 818MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 18a7a6baa304..75a9786a77e6 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
579module_init(agp_ati_init); 579module_init(agp_ati_init);
580module_exit(agp_ati_cleanup); 580module_exit(agp_ati_cleanup);
581 581
582MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 582MODULE_AUTHOR("Dave Jones");
583MODULE_LICENSE("GPL and additional rights"); 583MODULE_LICENSE("GPL and additional rights");
584 584
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 317c28ce8328..38ffb281df97 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
356__setup("agp=", agp_setup); 356__setup("agp=", agp_setup);
357#endif 357#endif
358 358
359MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 359MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
360MODULE_DESCRIPTION("AGP GART driver"); 360MODULE_DESCRIPTION("AGP GART driver");
361MODULE_LICENSE("GPL and additional rights"); 361MODULE_LICENSE("GPL and additional rights");
362MODULE_ALIAS_MISCDEV(AGPGART_MINOR); 362MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index f9b9ca5d31b7..0a21daed5b62 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
920module_init(agp_intel_init); 920module_init(agp_intel_init);
921module_exit(agp_intel_cleanup); 921module_exit(agp_intel_cleanup);
922 922
923MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 923MODULE_AUTHOR("Dave Jones, Various @Intel");
924MODULE_LICENSE("GPL and additional rights"); 924MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index f3334829e55a..92aa43fa8d70 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
1438} 1438}
1439EXPORT_SYMBOL(intel_gmch_remove); 1439EXPORT_SYMBOL(intel_gmch_remove);
1440 1440
1441MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 1441MODULE_AUTHOR("Dave Jones, Various @Intel");
1442MODULE_LICENSE("GPL and additional rights"); 1442MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index a1861b75eb31..6c8d39cb566e 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Nvidia AGPGART routines. 2 * Nvidia AGPGART routines.
3 * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up 3 * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
4 * to work in 2.5 by Dave Jones <davej@redhat.com> 4 * to work in 2.5 by Dave Jones.
5 */ 5 */
6 6
7#include <linux/module.h> 7#include <linux/module.h>
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 228f20cddc05..a4961d35e940 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -595,4 +595,4 @@ module_init(agp_via_init);
595module_exit(agp_via_cleanup); 595module_exit(agp_via_cleanup);
596 596
597MODULE_LICENSE("GPL"); 597MODULE_LICENSE("GPL");
598MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 598MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5fa83f751378..6b65fa4e0c55 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -199,18 +199,6 @@ struct bmc_device {
199 int guid_set; 199 int guid_set;
200 char name[16]; 200 char name[16];
201 struct kref usecount; 201 struct kref usecount;
202
203 /* bmc device attributes */
204 struct device_attribute device_id_attr;
205 struct device_attribute provides_dev_sdrs_attr;
206 struct device_attribute revision_attr;
207 struct device_attribute firmware_rev_attr;
208 struct device_attribute version_attr;
209 struct device_attribute add_dev_support_attr;
210 struct device_attribute manufacturer_id_attr;
211 struct device_attribute product_id_attr;
212 struct device_attribute guid_attr;
213 struct device_attribute aux_firmware_rev_attr;
214}; 202};
215#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) 203#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
216 204
@@ -2252,7 +2240,7 @@ static ssize_t device_id_show(struct device *dev,
2252 2240
2253 return snprintf(buf, 10, "%u\n", bmc->id.device_id); 2241 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2254} 2242}
2255DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL); 2243static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
2256 2244
2257static ssize_t provides_device_sdrs_show(struct device *dev, 2245static ssize_t provides_device_sdrs_show(struct device *dev,
2258 struct device_attribute *attr, 2246 struct device_attribute *attr,
@@ -2263,7 +2251,8 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
2263 return snprintf(buf, 10, "%u\n", 2251 return snprintf(buf, 10, "%u\n",
2264 (bmc->id.device_revision & 0x80) >> 7); 2252 (bmc->id.device_revision & 0x80) >> 7);
2265} 2253}
2266DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL); 2254static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
2255 NULL);
2267 2256
2268static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2257static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2269 char *buf) 2258 char *buf)
@@ -2273,7 +2262,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2273 return snprintf(buf, 20, "%u\n", 2262 return snprintf(buf, 20, "%u\n",
2274 bmc->id.device_revision & 0x0F); 2263 bmc->id.device_revision & 0x0F);
2275} 2264}
2276DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL); 2265static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
2277 2266
2278static ssize_t firmware_revision_show(struct device *dev, 2267static ssize_t firmware_revision_show(struct device *dev,
2279 struct device_attribute *attr, 2268 struct device_attribute *attr,
@@ -2284,7 +2273,7 @@ static ssize_t firmware_revision_show(struct device *dev,
2284 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, 2273 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2285 bmc->id.firmware_revision_2); 2274 bmc->id.firmware_revision_2);
2286} 2275}
2287DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL); 2276static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
2288 2277
2289static ssize_t ipmi_version_show(struct device *dev, 2278static ssize_t ipmi_version_show(struct device *dev,
2290 struct device_attribute *attr, 2279 struct device_attribute *attr,
@@ -2296,7 +2285,7 @@ static ssize_t ipmi_version_show(struct device *dev,
2296 ipmi_version_major(&bmc->id), 2285 ipmi_version_major(&bmc->id),
2297 ipmi_version_minor(&bmc->id)); 2286 ipmi_version_minor(&bmc->id));
2298} 2287}
2299DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL); 2288static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
2300 2289
2301static ssize_t add_dev_support_show(struct device *dev, 2290static ssize_t add_dev_support_show(struct device *dev,
2302 struct device_attribute *attr, 2291 struct device_attribute *attr,
@@ -2307,7 +2296,8 @@ static ssize_t add_dev_support_show(struct device *dev,
2307 return snprintf(buf, 10, "0x%02x\n", 2296 return snprintf(buf, 10, "0x%02x\n",
2308 bmc->id.additional_device_support); 2297 bmc->id.additional_device_support);
2309} 2298}
2310DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL); 2299static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2300 NULL);
2311 2301
2312static ssize_t manufacturer_id_show(struct device *dev, 2302static ssize_t manufacturer_id_show(struct device *dev,
2313 struct device_attribute *attr, 2303 struct device_attribute *attr,
@@ -2317,7 +2307,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
2317 2307
2318 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); 2308 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2319} 2309}
2320DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL); 2310static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
2321 2311
2322static ssize_t product_id_show(struct device *dev, 2312static ssize_t product_id_show(struct device *dev,
2323 struct device_attribute *attr, 2313 struct device_attribute *attr,
@@ -2327,7 +2317,7 @@ static ssize_t product_id_show(struct device *dev,
2327 2317
2328 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); 2318 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2329} 2319}
2330DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL); 2320static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
2331 2321
2332static ssize_t aux_firmware_rev_show(struct device *dev, 2322static ssize_t aux_firmware_rev_show(struct device *dev,
2333 struct device_attribute *attr, 2323 struct device_attribute *attr,
@@ -2341,7 +2331,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
2341 bmc->id.aux_firmware_revision[1], 2331 bmc->id.aux_firmware_revision[1],
2342 bmc->id.aux_firmware_revision[0]); 2332 bmc->id.aux_firmware_revision[0]);
2343} 2333}
2344DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2334static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2345 2335
2346static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2336static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2347 char *buf) 2337 char *buf)
@@ -2352,7 +2342,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2352 (long long) bmc->guid[0], 2342 (long long) bmc->guid[0],
2353 (long long) bmc->guid[8]); 2343 (long long) bmc->guid[8]);
2354} 2344}
2355DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL); 2345static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
2356 2346
2357static struct attribute *bmc_dev_attrs[] = { 2347static struct attribute *bmc_dev_attrs[] = {
2358 &dev_attr_device_id.attr, 2348 &dev_attr_device_id.attr,
@@ -2392,10 +2382,10 @@ cleanup_bmc_device(struct kref *ref)
2392 2382
2393 if (bmc->id.aux_firmware_revision_set) 2383 if (bmc->id.aux_firmware_revision_set)
2394 device_remove_file(&bmc->pdev.dev, 2384 device_remove_file(&bmc->pdev.dev,
2395 &bmc->aux_firmware_rev_attr); 2385 &dev_attr_aux_firmware_revision);
2396 if (bmc->guid_set) 2386 if (bmc->guid_set)
2397 device_remove_file(&bmc->pdev.dev, 2387 device_remove_file(&bmc->pdev.dev,
2398 &bmc->guid_attr); 2388 &dev_attr_guid);
2399 2389
2400 platform_device_unregister(&bmc->pdev); 2390 platform_device_unregister(&bmc->pdev);
2401} 2391}
@@ -2422,16 +2412,14 @@ static int create_bmc_files(struct bmc_device *bmc)
2422 int err; 2412 int err;
2423 2413
2424 if (bmc->id.aux_firmware_revision_set) { 2414 if (bmc->id.aux_firmware_revision_set) {
2425 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2426 err = device_create_file(&bmc->pdev.dev, 2415 err = device_create_file(&bmc->pdev.dev,
2427 &bmc->aux_firmware_rev_attr); 2416 &dev_attr_aux_firmware_revision);
2428 if (err) 2417 if (err)
2429 goto out; 2418 goto out;
2430 } 2419 }
2431 if (bmc->guid_set) { 2420 if (bmc->guid_set) {
2432 bmc->guid_attr.attr.name = "guid";
2433 err = device_create_file(&bmc->pdev.dev, 2421 err = device_create_file(&bmc->pdev.dev,
2434 &bmc->guid_attr); 2422 &dev_attr_guid);
2435 if (err) 2423 if (err)
2436 goto out_aux_firm; 2424 goto out_aux_firm;
2437 } 2425 }
@@ -2441,7 +2429,7 @@ static int create_bmc_files(struct bmc_device *bmc)
2441out_aux_firm: 2429out_aux_firm:
2442 if (bmc->id.aux_firmware_revision_set) 2430 if (bmc->id.aux_firmware_revision_set)
2443 device_remove_file(&bmc->pdev.dev, 2431 device_remove_file(&bmc->pdev.dev,
2444 &bmc->aux_firmware_rev_attr); 2432 &dev_attr_aux_firmware_revision);
2445out: 2433out:
2446 return err; 2434 return err;
2447} 2435}
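The IPMI change above drops the per-BMC struct device_attribute copies in favour of the usual shared, static DEVICE_ATTR() pattern. A hedged, stand-alone sketch of that pattern (the "demo" attribute is invented):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return snprintf(buf, 10, "%u\n", 42u);
}
static DEVICE_ATTR(demo, S_IRUGO, demo_show, NULL);	/* defines dev_attr_demo */

/* Registration and removal then reference the shared attribute:
 *	device_create_file(dev, &dev_attr_demo);
 *	device_remove_file(dev, &dev_attr_demo);
 * so no per-device attribute storage or .attr.name setup is needed.
 */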
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index e178ac27e73c..982b96323f82 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -52,6 +52,7 @@
52#include <linux/dmi.h> 52#include <linux/dmi.h>
53#include <linux/kthread.h> 53#include <linux/kthread.h>
54#include <linux/acpi.h> 54#include <linux/acpi.h>
55#include <linux/ctype.h>
55 56
56#define PFX "ipmi_ssif: " 57#define PFX "ipmi_ssif: "
57#define DEVICE_NAME "ipmi_ssif" 58#define DEVICE_NAME "ipmi_ssif"
@@ -968,7 +969,8 @@ static void sender(void *send_info,
968 969
969 do_gettimeofday(&t); 970 do_gettimeofday(&t);
970 pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", 971 pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
971 msg->data[0], msg->data[1], t.tv_sec, t.tv_usec); 972 msg->data[0], msg->data[1],
973 (long) t.tv_sec, (long) t.tv_usec);
972 } 974 }
973} 975}
974 976
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 32f7c1b36204..2f13bd5246b5 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -70,6 +70,7 @@ struct clk_sam9x5_slow {
70 70
71#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw) 71#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
72 72
73static struct clk *slow_clk;
73 74
74static int clk_slow_osc_prepare(struct clk_hw *hw) 75static int clk_slow_osc_prepare(struct clk_hw *hw)
75{ 76{
@@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
357 clk = clk_register(NULL, &slowck->hw); 358 clk = clk_register(NULL, &slowck->hw);
358 if (IS_ERR(clk)) 359 if (IS_ERR(clk))
359 kfree(slowck); 360 kfree(slowck);
361 else
362 slow_clk = clk;
360 363
361 return clk; 364 return clk;
362} 365}
@@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
433 clk = clk_register(NULL, &slowck->hw); 436 clk = clk_register(NULL, &slowck->hw);
434 if (IS_ERR(clk)) 437 if (IS_ERR(clk))
435 kfree(slowck); 438 kfree(slowck);
439 else
440 slow_clk = clk;
436 441
437 return clk; 442 return clk;
438} 443}
@@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
465 470
466 of_clk_add_provider(np, of_clk_src_simple_get, clk); 471 of_clk_add_provider(np, of_clk_src_simple_get, clk);
467} 472}
473
474/*
475 * FIXME: All slow clk users are not properly claiming it (get + prepare +
476 * enable) before using it.
477 * If all users properly claiming this clock decide that they don't need it
478 * anymore (or are removed), it is disabled while faulty users are still
479 * requiring it, and the system hangs.
480 * Prevent this clock from being disabled until all users are properly
481 * requesting it.
482 * Once this is done we should remove this function and the slow_clk variable.
483 */
484static int __init of_at91_clk_slow_retain(void)
485{
486 if (!slow_clk)
487 return 0;
488
489 __clk_get(slow_clk);
490 clk_prepare_enable(slow_clk);
491
492 return 0;
493}
494arch_initcall(of_at91_clk_slow_retain);
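
Editor's note: the FIXME block added above describes the workaround: keep the slow clock permanently prepared and enabled from an arch_initcall so a consumer that never claimed it cannot end up with the clock gated underneath it. A generic sketch of that "retain a clock forever" workaround, simplified to the public clk API (the at91 code additionally calls the internal __clk_get()):

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/err.h>

/* Assumed to be filled in by the clock provider at registration time. */
static struct clk *retained_clk;

static int __init example_clk_retain(void)
{
        if (IS_ERR_OR_NULL(retained_clk))
                return 0;

        /* Take a prepare+enable reference that is never dropped, so the
         * framework cannot disable the clock even after every well-behaved
         * consumer releases it. */
        return clk_prepare_enable(retained_clk);
}
arch_initcall(example_clk_retain);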
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 21784e4eb3f0..440ef81ab15c 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
285 { "pbridge", "perif", 15, CLK_IGNORE_UNUSED }, 285 { "pbridge", "perif", 15, CLK_IGNORE_UNUSED },
286 { "sdio", "perif", 16, CLK_IGNORE_UNUSED }, 286 { "sdio", "perif", 16, CLK_IGNORE_UNUSED },
287 { "nfc", "perif", 18 }, 287 { "nfc", "perif", 18 },
288 { "smemc", "perif", 19 },
289 { "pcie", "perif", 22 }, 288 { "pcie", "perif", 22 },
290}; 289};
291 290
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index b6e6c85507a5..0a47d6f49cd6 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
291 {} 291 {}
292}; 292};
293 293
294static struct platform_driver ppc_corenet_clk_driver __initdata = { 294static struct platform_driver ppc_corenet_clk_driver = {
295 .driver = { 295 .driver = {
296 .name = "ppc_corenet_clock", 296 .name = "ppc_corenet_clock",
297 .of_match_table = ppc_clk_ids, 297 .of_match_table = ppc_clk_ids,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f4963b7d4e17..d48ac71c6c8b 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1366,7 +1366,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
1366 new_rate = clk->ops->determine_rate(clk->hw, rate, 1366 new_rate = clk->ops->determine_rate(clk->hw, rate,
1367 &best_parent_rate, 1367 &best_parent_rate,
1368 &parent_hw); 1368 &parent_hw);
1369 parent = parent_hw->clk; 1369 parent = parent_hw ? parent_hw->clk : NULL;
1370 } else if (clk->ops->round_rate) { 1370 } else if (clk->ops->round_rate) {
1371 new_rate = clk->ops->round_rate(clk->hw, rate, 1371 new_rate = clk->ops->round_rate(clk->hw, rate,
1372 &best_parent_rate); 1372 &best_parent_rate);
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 75c8c45ef728..8539c4fd34cc 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -124,10 +124,11 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
124{ 124{
125 const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data; 125 const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
126 unsigned long alt_prate, alt_div; 126 unsigned long alt_prate, alt_div;
127 unsigned long flags;
127 128
128 alt_prate = clk_get_rate(cpuclk->alt_parent); 129 alt_prate = clk_get_rate(cpuclk->alt_parent);
129 130
130 spin_lock(cpuclk->lock); 131 spin_lock_irqsave(cpuclk->lock, flags);
131 132
132 /* 133 /*
133 * If the old parent clock speed is less than the clock speed 134 * If the old parent clock speed is less than the clock speed
@@ -164,7 +165,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
164 cpuclk->reg_base + reg_data->core_reg); 165 cpuclk->reg_base + reg_data->core_reg);
165 } 166 }
166 167
167 spin_unlock(cpuclk->lock); 168 spin_unlock_irqrestore(cpuclk->lock, flags);
168 return 0; 169 return 0;
169} 170}
170 171
@@ -173,6 +174,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
173{ 174{
174 const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data; 175 const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
175 const struct rockchip_cpuclk_rate_table *rate; 176 const struct rockchip_cpuclk_rate_table *rate;
177 unsigned long flags;
176 178
177 rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate); 179 rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
178 if (!rate) { 180 if (!rate) {
@@ -181,7 +183,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
181 return -EINVAL; 183 return -EINVAL;
182 } 184 }
183 185
184 spin_lock(cpuclk->lock); 186 spin_lock_irqsave(cpuclk->lock, flags);
185 187
186 if (ndata->old_rate < ndata->new_rate) 188 if (ndata->old_rate < ndata->new_rate)
187 rockchip_cpuclk_set_dividers(cpuclk, rate); 189 rockchip_cpuclk_set_dividers(cpuclk, rate);
@@ -201,7 +203,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
201 if (ndata->old_rate > ndata->new_rate) 203 if (ndata->old_rate > ndata->new_rate)
202 rockchip_cpuclk_set_dividers(cpuclk, rate); 204 rockchip_cpuclk_set_dividers(cpuclk, rate);
203 205
204 spin_unlock(cpuclk->lock); 206 spin_unlock_irqrestore(cpuclk->lock, flags);
205 return 0; 207 return 0;
206} 208}
207 209
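
Editor's note: the clk-cpu.c hunks above convert plain spin_lock()/spin_unlock() calls to spin_lock_irqsave()/spin_unlock_irqrestore(), the variant required when the same lock may also be taken from interrupt context or with interrupts already disabled. The general pattern, with an illustrative lock and counter:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_counter;

static void example_update(void)
{
        unsigned long flags;

        /* Disable local interrupts while holding the lock and remember the
         * previous IRQ state in 'flags', so the critical section stays safe
         * even if the lock is also used in an interrupt handler. */
        spin_lock_irqsave(&example_lock, flags);
        example_counter++;
        spin_unlock_irqrestore(&example_lock, flags);
}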
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index c54078960847..7eb684c50d42 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p) = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
210PNAME(mux_mac_p) = { "gpll", "dpll" }; 210PNAME(mux_mac_p) = { "gpll", "dpll" };
211PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" }; 211PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };
212 212
213static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = {
214 [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
215 RK2928_MODE_CON, 0, 5, 0, rk3188_pll_rates),
216 [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
217 RK2928_MODE_CON, 4, 4, 0, NULL),
218 [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
219 RK2928_MODE_CON, 8, 6, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
220 [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
221 RK2928_MODE_CON, 12, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
222};
223
213static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = { 224static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
214 [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0), 225 [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
215 RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates), 226 RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
@@ -427,11 +438,11 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
427 /* hclk_peri gates */ 438 /* hclk_peri gates */
428 GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS), 439 GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
429 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS), 440 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
430 GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS), 441 GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 7, GFLAGS),
431 GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS), 442 GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
432 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS), 443 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
433 GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS), 444 GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 5, GFLAGS),
434 GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS), 445 GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS),
435 GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS), 446 GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS),
436 GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS), 447 GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS),
437 GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS), 448 GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
@@ -592,7 +603,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
592 GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS), 603 GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
593 GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), 604 GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
594 605
595 GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS), 606 GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
607 RK2928_CLKGATE_CON(5), 14, GFLAGS),
596 608
597 GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS), 609 GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
598 610
@@ -680,7 +692,8 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
680 GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), 692 GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
681 GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS), 693 GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
682 694
683 GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS), 695 GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
696 RK2928_CLKGATE_CON(7), 3, GFLAGS),
684 GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS), 697 GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
685 698
686 GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS), 699 GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
@@ -735,8 +748,8 @@ static void __init rk3188_common_clk_init(struct device_node *np)
735static void __init rk3066a_clk_init(struct device_node *np) 748static void __init rk3066a_clk_init(struct device_node *np)
736{ 749{
737 rk3188_common_clk_init(np); 750 rk3188_common_clk_init(np);
738 rockchip_clk_register_plls(rk3188_pll_clks, 751 rockchip_clk_register_plls(rk3066_pll_clks,
739 ARRAY_SIZE(rk3188_pll_clks), 752 ARRAY_SIZE(rk3066_pll_clks),
740 RK3066_GRF_SOC_STATUS); 753 RK3066_GRF_SOC_STATUS);
741 rockchip_clk_register_branches(rk3066a_clk_branches, 754 rockchip_clk_register_branches(rk3066a_clk_branches,
742 ARRAY_SIZE(rk3066a_clk_branches)); 755 ARRAY_SIZE(rk3066a_clk_branches));
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index ac6be7c0132d..11194b8329fe 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -145,20 +145,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
145 } 145 }
146 146
147static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = { 147static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = {
148 RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4), 148 RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3),
149 RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4), 149 RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3),
150 RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4), 150 RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3),
151 RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4), 151 RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3),
152 RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4), 152 RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3),
153 RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4), 153 RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3),
154 RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4), 154 RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3),
155 RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4), 155 RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3),
156 RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4), 156 RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3),
157 RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4), 157 RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3),
158 RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4), 158 RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3),
159 RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4), 159 RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3),
160 RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4), 160 RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3),
161 RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4), 161 RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3),
162}; 162};
163 163
164static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = { 164static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 6a79fc4f900c..095c1774592c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
462 462
463 /* Register the CP15 based counter if we have one */ 463 /* Register the CP15 based counter if we have one */
464 if (type & ARCH_CP15_TIMER) { 464 if (type & ARCH_CP15_TIMER) {
465 if (arch_timer_use_virtual) 465 if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
466 arch_timer_read_counter = arch_counter_get_cntvct; 466 arch_timer_read_counter = arch_counter_get_cntvct;
467 else 467 else
468 arch_timer_read_counter = arch_counter_get_cntpct; 468 arch_timer_read_counter = arch_counter_get_cntpct;
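
Editor's note: the arch_arch_timer change keys the counter selection on IS_ENABLED(CONFIG_ARM64), which evaluates to 1 at preprocessing time when the option is =y or =m and to 0 otherwise, so both branches are still compiled and type-checked. A small illustration; CONFIG_FOO here is purely a placeholder, not a real option:

#include <linux/kconfig.h>

static int example_pick_source(int virt_ok)
{
        /* IS_ENABLED(CONFIG_FOO) is 1 if CONFIG_FOO=y or =m, else 0. */
        if (IS_ENABLED(CONFIG_FOO) || virt_ok)
                return 1;       /* e.g. use the virtual counter */
        return 0;               /* e.g. fall back to the physical counter */
}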
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f56147a1daed..fde97d6e31d6 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
211 /* OPPs might be populated at runtime, don't check for error here */ 211 /* OPPs might be populated at runtime, don't check for error here */
212 of_init_opp_table(cpu_dev); 212 of_init_opp_table(cpu_dev);
213 213
214 /*
215 * But we need OPP table to function so if it is not there let's
216 * give platform code chance to provide it for us.
217 */
218 ret = dev_pm_opp_get_opp_count(cpu_dev);
219 if (ret <= 0) {
220 pr_debug("OPP table is not ready, deferring probe\n");
221 ret = -EPROBE_DEFER;
222 goto out_free_opp;
223 }
224
214 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 225 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
215 if (!priv) { 226 if (!priv) {
216 ret = -ENOMEM; 227 ret = -ENOMEM;
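
Editor's note: the cpufreq-dt hunk above turns a missing OPP table into -EPROBE_DEFER, as its new comment explains, so the driver core retries the probe later once platform code has had a chance to register the table. A stripped-down sketch of that probe-deferral pattern; the readiness check is a stand-in for the real dev_pm_opp_get_opp_count() test:

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/errno.h>

static bool example_resource_ready(struct platform_device *pdev)
{
        /* Placeholder for a real check such as counting OPP entries. */
        return false;
}

static int example_probe(struct platform_device *pdev)
{
        if (!example_resource_ready(pdev)) {
                dev_dbg(&pdev->dev, "dependency not ready, deferring probe\n");
                /* The driver core puts the device back on the deferred-probe
                 * list and retries after other drivers have probed. */
                return -EPROBE_DEFER;
        }

        return 0;
}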
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a09a29c312a9..46bed4f81cde 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
2028 /* Don't start any governor operations if we are entering suspend */ 2028 /* Don't start any governor operations if we are entering suspend */
2029 if (cpufreq_suspended) 2029 if (cpufreq_suspended)
2030 return 0; 2030 return 0;
2031 /*
2032 * Governor might not be initiated here if ACPI _PPC changed
2033 * notification happened, so check it.
2034 */
2035 if (!policy->governor)
2036 return -EINVAL;
2031 2037
2032 if (policy->governor->max_transition_latency && 2038 if (policy->governor->max_transition_latency &&
2033 policy->cpuinfo.transition_latency > 2039 policy->cpuinfo.transition_latency >
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 37263d9a1051..401c0106ed34 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
79 79
80 last_state = &ldev->states[last_idx]; 80 last_state = &ldev->states[last_idx];
81 81
82 if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) { 82 last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
83 last_residency = cpuidle_get_last_residency(dev) - \
84 drv->states[last_idx].exit_latency;
85 }
86 else
87 last_residency = last_state->threshold.promotion_time + 1;
88 83
89 /* consider promotion */ 84 /* consider promotion */
90 if (last_idx < drv->state_count - 1 && 85 if (last_idx < drv->state_count - 1 &&
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 659d7b0c9ebf..40580794e23d 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
396 * power state and occurrence of the wakeup event. 396 * power state and occurrence of the wakeup event.
397 * 397 *
398 * If the entered idle state didn't support residency measurements, 398 * If the entered idle state didn't support residency measurements,
399 * we are basically lost in the dark how much time passed. 399 * we use them anyway if they are short, and if long,
400 * As a compromise, assume we slept for the whole expected time. 400 * truncate to the whole expected time.
401 * 401 *
402 * Any measured amount of time will include the exit latency. 402 * Any measured amount of time will include the exit latency.
403 * Since we are interested in when the wakeup begun, not when it 403 * Since we are interested in when the wakeup begun, not when it
@@ -405,22 +405,17 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
405 * the measured amount of time is less than the exit latency, 405 * the measured amount of time is less than the exit latency,
406 * assume the state was never reached and the exit latency is 0. 406 * assume the state was never reached and the exit latency is 0.
407 */ 407 */
408 if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
409 /* Use timer value as is */
410 measured_us = data->next_timer_us;
411 408
412 } else { 409 /* measured value */
413 /* Use measured value */ 410 measured_us = cpuidle_get_last_residency(dev);
414 measured_us = cpuidle_get_last_residency(dev);
415 411
416 /* Deduct exit latency */ 412 /* Deduct exit latency */
417 if (measured_us > target->exit_latency) 413 if (measured_us > target->exit_latency)
418 measured_us -= target->exit_latency; 414 measured_us -= target->exit_latency;
419 415
420 /* Make sure our coefficients do not exceed unity */ 416 /* Make sure our coefficients do not exceed unity */
421 if (measured_us > data->next_timer_us) 417 if (measured_us > data->next_timer_us)
422 measured_us = data->next_timer_us; 418 measured_us = data->next_timer_us;
423 }
424 419
425 /* Update our correction ratio */ 420 /* Update our correction ratio */
426 new_factor = data->correction_factor[data->bucket]; 421 new_factor = data->correction_factor[data->bucket];
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 380478562b7d..5c062548957c 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1505,7 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1505 dw->regs = chip->regs; 1505 dw->regs = chip->regs;
1506 chip->dw = dw; 1506 chip->dw = dw;
1507 1507
1508 pm_runtime_enable(chip->dev);
1509 pm_runtime_get_sync(chip->dev); 1508 pm_runtime_get_sync(chip->dev);
1510 1509
1511 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); 1510 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
@@ -1703,7 +1702,6 @@ int dw_dma_remove(struct dw_dma_chip *chip)
1703 } 1702 }
1704 1703
1705 pm_runtime_put_sync_suspend(chip->dev); 1704 pm_runtime_put_sync_suspend(chip->dev);
1706 pm_runtime_disable(chip->dev);
1707 return 0; 1705 return 0;
1708} 1706}
1709EXPORT_SYMBOL_GPL(dw_dma_remove); 1707EXPORT_SYMBOL_GPL(dw_dma_remove);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index a630161473a4..32ea1aca7a0e 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
19#include <linux/dmaengine.h> 20#include <linux/dmaengine.h>
20#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
@@ -185,6 +186,8 @@ static int dw_probe(struct platform_device *pdev)
185 if (err) 186 if (err)
186 return err; 187 return err;
187 188
189 pm_runtime_enable(&pdev->dev);
190
188 err = dw_dma_probe(chip, pdata); 191 err = dw_dma_probe(chip, pdata);
189 if (err) 192 if (err)
190 goto err_dw_dma_probe; 193 goto err_dw_dma_probe;
@@ -205,6 +208,7 @@ static int dw_probe(struct platform_device *pdev)
205 return 0; 208 return 0;
206 209
207err_dw_dma_probe: 210err_dw_dma_probe:
211 pm_runtime_disable(&pdev->dev);
208 clk_disable_unprepare(chip->clk); 212 clk_disable_unprepare(chip->clk);
209 return err; 213 return err;
210} 214}
@@ -217,6 +221,7 @@ static int dw_remove(struct platform_device *pdev)
217 of_dma_controller_free(pdev->dev.of_node); 221 of_dma_controller_free(pdev->dev.of_node);
218 222
219 dw_dma_remove(chip); 223 dw_dma_remove(chip);
224 pm_runtime_disable(&pdev->dev);
220 clk_disable_unprepare(chip->clk); 225 clk_disable_unprepare(chip->clk);
221 226
222 return 0; 227 return 0;
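
Editor's note: the dw_dma hunks above move pm_runtime_enable()/pm_runtime_disable() out of the shared core and into the platform glue, so each bus binding owns the runtime-PM life cycle it is responsible for. The usual pairing in a platform driver, as a sketch with placeholder setup code:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
        int err;

        pm_runtime_enable(&pdev->dev);

        err = 0;        /* device-specific setup would go here */
        if (err) {
                /* Every successful pm_runtime_enable() must be balanced by
                 * pm_runtime_disable() on the failure path as well. */
                pm_runtime_disable(&pdev->dev);
                return err;
        }

        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        pm_runtime_disable(&pdev->dev);
        return 0;
}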
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 55d4803d71b0..3d9e08f7e823 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
272 for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) { 272 for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
273 if (pending & BIT(gpio)) { 273 if (pending & BIT(gpio)) {
274 virq = irq_find_mapping(cg->chip.irqdomain, gpio); 274 virq = irq_find_mapping(cg->chip.irqdomain, gpio);
275 generic_handle_irq(virq); 275 handle_nested_irq(virq);
276 } 276 }
277 } 277 }
278 278
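
Editor's note: the crystalcove fix above dispatches demultiplexed GPIO interrupts with handle_nested_irq() instead of generic_handle_irq(), which is appropriate when the demux handler itself runs in a thread (for example behind a slow bus such as I2C) and the child handlers are nested rather than hard-IRQ handlers. A compact sketch of such a threaded demux loop, with invented names:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>

#define EXAMPLE_NR_LINES 16

/* Threaded handler for a demultiplexing interrupt controller behind a
 * sleeping bus: child interrupts are nested, so use handle_nested_irq(). */
static irqreturn_t example_demux_thread(int irq, void *data)
{
        struct irq_domain *domain = data;
        unsigned long pending = 0xffff;    /* would be read from the device */
        int hwirq;

        for_each_set_bit(hwirq, &pending, EXAMPLE_NR_LINES)
                handle_nested_irq(irq_find_mapping(domain, hwirq));

        return IRQ_HANDLED;
}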
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 978b51eae2ec..ce3c1558cb0a 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -47,13 +47,6 @@
47 47
48#define DLN2_GPIO_MAX_PINS 32 48#define DLN2_GPIO_MAX_PINS 32
49 49
50struct dln2_irq_work {
51 struct work_struct work;
52 struct dln2_gpio *dln2;
53 int pin;
54 int type;
55};
56
57struct dln2_gpio { 50struct dln2_gpio {
58 struct platform_device *pdev; 51 struct platform_device *pdev;
59 struct gpio_chip gpio; 52 struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
64 */ 57 */
65 DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS); 58 DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
66 59
67 DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS); 60 /* active IRQs - not synced to hardware */
68 DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS); 61 DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
69 DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS); 62 /* active IRQS - synced to hardware */
70 struct dln2_irq_work *irq_work; 63 DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
64 int irq_type[DLN2_GPIO_MAX_PINS];
65 struct mutex irq_lock;
71}; 66};
72 67
73struct dln2_gpio_pin { 68struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
141 return !!ret; 136 return !!ret;
142} 137}
143 138
144static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, 139static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
145 unsigned int pin, int value) 140 unsigned int pin, int value)
146{ 141{
147 struct dln2_gpio_pin_val req = { 142 struct dln2_gpio_pin_val req = {
148 .pin = cpu_to_le16(pin), 143 .pin = cpu_to_le16(pin),
149 .value = value, 144 .value = value,
150 }; 145 };
151 146
152 dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, 147 return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
153 sizeof(req)); 148 sizeof(req));
154} 149}
155 150
156#define DLN2_GPIO_DIRECTION_IN 0 151#define DLN2_GPIO_DIRECTION_IN 0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
267static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset, 262static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
268 int value) 263 int value)
269{ 264{
265 struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
266 int ret;
267
268 ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
269 if (ret < 0)
270 return ret;
271
270 return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT); 272 return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
271} 273}
272 274
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
297 &req, sizeof(req)); 299 &req, sizeof(req));
298} 300}
299 301
300static void dln2_irq_work(struct work_struct *w) 302static void dln2_irq_unmask(struct irq_data *irqd)
301{
302 struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
303 struct dln2_gpio *dln2 = iw->dln2;
304 u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
305
306 if (test_bit(iw->pin, dln2->irqs_enabled))
307 dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
308 else
309 dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
310}
311
312static void dln2_irq_enable(struct irq_data *irqd)
313{
314 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
315 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
316 int pin = irqd_to_hwirq(irqd);
317
318 set_bit(pin, dln2->irqs_enabled);
319 schedule_work(&dln2->irq_work[pin].work);
320}
321
322static void dln2_irq_disable(struct irq_data *irqd)
323{ 303{
324 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 304 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
325 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 305 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
326 int pin = irqd_to_hwirq(irqd); 306 int pin = irqd_to_hwirq(irqd);
327 307
328 clear_bit(pin, dln2->irqs_enabled); 308 set_bit(pin, dln2->unmasked_irqs);
329 schedule_work(&dln2->irq_work[pin].work);
330} 309}
331 310
332static void dln2_irq_mask(struct irq_data *irqd) 311static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
335 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 314 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
336 int pin = irqd_to_hwirq(irqd); 315 int pin = irqd_to_hwirq(irqd);
337 316
338 set_bit(pin, dln2->irqs_masked); 317 clear_bit(pin, dln2->unmasked_irqs);
339}
340
341static void dln2_irq_unmask(struct irq_data *irqd)
342{
343 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
344 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
345 struct device *dev = dln2->gpio.dev;
346 int pin = irqd_to_hwirq(irqd);
347
348 if (test_and_clear_bit(pin, dln2->irqs_pending)) {
349 int irq;
350
351 irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
352 if (!irq) {
353 dev_err(dev, "pin %d not mapped to IRQ\n", pin);
354 return;
355 }
356
357 generic_handle_irq(irq);
358 }
359} 318}
360 319
361static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) 320static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
366 325
367 switch (type) { 326 switch (type) {
368 case IRQ_TYPE_LEVEL_HIGH: 327 case IRQ_TYPE_LEVEL_HIGH:
369 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH; 328 dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
370 break; 329 break;
371 case IRQ_TYPE_LEVEL_LOW: 330 case IRQ_TYPE_LEVEL_LOW:
372 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW; 331 dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
373 break; 332 break;
374 case IRQ_TYPE_EDGE_BOTH: 333 case IRQ_TYPE_EDGE_BOTH:
375 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE; 334 dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
376 break; 335 break;
377 case IRQ_TYPE_EDGE_RISING: 336 case IRQ_TYPE_EDGE_RISING:
378 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING; 337 dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
379 break; 338 break;
380 case IRQ_TYPE_EDGE_FALLING: 339 case IRQ_TYPE_EDGE_FALLING:
381 dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING; 340 dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
382 break; 341 break;
383 default: 342 default:
384 return -EINVAL; 343 return -EINVAL;
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
387 return 0; 346 return 0;
388} 347}
389 348
349static void dln2_irq_bus_lock(struct irq_data *irqd)
350{
351 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
352 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
353
354 mutex_lock(&dln2->irq_lock);
355}
356
357static void dln2_irq_bus_unlock(struct irq_data *irqd)
358{
359 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
360 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
361 int pin = irqd_to_hwirq(irqd);
362 int enabled, unmasked;
363 unsigned type;
364 int ret;
365
366 enabled = test_bit(pin, dln2->enabled_irqs);
367 unmasked = test_bit(pin, dln2->unmasked_irqs);
368
369 if (enabled != unmasked) {
370 if (unmasked) {
371 type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
372 set_bit(pin, dln2->enabled_irqs);
373 } else {
374 type = DLN2_GPIO_EVENT_NONE;
375 clear_bit(pin, dln2->enabled_irqs);
376 }
377
378 ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
379 if (ret)
380 dev_err(dln2->gpio.dev, "failed to set event\n");
381 }
382
383 mutex_unlock(&dln2->irq_lock);
384}
385
390static struct irq_chip dln2_gpio_irqchip = { 386static struct irq_chip dln2_gpio_irqchip = {
391 .name = "dln2-irq", 387 .name = "dln2-irq",
392 .irq_enable = dln2_irq_enable,
393 .irq_disable = dln2_irq_disable,
394 .irq_mask = dln2_irq_mask, 388 .irq_mask = dln2_irq_mask,
395 .irq_unmask = dln2_irq_unmask, 389 .irq_unmask = dln2_irq_unmask,
396 .irq_set_type = dln2_irq_set_type, 390 .irq_set_type = dln2_irq_set_type,
391 .irq_bus_lock = dln2_irq_bus_lock,
392 .irq_bus_sync_unlock = dln2_irq_bus_unlock,
397}; 393};
398 394
399static void dln2_gpio_event(struct platform_device *pdev, u16 echo, 395static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
425 return; 421 return;
426 } 422 }
427 423
428 if (!test_bit(pin, dln2->irqs_enabled)) 424 switch (dln2->irq_type[pin]) {
429 return;
430 if (test_bit(pin, dln2->irqs_masked)) {
431 set_bit(pin, dln2->irqs_pending);
432 return;
433 }
434
435 switch (dln2->irq_work[pin].type) {
436 case DLN2_GPIO_EVENT_CHANGE_RISING: 425 case DLN2_GPIO_EVENT_CHANGE_RISING:
437 if (event->value) 426 if (event->value)
438 generic_handle_irq(irq); 427 generic_handle_irq(irq);
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
451 struct dln2_gpio *dln2; 440 struct dln2_gpio *dln2;
452 struct device *dev = &pdev->dev; 441 struct device *dev = &pdev->dev;
453 int pins; 442 int pins;
454 int i, ret; 443 int ret;
455 444
456 pins = dln2_gpio_get_pin_count(pdev); 445 pins = dln2_gpio_get_pin_count(pdev);
457 if (pins < 0) { 446 if (pins < 0) {
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
467 if (!dln2) 456 if (!dln2)
468 return -ENOMEM; 457 return -ENOMEM;
469 458
470 dln2->irq_work = devm_kcalloc(&pdev->dev, pins, 459 mutex_init(&dln2->irq_lock);
471 sizeof(struct dln2_irq_work), GFP_KERNEL);
472 if (!dln2->irq_work)
473 return -ENOMEM;
474 for (i = 0; i < pins; i++) {
475 INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
476 dln2->irq_work[i].pin = i;
477 dln2->irq_work[i].dln2 = dln2;
478 }
479 460
480 dln2->pdev = pdev; 461 dln2->pdev = pdev;
481 462
@@ -529,11 +510,8 @@ out:
529static int dln2_gpio_remove(struct platform_device *pdev) 510static int dln2_gpio_remove(struct platform_device *pdev)
530{ 511{
531 struct dln2_gpio *dln2 = platform_get_drvdata(pdev); 512 struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
532 int i;
533 513
534 dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV); 514 dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
535 for (i = 0; i < dln2->gpio.ngpio; i++)
536 flush_work(&dln2->irq_work[i].work);
537 gpiochip_remove(&dln2->gpio); 515 gpiochip_remove(&dln2->gpio);
538 516
539 return 0; 517 return 0;
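
Editor's note: the dln2 rework above replaces per-pin workqueue items with the irq_bus_lock()/irq_bus_sync_unlock() callbacks: mask/unmask only touch in-memory bitmaps, and the sleeping transfer that actually reconfigures the hardware happens once, when the bus is synced. A skeleton of that slow-bus irq_chip pattern; the sync body and names are illustrative, and the mutex would be mutex_init()'ed at probe time:

#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/bitops.h>

struct example_irqchip_state {
        struct mutex lock;          /* serializes hardware updates */
        unsigned long unmasked;     /* requested state, not yet in hardware */
        unsigned long enabled;      /* state currently programmed in hardware */
};

static struct example_irqchip_state example_state;

static void example_irq_mask(struct irq_data *d)
{
        clear_bit(irqd_to_hwirq(d), &example_state.unmasked);
}

static void example_irq_unmask(struct irq_data *d)
{
        set_bit(irqd_to_hwirq(d), &example_state.unmasked);
}

static void example_irq_bus_lock(struct irq_data *d)
{
        mutex_lock(&example_state.lock);
}

static void example_irq_bus_sync_unlock(struct irq_data *d)
{
        unsigned long hwirq = irqd_to_hwirq(d);

        /* Only now, outside atomic context, push any pending change to the
         * device (e.g. over USB or I2C); this body is a placeholder. */
        if (test_bit(hwirq, &example_state.unmasked) !=
            test_bit(hwirq, &example_state.enabled))
                change_bit(hwirq, &example_state.enabled);

        mutex_unlock(&example_state.lock);
}

static struct irq_chip example_irq_chip = {
        .name                 = "example",
        .irq_mask             = example_irq_mask,
        .irq_unmask           = example_irq_unmask,
        .irq_bus_lock         = example_irq_bus_lock,
        .irq_bus_sync_unlock  = example_irq_bus_sync_unlock,
};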
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 09daaf2aeb56..3a5a71050559 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
441 err = gpiochip_add(gc); 441 err = gpiochip_add(gc);
442 if (err) { 442 if (err) {
443 dev_err(&ofdev->dev, "Could not add gpiochip\n"); 443 dev_err(&ofdev->dev, "Could not add gpiochip\n");
444 irq_domain_remove(priv->domain); 444 if (priv->domain)
445 irq_domain_remove(priv->domain);
445 return err; 446 return err;
446 } 447 }
447 448
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 604dbe60bdee..08261f2b3a82 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
45 return false; 45 return false;
46 46
47 ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); 47 ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
48 if (ret < 0) 48 if (ret < 0) {
49 return false; 49 /* We've found the gpio chip, but the translation failed.
50 * Return true to stop looking and return the translation
51 * error via out_gpio
52 */
53 gg_data->out_gpio = ERR_PTR(ret);
54 return true;
55 }
50 56
51 gg_data->out_gpio = gpiochip_get_desc(gc, ret); 57 gg_data->out_gpio = gpiochip_get_desc(gc, ret);
52 return true; 58 return true;
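
Editor's note: the gpiolib-of.c change above propagates a translation failure by stashing ERR_PTR(ret) in the output pointer and stopping the chip walk, so the caller can tell "found but failed" apart from "not found". The ERR_PTR/IS_ERR/PTR_ERR idiom it relies on, reduced to a neutral example:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>

struct example_desc { int id; };

static struct example_desc example_table[4];

/* Hypothetical lookup that encodes hard failures in the returned pointer. */
static struct example_desc *example_lookup(int id)
{
        if (id < 0)
                return ERR_PTR(-EINVAL);        /* hard error: stop searching */
        if (id >= ARRAY_SIZE(example_table))
                return NULL;                    /* not found: keep looking */
        return &example_table[id];
}

static int example_use(int id)
{
        struct example_desc *desc = example_lookup(id);

        if (IS_ERR(desc))
                return PTR_ERR(desc);           /* propagate the encoded errno */
        if (!desc)
                return -ENOENT;
        return desc->id;
}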
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 2ac1800b58bb..f62aa115d79a 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev,
128 return status; 128 return status;
129} 129}
130 130
131static const DEVICE_ATTR(value, 0644, 131static DEVICE_ATTR(value, 0644,
132 gpio_value_show, gpio_value_store); 132 gpio_value_show, gpio_value_store);
133 133
134static irqreturn_t gpio_sysfs_irq(int irq, void *priv) 134static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev,
353 return status ? : size; 353 return status ? : size;
354} 354}
355 355
356static const DEVICE_ATTR(active_low, 0644, 356static DEVICE_ATTR(active_low, 0644,
357 gpio_active_low_show, gpio_active_low_store); 357 gpio_active_low_show, gpio_active_low_store);
358 358
359static const struct attribute *gpio_attrs[] = { 359static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
360 int n)
361{
362 struct device *dev = container_of(kobj, struct device, kobj);
363 struct gpio_desc *desc = dev_get_drvdata(dev);
364 umode_t mode = attr->mode;
365 bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags);
366
367 if (attr == &dev_attr_direction.attr) {
368 if (!show_direction)
369 mode = 0;
370 } else if (attr == &dev_attr_edge.attr) {
371 if (gpiod_to_irq(desc) < 0)
372 mode = 0;
373 if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
374 mode = 0;
375 }
376
377 return mode;
378}
379
380static struct attribute *gpio_attrs[] = {
381 &dev_attr_direction.attr,
382 &dev_attr_edge.attr,
360 &dev_attr_value.attr, 383 &dev_attr_value.attr,
361 &dev_attr_active_low.attr, 384 &dev_attr_active_low.attr,
362 NULL, 385 NULL,
363}; 386};
364 387
365static const struct attribute_group gpio_attr_group = { 388static const struct attribute_group gpio_group = {
366 .attrs = (struct attribute **) gpio_attrs, 389 .attrs = gpio_attrs,
390 .is_visible = gpio_is_visible,
391};
392
393static const struct attribute_group *gpio_groups[] = {
394 &gpio_group,
395 NULL
367}; 396};
368 397
369/* 398/*
@@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
400} 429}
401static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); 430static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
402 431
403static const struct attribute *gpiochip_attrs[] = { 432static struct attribute *gpiochip_attrs[] = {
404 &dev_attr_base.attr, 433 &dev_attr_base.attr,
405 &dev_attr_label.attr, 434 &dev_attr_label.attr,
406 &dev_attr_ngpio.attr, 435 &dev_attr_ngpio.attr,
407 NULL, 436 NULL,
408}; 437};
409 438ATTRIBUTE_GROUPS(gpiochip);
410static const struct attribute_group gpiochip_attr_group = {
411 .attrs = (struct attribute **) gpiochip_attrs,
412};
413 439
414/* 440/*
415 * /sys/class/gpio/export ... write-only 441 * /sys/class/gpio/export ... write-only
@@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
556 goto fail_unlock; 582 goto fail_unlock;
557 } 583 }
558 584
559 if (!desc->chip->direction_input || !desc->chip->direction_output) 585 if (desc->chip->direction_input && desc->chip->direction_output &&
560 direction_may_change = false; 586 direction_may_change) {
587 set_bit(FLAG_SYSFS_DIR, &desc->flags);
588 }
589
561 spin_unlock_irqrestore(&gpio_lock, flags); 590 spin_unlock_irqrestore(&gpio_lock, flags);
562 591
563 offset = gpio_chip_hwgpio(desc); 592 offset = gpio_chip_hwgpio(desc);
564 if (desc->chip->names && desc->chip->names[offset]) 593 if (desc->chip->names && desc->chip->names[offset])
565 ioname = desc->chip->names[offset]; 594 ioname = desc->chip->names[offset];
566 595
567 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), 596 dev = device_create_with_groups(&gpio_class, desc->chip->dev,
568 desc, ioname ? ioname : "gpio%u", 597 MKDEV(0, 0), desc, gpio_groups,
569 desc_to_gpio(desc)); 598 ioname ? ioname : "gpio%u",
599 desc_to_gpio(desc));
570 if (IS_ERR(dev)) { 600 if (IS_ERR(dev)) {
571 status = PTR_ERR(dev); 601 status = PTR_ERR(dev);
572 goto fail_unlock; 602 goto fail_unlock;
573 } 603 }
574 604
575 status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
576 if (status)
577 goto fail_unregister_device;
578
579 if (direction_may_change) {
580 status = device_create_file(dev, &dev_attr_direction);
581 if (status)
582 goto fail_unregister_device;
583 }
584
585 if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
586 !test_bit(FLAG_IS_OUT, &desc->flags))) {
587 status = device_create_file(dev, &dev_attr_edge);
588 if (status)
589 goto fail_unregister_device;
590 }
591
592 set_bit(FLAG_EXPORT, &desc->flags); 605 set_bit(FLAG_EXPORT, &desc->flags);
593 mutex_unlock(&sysfs_lock); 606 mutex_unlock(&sysfs_lock);
594 return 0; 607 return 0;
595 608
596fail_unregister_device:
597 device_unregister(dev);
598fail_unlock: 609fail_unlock:
599 mutex_unlock(&sysfs_lock); 610 mutex_unlock(&sysfs_lock);
600 gpiod_dbg(desc, "%s: status %d\n", __func__, status); 611 gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -718,6 +729,7 @@ void gpiod_unexport(struct gpio_desc *desc)
718 dev = class_find_device(&gpio_class, NULL, desc, match_export); 729 dev = class_find_device(&gpio_class, NULL, desc, match_export);
719 if (dev) { 730 if (dev) {
720 gpio_setup_irq(desc, dev, 0); 731 gpio_setup_irq(desc, dev, 0);
732 clear_bit(FLAG_SYSFS_DIR, &desc->flags);
721 clear_bit(FLAG_EXPORT, &desc->flags); 733 clear_bit(FLAG_EXPORT, &desc->flags);
722 } else 734 } else
723 status = -ENODEV; 735 status = -ENODEV;
@@ -750,13 +762,13 @@ int gpiochip_export(struct gpio_chip *chip)
750 762
751 /* use chip->base for the ID; it's already known to be unique */ 763 /* use chip->base for the ID; it's already known to be unique */
752 mutex_lock(&sysfs_lock); 764 mutex_lock(&sysfs_lock);
753 dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, 765 dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
754 "gpiochip%d", chip->base); 766 chip, gpiochip_groups,
755 if (!IS_ERR(dev)) { 767 "gpiochip%d", chip->base);
756 status = sysfs_create_group(&dev->kobj, 768 if (IS_ERR(dev))
757 &gpiochip_attr_group);
758 } else
759 status = PTR_ERR(dev); 769 status = PTR_ERR(dev);
770 else
771 status = 0;
760 chip->exported = (status == 0); 772 chip->exported = (status == 0);
761 mutex_unlock(&sysfs_lock); 773 mutex_unlock(&sysfs_lock);
762 774
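
Editor's note: the gpiolib-sysfs rework above registers all attributes up front with device_create_with_groups() and lets an is_visible() callback hide the ones that do not apply, instead of calling sysfs_create_group() and device_create_file() piecemeal after device_create(); that also avoids the window in which userspace can see a partially populated device. A minimal sketch of the pattern with invented attribute names and an omitted visibility condition:

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        return sprintf(buf, "normal\n");
}
static DEVICE_ATTR_RO(mode);

static ssize_t debug_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "0\n");
}
static DEVICE_ATTR_RO(debug);

/* Hide the "debug" attribute unless some per-device condition holds. */
static umode_t example_attr_is_visible(struct kobject *kobj,
                                       struct attribute *attr, int n)
{
        if (attr == &dev_attr_debug.attr)
                return 0;               /* condition omitted for brevity */
        return attr->mode;
}

static struct attribute *example_attrs[] = {
        &dev_attr_mode.attr,
        &dev_attr_debug.attr,
        NULL,
};

static const struct attribute_group example_group = {
        .attrs          = example_attrs,
        .is_visible     = example_attr_is_visible,
};

static const struct attribute_group *example_groups[] = {
        &example_group,
        NULL,
};

/* Registration: attributes are created (or hidden) atomically with the device. */
static struct device *example_register(struct class *cls, void *drvdata)
{
        return device_create_with_groups(cls, NULL, MKDEV(0, 0), drvdata,
                                         example_groups, "example0");
}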
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 487afe6f22fc..568aa2b6bdb0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip)
248 base = gpiochip_find_base(chip->ngpio); 248 base = gpiochip_find_base(chip->ngpio);
249 if (base < 0) { 249 if (base < 0) {
250 status = base; 250 status = base;
251 goto unlock; 251 spin_unlock_irqrestore(&gpio_lock, flags);
252 goto err_free_descs;
252 } 253 }
253 chip->base = base; 254 chip->base = base;
254 } 255 }
255 256
256 status = gpiochip_add_to_list(chip); 257 status = gpiochip_add_to_list(chip);
258 if (status) {
259 spin_unlock_irqrestore(&gpio_lock, flags);
260 goto err_free_descs;
261 }
257 262
258 if (status == 0) { 263 for (id = 0; id < chip->ngpio; id++) {
259 for (id = 0; id < chip->ngpio; id++) { 264 struct gpio_desc *desc = &descs[id];
260 struct gpio_desc *desc = &descs[id]; 265
261 desc->chip = chip; 266 desc->chip = chip;
262 267
263 /* REVISIT: most hardware initializes GPIOs as 268 /* REVISIT: most hardware initializes GPIOs as inputs (often
264 * inputs (often with pullups enabled) so power 269 * with pullups enabled) so power usage is minimized. Linux
265 * usage is minimized. Linux code should set the 270 * code should set the gpio direction first thing; but until
266 * gpio direction first thing; but until it does, 271 * it does, and in case chip->get_direction is not set, we may
267 * and in case chip->get_direction is not set, 272 * expose the wrong direction in sysfs.
268 * we may expose the wrong direction in sysfs. 273 */
269 */ 274 desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
270 desc->flags = !chip->direction_input
271 ? (1 << FLAG_IS_OUT)
272 : 0;
273 }
274 } 275 }
275 276
276 chip->desc = descs; 277 chip->desc = descs;
@@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip)
284 of_gpiochip_add(chip); 285 of_gpiochip_add(chip);
285 acpi_gpiochip_add(chip); 286 acpi_gpiochip_add(chip);
286 287
287 if (status)
288 goto fail;
289
290 status = gpiochip_export(chip); 288 status = gpiochip_export(chip);
291 if (status) 289 if (status)
292 goto fail; 290 goto err_remove_chip;
293 291
294 pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__, 292 pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
295 chip->base, chip->base + chip->ngpio - 1, 293 chip->base, chip->base + chip->ngpio - 1,
@@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip)
297 295
298 return 0; 296 return 0;
299 297
300unlock: 298err_remove_chip:
299 acpi_gpiochip_remove(chip);
300 of_gpiochip_remove(chip);
301 spin_lock_irqsave(&gpio_lock, flags);
302 list_del(&chip->list);
301 spin_unlock_irqrestore(&gpio_lock, flags); 303 spin_unlock_irqrestore(&gpio_lock, flags);
302fail:
303 kfree(descs);
304 chip->desc = NULL; 304 chip->desc = NULL;
305err_free_descs:
306 kfree(descs);
305 307
306 /* failures here can mean systems won't boot... */ 308 /* failures here can mean systems won't boot... */
307 pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__, 309 pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
@@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip)
325 unsigned long flags; 327 unsigned long flags;
326 unsigned id; 328 unsigned id;
327 329
328 acpi_gpiochip_remove(chip); 330 gpiochip_unexport(chip);
329
330 spin_lock_irqsave(&gpio_lock, flags);
331 331
332 gpiochip_irqchip_remove(chip); 332 gpiochip_irqchip_remove(chip);
333
334 acpi_gpiochip_remove(chip);
333 gpiochip_remove_pin_ranges(chip); 335 gpiochip_remove_pin_ranges(chip);
334 of_gpiochip_remove(chip); 336 of_gpiochip_remove(chip);
335 337
338 spin_lock_irqsave(&gpio_lock, flags);
336 for (id = 0; id < chip->ngpio; id++) { 339 for (id = 0; id < chip->ngpio; id++) {
337 if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) 340 if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags))
338 dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); 341 dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
@@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip)
342 345
343 list_del(&chip->list); 346 list_del(&chip->list);
344 spin_unlock_irqrestore(&gpio_lock, flags); 347 spin_unlock_irqrestore(&gpio_lock, flags);
345 gpiochip_unexport(chip);
346 348
347 kfree(chip->desc); 349 kfree(chip->desc);
348 chip->desc = NULL; 350 chip->desc = NULL;
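
Editor's note: the reworked gpiochip_add() error path above unwinds in strict reverse order of setup (unregister from ACPI/OF, relock to unlink from the list, then free the descriptor array), with one label per stage. The underlying goto-cleanup idiom, reduced to a neutral example:

#include <linux/slab.h>
#include <linux/errno.h>

static int example_setup(void)
{
        void *a, *b;
        int ret;

        a = kzalloc(32, GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        b = kzalloc(32, GFP_KERNEL);
        if (!b) {
                ret = -ENOMEM;
                goto err_free_a;
        }

        ret = -EIO;             /* stand-in for a later failure */
        if (ret)
                goto err_free_b;

        return 0;

        /* Labels are named after the action they perform and appear in
         * reverse order of the setup steps above. */
err_free_b:
        kfree(b);
err_free_a:
        kfree(a);
        return ret;
}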
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index e3a52113a541..550a5eafbd38 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -77,6 +77,7 @@ struct gpio_desc {
77#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ 77#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
78#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ 78#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
79#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ 79#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
80#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */
80 81
81#define ID_SHIFT 16 /* add new flags before this one */ 82#define ID_SHIFT 16 /* add new flags before this one */
82 83
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 66e40398b3d3..e620807418ea 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
37obj-$(CONFIG_DRM_TTM) += ttm/ 37obj-$(CONFIG_DRM_TTM) += ttm/
38obj-$(CONFIG_DRM_TDFX) += tdfx/ 38obj-$(CONFIG_DRM_TDFX) += tdfx/
39obj-$(CONFIG_DRM_R128) += r128/ 39obj-$(CONFIG_DRM_R128) += r128/
40obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
40obj-$(CONFIG_DRM_RADEON)+= radeon/ 41obj-$(CONFIG_DRM_RADEON)+= radeon/
41obj-$(CONFIG_DRM_MGA) += mga/ 42obj-$(CONFIG_DRM_MGA) += mga/
42obj-$(CONFIG_DRM_I810) += i810/ 43obj-$(CONFIG_DRM_I810) += i810/
@@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
67obj-y += i2c/ 68obj-y += i2c/
68obj-y += panel/ 69obj-y += panel/
69obj-y += bridge/ 70obj-y += bridge/
70obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 4f7b275f2f7b..fcfdf23e1913 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,6 @@
31#include <uapi/linux/kfd_ioctl.h> 31#include <uapi/linux/kfd_ioctl.h>
32#include <linux/time.h> 32#include <linux/time.h>
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/uaccess.h>
35#include <uapi/asm-generic/mman-common.h> 34#include <uapi/asm-generic/mman-common.h>
36#include <asm/processor.h> 35#include <asm/processor.h>
37#include "kfd_priv.h" 36#include "kfd_priv.h"
@@ -121,27 +120,20 @@ static int kfd_open(struct inode *inode, struct file *filep)
121 if (IS_ERR(process)) 120 if (IS_ERR(process))
122 return PTR_ERR(process); 121 return PTR_ERR(process);
123 122
124 process->is_32bit_user_mode = is_32bit_user_mode;
125
126 dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n", 123 dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
127 process->pasid, process->is_32bit_user_mode); 124 process->pasid, process->is_32bit_user_mode);
128 125
129 kfd_init_apertures(process);
130
131 return 0; 126 return 0;
132} 127}
133 128
134static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, 129static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
135 void __user *arg) 130 void *data)
136{ 131{
137 struct kfd_ioctl_get_version_args args; 132 struct kfd_ioctl_get_version_args *args = data;
138 int err = 0; 133 int err = 0;
139 134
140 args.major_version = KFD_IOCTL_MAJOR_VERSION; 135 args->major_version = KFD_IOCTL_MAJOR_VERSION;
141 args.minor_version = KFD_IOCTL_MINOR_VERSION; 136 args->minor_version = KFD_IOCTL_MINOR_VERSION;
142
143 if (copy_to_user(arg, &args, sizeof(args)))
144 err = -EFAULT;
145 137
146 return err; 138 return err;
147} 139}
@@ -225,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
225 return 0; 217 return 0;
226} 218}
227 219
228static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, 220static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
229 void __user *arg) 221 void *data)
230{ 222{
231 struct kfd_ioctl_create_queue_args args; 223 struct kfd_ioctl_create_queue_args *args = data;
232 struct kfd_dev *dev; 224 struct kfd_dev *dev;
233 int err = 0; 225 int err = 0;
234 unsigned int queue_id; 226 unsigned int queue_id;
@@ -237,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
237 229
238 memset(&q_properties, 0, sizeof(struct queue_properties)); 230 memset(&q_properties, 0, sizeof(struct queue_properties));
239 231
240 if (copy_from_user(&args, arg, sizeof(args)))
241 return -EFAULT;
242
243 pr_debug("kfd: creating queue ioctl\n"); 232 pr_debug("kfd: creating queue ioctl\n");
244 233
245 err = set_queue_properties_from_user(&q_properties, &args); 234 err = set_queue_properties_from_user(&q_properties, args);
246 if (err) 235 if (err)
247 return err; 236 return err;
248 237
249 dev = kfd_device_by_id(args.gpu_id); 238 dev = kfd_device_by_id(args->gpu_id);
250 if (dev == NULL) 239 if (dev == NULL)
251 return -EINVAL; 240 return -EINVAL;
252 241
@@ -254,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
254 243
255 pdd = kfd_bind_process_to_device(dev, p); 244 pdd = kfd_bind_process_to_device(dev, p);
256 if (IS_ERR(pdd)) { 245 if (IS_ERR(pdd)) {
257 err = PTR_ERR(pdd); 246 err = -ESRCH;
258 goto err_bind_process; 247 goto err_bind_process;
259 } 248 }
260 249
@@ -267,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
267 if (err != 0) 256 if (err != 0)
268 goto err_create_queue; 257 goto err_create_queue;
269 258
270 args.queue_id = queue_id; 259 args->queue_id = queue_id;
271 260
272 /* Return gpu_id as doorbell offset for mmap usage */ 261 /* Return gpu_id as doorbell offset for mmap usage */
273 args.doorbell_offset = args.gpu_id << PAGE_SHIFT; 262 args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
274
275 if (copy_to_user(arg, &args, sizeof(args))) {
276 err = -EFAULT;
277 goto err_copy_args_out;
278 }
279 263
280 mutex_unlock(&p->mutex); 264 mutex_unlock(&p->mutex);
281 265
282 pr_debug("kfd: queue id %d was created successfully\n", args.queue_id); 266 pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
283 267
284 pr_debug("ring buffer address == 0x%016llX\n", 268 pr_debug("ring buffer address == 0x%016llX\n",
285 args.ring_base_address); 269 args->ring_base_address);
286 270
287 pr_debug("read ptr address == 0x%016llX\n", 271 pr_debug("read ptr address == 0x%016llX\n",
288 args.read_pointer_address); 272 args->read_pointer_address);
289 273
290 pr_debug("write ptr address == 0x%016llX\n", 274 pr_debug("write ptr address == 0x%016llX\n",
291 args.write_pointer_address); 275 args->write_pointer_address);
292 276
293 return 0; 277 return 0;
294 278
295err_copy_args_out:
296 pqm_destroy_queue(&p->pqm, queue_id);
297err_create_queue: 279err_create_queue:
298err_bind_process: 280err_bind_process:
299 mutex_unlock(&p->mutex); 281 mutex_unlock(&p->mutex);
@@ -301,99 +283,90 @@ err_bind_process:
301} 283}
302 284
303static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, 285static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
304 void __user *arg) 286 void *data)
305{ 287{
306 int retval; 288 int retval;
307 struct kfd_ioctl_destroy_queue_args args; 289 struct kfd_ioctl_destroy_queue_args *args = data;
308
309 if (copy_from_user(&args, arg, sizeof(args)))
310 return -EFAULT;
311 290
312 pr_debug("kfd: destroying queue id %d for PASID %d\n", 291 pr_debug("kfd: destroying queue id %d for PASID %d\n",
313 args.queue_id, 292 args->queue_id,
314 p->pasid); 293 p->pasid);
315 294
316 mutex_lock(&p->mutex); 295 mutex_lock(&p->mutex);
317 296
318 retval = pqm_destroy_queue(&p->pqm, args.queue_id); 297 retval = pqm_destroy_queue(&p->pqm, args->queue_id);
319 298
320 mutex_unlock(&p->mutex); 299 mutex_unlock(&p->mutex);
321 return retval; 300 return retval;
322} 301}
323 302
324static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, 303static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
325 void __user *arg) 304 void *data)
326{ 305{
327 int retval; 306 int retval;
328 struct kfd_ioctl_update_queue_args args; 307 struct kfd_ioctl_update_queue_args *args = data;
329 struct queue_properties properties; 308 struct queue_properties properties;
330 309
331 if (copy_from_user(&args, arg, sizeof(args))) 310 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
332 return -EFAULT;
333
334 if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
335 pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); 311 pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
336 return -EINVAL; 312 return -EINVAL;
337 } 313 }
338 314
339 if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) { 315 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
340 pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); 316 pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
341 return -EINVAL; 317 return -EINVAL;
342 } 318 }
343 319
344 if ((args.ring_base_address) && 320 if ((args->ring_base_address) &&
345 (!access_ok(VERIFY_WRITE, 321 (!access_ok(VERIFY_WRITE,
346 (const void __user *) args.ring_base_address, 322 (const void __user *) args->ring_base_address,
347 sizeof(uint64_t)))) { 323 sizeof(uint64_t)))) {
348 pr_err("kfd: can't access ring base address\n"); 324 pr_err("kfd: can't access ring base address\n");
349 return -EFAULT; 325 return -EFAULT;
350 } 326 }
351 327
352 if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) { 328 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
353 pr_err("kfd: ring size must be a power of 2 or 0\n"); 329 pr_err("kfd: ring size must be a power of 2 or 0\n");
354 return -EINVAL; 330 return -EINVAL;
355 } 331 }
356 332
357 properties.queue_address = args.ring_base_address; 333 properties.queue_address = args->ring_base_address;
358 properties.queue_size = args.ring_size; 334 properties.queue_size = args->ring_size;
359 properties.queue_percent = args.queue_percentage; 335 properties.queue_percent = args->queue_percentage;
360 properties.priority = args.queue_priority; 336 properties.priority = args->queue_priority;
361 337
362 pr_debug("kfd: updating queue id %d for PASID %d\n", 338 pr_debug("kfd: updating queue id %d for PASID %d\n",
363 args.queue_id, p->pasid); 339 args->queue_id, p->pasid);
364 340
365 mutex_lock(&p->mutex); 341 mutex_lock(&p->mutex);
366 342
367 retval = pqm_update_queue(&p->pqm, args.queue_id, &properties); 343 retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
368 344
369 mutex_unlock(&p->mutex); 345 mutex_unlock(&p->mutex);
370 346
371 return retval; 347 return retval;
372} 348}
373 349
374static long kfd_ioctl_set_memory_policy(struct file *filep, 350static int kfd_ioctl_set_memory_policy(struct file *filep,
375 struct kfd_process *p, void __user *arg) 351 struct kfd_process *p, void *data)
376{ 352{
377 struct kfd_ioctl_set_memory_policy_args args; 353 struct kfd_ioctl_set_memory_policy_args *args = data;
378 struct kfd_dev *dev; 354 struct kfd_dev *dev;
379 int err = 0; 355 int err = 0;
380 struct kfd_process_device *pdd; 356 struct kfd_process_device *pdd;
381 enum cache_policy default_policy, alternate_policy; 357 enum cache_policy default_policy, alternate_policy;
382 358
383 if (copy_from_user(&args, arg, sizeof(args))) 359 if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
384 return -EFAULT; 360 && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
385
386 if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
387 && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
388 return -EINVAL; 361 return -EINVAL;
389 } 362 }
390 363
391 if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT 364 if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
392 && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { 365 && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
393 return -EINVAL; 366 return -EINVAL;
394 } 367 }
395 368
396 dev = kfd_device_by_id(args.gpu_id); 369 dev = kfd_device_by_id(args->gpu_id);
397 if (dev == NULL) 370 if (dev == NULL)
398 return -EINVAL; 371 return -EINVAL;
399 372
@@ -401,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
401 374
402 pdd = kfd_bind_process_to_device(dev, p); 375 pdd = kfd_bind_process_to_device(dev, p);
403 if (IS_ERR(pdd)) { 376 if (IS_ERR(pdd)) {
404 err = PTR_ERR(pdd); 377 err = -ESRCH;
405 goto out; 378 goto out;
406 } 379 }
407 380
408 default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT) 381 default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
409 ? cache_policy_coherent : cache_policy_noncoherent; 382 ? cache_policy_coherent : cache_policy_noncoherent;
410 383
411 alternate_policy = 384 alternate_policy =
412 (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) 385 (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
413 ? cache_policy_coherent : cache_policy_noncoherent; 386 ? cache_policy_coherent : cache_policy_noncoherent;
414 387
415 if (!dev->dqm->set_cache_memory_policy(dev->dqm, 388 if (!dev->dqm->set_cache_memory_policy(dev->dqm,
416 &pdd->qpd, 389 &pdd->qpd,
417 default_policy, 390 default_policy,
418 alternate_policy, 391 alternate_policy,
419 (void __user *)args.alternate_aperture_base, 392 (void __user *)args->alternate_aperture_base,
420 args.alternate_aperture_size)) 393 args->alternate_aperture_size))
421 err = -EINVAL; 394 err = -EINVAL;
422 395
423out: 396out:
@@ -426,53 +399,44 @@ out:
426 return err; 399 return err;
427} 400}
428 401
429static long kfd_ioctl_get_clock_counters(struct file *filep, 402static int kfd_ioctl_get_clock_counters(struct file *filep,
430 struct kfd_process *p, void __user *arg) 403 struct kfd_process *p, void *data)
431{ 404{
432 struct kfd_ioctl_get_clock_counters_args args; 405 struct kfd_ioctl_get_clock_counters_args *args = data;
433 struct kfd_dev *dev; 406 struct kfd_dev *dev;
434 struct timespec time; 407 struct timespec time;
435 408
436 if (copy_from_user(&args, arg, sizeof(args))) 409 dev = kfd_device_by_id(args->gpu_id);
437 return -EFAULT;
438
439 dev = kfd_device_by_id(args.gpu_id);
440 if (dev == NULL) 410 if (dev == NULL)
441 return -EINVAL; 411 return -EINVAL;
442 412
443 /* Reading GPU clock counter from KGD */ 413 /* Reading GPU clock counter from KGD */
444 args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); 414 args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
445 415
446 /* No access to rdtsc. Using raw monotonic time */ 416 /* No access to rdtsc. Using raw monotonic time */
447 getrawmonotonic(&time); 417 getrawmonotonic(&time);
448 args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time); 418 args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
449 419
450 get_monotonic_boottime(&time); 420 get_monotonic_boottime(&time);
451 args.system_clock_counter = (uint64_t)timespec_to_ns(&time); 421 args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
452 422
453 /* Since the counter is in nano-seconds we use 1GHz frequency */ 423 /* Since the counter is in nano-seconds we use 1GHz frequency */
454 args.system_clock_freq = 1000000000; 424 args->system_clock_freq = 1000000000;
455
456 if (copy_to_user(arg, &args, sizeof(args)))
457 return -EFAULT;
458 425
459 return 0; 426 return 0;
460} 427}
461 428
462 429
463static int kfd_ioctl_get_process_apertures(struct file *filp, 430static int kfd_ioctl_get_process_apertures(struct file *filp,
464 struct kfd_process *p, void __user *arg) 431 struct kfd_process *p, void *data)
465{ 432{
466 struct kfd_ioctl_get_process_apertures_args args; 433 struct kfd_ioctl_get_process_apertures_args *args = data;
467 struct kfd_process_device_apertures *pAperture; 434 struct kfd_process_device_apertures *pAperture;
468 struct kfd_process_device *pdd; 435 struct kfd_process_device *pdd;
469 436
470 dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); 437 dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
471 438
472 if (copy_from_user(&args, arg, sizeof(args))) 439 args->num_of_nodes = 0;
473 return -EFAULT;
474
475 args.num_of_nodes = 0;
476 440
477 mutex_lock(&p->mutex); 441 mutex_lock(&p->mutex);
478 442
@@ -481,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
481 /* Run over all pdd of the process */ 445 /* Run over all pdd of the process */
482 pdd = kfd_get_first_process_device_data(p); 446 pdd = kfd_get_first_process_device_data(p);
483 do { 447 do {
484 pAperture = &args.process_apertures[args.num_of_nodes]; 448 pAperture =
449 &args->process_apertures[args->num_of_nodes];
485 pAperture->gpu_id = pdd->dev->id; 450 pAperture->gpu_id = pdd->dev->id;
486 pAperture->lds_base = pdd->lds_base; 451 pAperture->lds_base = pdd->lds_base;
487 pAperture->lds_limit = pdd->lds_limit; 452 pAperture->lds_limit = pdd->lds_limit;
@@ -491,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
491 pAperture->scratch_limit = pdd->scratch_limit; 456 pAperture->scratch_limit = pdd->scratch_limit;
492 457
493 dev_dbg(kfd_device, 458 dev_dbg(kfd_device,
494 "node id %u\n", args.num_of_nodes); 459 "node id %u\n", args->num_of_nodes);
495 dev_dbg(kfd_device, 460 dev_dbg(kfd_device,
496 "gpu id %u\n", pdd->dev->id); 461 "gpu id %u\n", pdd->dev->id);
497 dev_dbg(kfd_device, 462 dev_dbg(kfd_device,
@@ -507,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
507 dev_dbg(kfd_device, 472 dev_dbg(kfd_device,
508 "scratch_limit %llX\n", pdd->scratch_limit); 473 "scratch_limit %llX\n", pdd->scratch_limit);
509 474
510 args.num_of_nodes++; 475 args->num_of_nodes++;
511 } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && 476 } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
512 (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS)); 477 (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
513 } 478 }
514 479
515 mutex_unlock(&p->mutex); 480 mutex_unlock(&p->mutex);
516 481
517 if (copy_to_user(arg, &args, sizeof(args)))
518 return -EFAULT;
519
520 return 0; 482 return 0;
521} 483}
522 484
485#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
486 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
487
488/** Ioctl table */
489static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
490 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
491 kfd_ioctl_get_version, 0),
492
493 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
494 kfd_ioctl_create_queue, 0),
495
496 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
497 kfd_ioctl_destroy_queue, 0),
498
499 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
500 kfd_ioctl_set_memory_policy, 0),
501
502 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
503 kfd_ioctl_get_clock_counters, 0),
504
505 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
506 kfd_ioctl_get_process_apertures, 0),
507
508 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
509 kfd_ioctl_update_queue, 0),
510};
511
512#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
513
523static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 514static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
524{ 515{
525 struct kfd_process *process; 516 struct kfd_process *process;
526 long err = -EINVAL; 517 amdkfd_ioctl_t *func;
518 const struct amdkfd_ioctl_desc *ioctl = NULL;
519 unsigned int nr = _IOC_NR(cmd);
520 char stack_kdata[128];
521 char *kdata = NULL;
522 unsigned int usize, asize;
523 int retcode = -EINVAL;
524
525 if (nr >= AMDKFD_CORE_IOCTL_COUNT)
526 goto err_i1;
527
528 if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
529 u32 amdkfd_size;
530
531 ioctl = &amdkfd_ioctls[nr];
527 532
528 dev_dbg(kfd_device, 533 amdkfd_size = _IOC_SIZE(ioctl->cmd);
529 "ioctl cmd 0x%x (#%d), arg 0x%lx\n", 534 usize = asize = _IOC_SIZE(cmd);
530 cmd, _IOC_NR(cmd), arg); 535 if (amdkfd_size > asize)
536 asize = amdkfd_size;
537
538 cmd = ioctl->cmd;
539 } else
540 goto err_i1;
541
542 dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
531 543
532 process = kfd_get_process(current); 544 process = kfd_get_process(current);
533 if (IS_ERR(process)) 545 if (IS_ERR(process)) {
534 return PTR_ERR(process); 546 dev_dbg(kfd_device, "no process\n");
547 goto err_i1;
548 }
535 549
536 switch (cmd) { 550 /* Do not trust userspace, use our own definition */
537 case KFD_IOC_GET_VERSION: 551 func = ioctl->func;
538 err = kfd_ioctl_get_version(filep, process, (void __user *)arg); 552
539 break; 553 if (unlikely(!func)) {
540 case KFD_IOC_CREATE_QUEUE: 554 dev_dbg(kfd_device, "no function\n");
541 err = kfd_ioctl_create_queue(filep, process, 555 retcode = -EINVAL;
542 (void __user *)arg); 556 goto err_i1;
543 break;
544
545 case KFD_IOC_DESTROY_QUEUE:
546 err = kfd_ioctl_destroy_queue(filep, process,
547 (void __user *)arg);
548 break;
549
550 case KFD_IOC_SET_MEMORY_POLICY:
551 err = kfd_ioctl_set_memory_policy(filep, process,
552 (void __user *)arg);
553 break;
554
555 case KFD_IOC_GET_CLOCK_COUNTERS:
556 err = kfd_ioctl_get_clock_counters(filep, process,
557 (void __user *)arg);
558 break;
559
560 case KFD_IOC_GET_PROCESS_APERTURES:
561 err = kfd_ioctl_get_process_apertures(filep, process,
562 (void __user *)arg);
563 break;
564
565 case KFD_IOC_UPDATE_QUEUE:
566 err = kfd_ioctl_update_queue(filep, process,
567 (void __user *)arg);
568 break;
569
570 default:
571 dev_err(kfd_device,
572 "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
573 cmd, arg);
574 err = -EINVAL;
575 break;
576 } 557 }
577 558
578 if (err < 0) 559 if (cmd & (IOC_IN | IOC_OUT)) {
579 dev_err(kfd_device, 560 if (asize <= sizeof(stack_kdata)) {
580 "ioctl error %ld for ioctl cmd 0x%x (#%d)\n", 561 kdata = stack_kdata;
581 err, cmd, _IOC_NR(cmd)); 562 } else {
563 kdata = kmalloc(asize, GFP_KERNEL);
564 if (!kdata) {
565 retcode = -ENOMEM;
566 goto err_i1;
567 }
568 }
569 if (asize > usize)
570 memset(kdata + usize, 0, asize - usize);
571 }
582 572
583 return err; 573 if (cmd & IOC_IN) {
574 if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
575 retcode = -EFAULT;
576 goto err_i1;
577 }
578 } else if (cmd & IOC_OUT) {
579 memset(kdata, 0, usize);
580 }
581
582 retcode = func(filep, process, kdata);
583
584 if (cmd & IOC_OUT)
585 if (copy_to_user((void __user *)arg, kdata, usize) != 0)
586 retcode = -EFAULT;
587
588err_i1:
589 if (!ioctl)
590 dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
591 task_pid_nr(current), cmd, nr);
592
593 if (kdata != stack_kdata)
594 kfree(kdata);
595
596 if (retcode)
597 dev_dbg(kfd_device, "ret = %d\n", retcode);
598
599 return retcode;
584} 600}
585 601
586static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) 602static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
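
The bulk of the kfd_chardev.c change above replaces the per-command switch in kfd_ioctl(), where every handler did its own copy_from_user()/copy_to_user(), with a table-driven dispatcher: handlers now receive a kernel-side buffer, and kfd_ioctl() performs a single copy in and a single copy out based on the size and direction bits encoded in the command number. A minimal, self-contained sketch of that pattern follows; the DEMO_* names and the returned value are hypothetical, and the error and copy-out handling is simplified relative to the real kfd_ioctl().

/*
 * Minimal sketch of a table-driven ioctl dispatcher (hypothetical DEMO_*
 * names); it mirrors the shape of the new kfd_ioctl(), not its exact code.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_args {
        __u64 value;                    /* returned to userspace */
};

#define DEMO_IOC_GET_VALUE      _IOR('D', 1, struct demo_args)

typedef int demo_ioctl_t(struct file *filep, void *data);

struct demo_ioctl_desc {
        unsigned int cmd;
        demo_ioctl_t *func;
};

#define DEMO_IOCTL_DEF(ioctl, _func) \
        [_IOC_NR(ioctl)] = { .cmd = ioctl, .func = _func }

static int demo_ioctl_get_value(struct file *filep, void *data)
{
        struct demo_args *args = data;  /* already copied in by the dispatcher */

        args->value = 42;
        return 0;
}

/* Table indexed by _IOC_NR(cmd), one entry per supported command. */
static const struct demo_ioctl_desc demo_ioctls[] = {
        DEMO_IOCTL_DEF(DEMO_IOC_GET_VALUE, demo_ioctl_get_value),
};

static long demo_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
        unsigned int nr = _IOC_NR(cmd);
        const struct demo_ioctl_desc *desc;
        char stack_kdata[128];
        char *kdata = stack_kdata;
        unsigned int size;
        int ret;

        if (nr >= ARRAY_SIZE(demo_ioctls) || !demo_ioctls[nr].func)
                return -EINVAL;
        desc = &demo_ioctls[nr];

        /* Do not trust the size/direction bits passed in by userspace. */
        size = _IOC_SIZE(desc->cmd);
        if (size > sizeof(stack_kdata)) {
                kdata = kmalloc(size, GFP_KERNEL);
                if (!kdata)
                        return -ENOMEM;
        }

        /* One copy-in for every command that passes data to the kernel. */
        if (_IOC_DIR(desc->cmd) & _IOC_WRITE) {
                if (copy_from_user(kdata, (void __user *)arg, size)) {
                        ret = -EFAULT;
                        goto out;
                }
        } else {
                memset(kdata, 0, size);
        }

        ret = desc->func(filep, kdata);

        /* One copy-out for every command that returns data. */
        if (!ret && (_IOC_DIR(desc->cmd) & _IOC_READ) &&
            copy_to_user((void __user *)arg, kdata, size))
                ret = -EFAULT;
out:
        if (kdata != stack_kdata)
                kfree(kdata);
        return ret;
}

drm_ioctl() uses the same shape (a drm_ioctl_desc table plus a shared stack-or-heap argument buffer), which is presumably the model this amdkfd rework follows.
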
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 924e90c072e5..9c8961d22360 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
161{ 161{
162 int bit = qpd->vmid - KFD_VMID_START_OFFSET; 162 int bit = qpd->vmid - KFD_VMID_START_OFFSET;
163 163
164 /* Release the vmid mapping */
165 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
166
164 set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); 167 set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
165 qpd->vmid = 0; 168 qpd->vmid = 0;
166 q->properties.vmid = 0; 169 q->properties.vmid = 0;
@@ -272,6 +275,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
272 return retval; 275 return retval;
273 } 276 }
274 277
278 pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
279 q->pipe,
280 q->queue);
281
282 retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
283 q->queue, q->properties.write_ptr);
284 if (retval != 0) {
285 deallocate_hqd(dqm, q);
286 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
287 return retval;
288 }
289
275 return 0; 290 return 0;
276} 291}
277 292
@@ -320,6 +335,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
320{ 335{
321 int retval; 336 int retval;
322 struct mqd_manager *mqd; 337 struct mqd_manager *mqd;
338 bool prev_active = false;
323 339
324 BUG_ON(!dqm || !q || !q->mqd); 340 BUG_ON(!dqm || !q || !q->mqd);
325 341
@@ -330,10 +346,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
330 return -ENOMEM; 346 return -ENOMEM;
331 } 347 }
332 348
333 retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
334 if (q->properties.is_active == true) 349 if (q->properties.is_active == true)
350 prev_active = true;
351
352 /*
353 *
354 * check active state vs. the previous state
355 * and modify counter accordingly
356 */
357 retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
358 if ((q->properties.is_active == true) && (prev_active == false))
335 dqm->queue_count++; 359 dqm->queue_count++;
336 else 360 else if ((q->properties.is_active == false) && (prev_active == true))
337 dqm->queue_count--; 361 dqm->queue_count--;
338 362
339 if (sched_policy != KFD_SCHED_POLICY_NO_HWS) 363 if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
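
Besides releasing the PASID-to-VMID mapping in deallocate_vmid() and loading the MQD onto the HQD (with unwind on failure) in create_compute_queue_nocpsch(), the device queue manager change fixes the active-queue accounting in update_queue(): the old code adjusted dqm->queue_count on every update based only on the new is_active value, so updating an already-active (or already-inactive) queue skewed the count. The fix samples the state before update_mqd() and adjusts the counter only on a real transition, roughly as in this sketch (demo_* names are hypothetical):

#include <linux/types.h>

/* Adjust a counter on state transitions, not on the current state alone. */
static void demo_account_active(unsigned int *active_count,
                                bool prev_active, bool now_active)
{
        if (now_active && !prev_active)
                (*active_count)++;      /* queue became active */
        else if (!now_active && prev_active)
                (*active_count)--;      /* queue became inactive */
        /* no adjustment when the state did not change */
}
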
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 66df4da01c29..e64aa99e5e41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
299 struct kfd_dev *dev; 299 struct kfd_dev *dev;
300 struct kfd_process_device *pdd; 300 struct kfd_process_device *pdd;
301 301
302 mutex_lock(&process->mutex);
303
304 /*Iterating over all devices*/ 302 /*Iterating over all devices*/
305 while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL && 303 while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
306 id < NUM_OF_SUPPORTED_GPUS) { 304 id < NUM_OF_SUPPORTED_GPUS) {
307 305
308 pdd = kfd_get_process_device_data(dev, process, 1); 306 pdd = kfd_get_process_device_data(dev, process, 1);
307 if (!pdd)
308 return -1;
309 309
310 /* 310 /*
311 * For 64 bit process aperture will be statically reserved in 311 * For 64 bit process aperture will be statically reserved in
@@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
348 id++; 348 id++;
349 } 349 }
350 350
351 mutex_unlock(&process->mutex);
352
353 return 0; 351 return 0;
354} 352}
355 353
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index adc31474e786..4c3828cf45bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
184 uint32_t queue_id) 184 uint32_t queue_id)
185{ 185{
186 186
187 return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address, 187 return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
188 pipe_id, queue_id); 188 pipe_id, queue_id);
189 189
190} 190}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 71699ad97d74..4c25ef504f79 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -32,7 +32,7 @@ int kfd_pasid_init(void)
32{ 32{
33 pasid_limit = max_num_of_processes; 33 pasid_limit = max_num_of_processes;
34 34
35 pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL); 35 pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
36 if (!pasid_bitmap) 36 if (!pasid_bitmap)
37 return -ENOMEM; 37 return -ENOMEM;
38 38
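
The one-line kfd_pasid.c change above fixes an undersized allocation: BITS_TO_LONGS() returns a count of longs, so kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL) reserved only one byte per long actually needed. kcalloc() multiplies the count by the element size, as in this minimal sketch:

#include <linux/bitops.h>
#include <linux/slab.h>

/* Size a bitmap allocation in longs, not bytes. */
static unsigned long *demo_alloc_bitmap(unsigned int nbits)
{
        /* kcalloc(n, size, flags) returns n * size zeroed bytes */
        return kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL);
}
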
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f9fb81e3bb09..a5edb29507e3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -463,6 +463,24 @@ struct kfd_process {
463 bool is_32bit_user_mode; 463 bool is_32bit_user_mode;
464}; 464};
465 465
466/**
467 * Ioctl function type.
468 *
469 * \param filep pointer to file structure.
470 * \param p amdkfd process pointer.
471 * \param data pointer to arg that was copied from user.
472 */
473typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
474 void *data);
475
476struct amdkfd_ioctl_desc {
477 unsigned int cmd;
478 int flags;
479 amdkfd_ioctl_t *func;
480 unsigned int cmd_drv;
481 const char *name;
482};
483
466void kfd_process_create_wq(void); 484void kfd_process_create_wq(void);
467void kfd_process_destroy_wq(void); 485void kfd_process_destroy_wq(void);
468struct kfd_process *kfd_create_process(const struct task_struct *); 486struct kfd_process *kfd_create_process(const struct task_struct *);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b85eb0b830b4..3c76ef05cbcf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -26,6 +26,8 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/amd-iommu.h> 27#include <linux/amd-iommu.h>
28#include <linux/notifier.h> 28#include <linux/notifier.h>
29#include <linux/compat.h>
30
29struct mm_struct; 31struct mm_struct;
30 32
31#include "kfd_priv.h" 33#include "kfd_priv.h"
@@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
285 if (err != 0) 287 if (err != 0)
286 goto err_process_pqm_init; 288 goto err_process_pqm_init;
287 289
290 /* init process apertures*/
291 process->is_32bit_user_mode = is_compat_task();
292 if (kfd_init_apertures(process) != 0)
293 goto err_init_apretures;
294
288 return process; 295 return process;
289 296
297err_init_apretures:
298 pqm_uninit(&process->pqm);
290err_process_pqm_init: 299err_process_pqm_init:
291 hash_del_rcu(&process->kfd_processes); 300 hash_del_rcu(&process->kfd_processes);
292 synchronize_rcu(); 301 synchronize_rcu();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5733e2859e8a..cca1708fd811 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
700 dev->node_props.simd_per_cu); 700 dev->node_props.simd_per_cu);
701 sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu", 701 sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
702 dev->node_props.max_slots_scratch_cu); 702 dev->node_props.max_slots_scratch_cu);
703 sysfs_show_32bit_prop(buffer, "engine_id",
704 dev->node_props.engine_id);
705 sysfs_show_32bit_prop(buffer, "vendor_id", 703 sysfs_show_32bit_prop(buffer, "vendor_id",
706 dev->node_props.vendor_id); 704 dev->node_props.vendor_id);
707 sysfs_show_32bit_prop(buffer, "device_id", 705 sysfs_show_32bit_prop(buffer, "device_id",
@@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
715 dev->gpu->kgd)); 713 dev->gpu->kgd));
716 sysfs_show_64bit_prop(buffer, "local_mem_size", 714 sysfs_show_64bit_prop(buffer, "local_mem_size",
717 kfd2kgd->get_vmem_size(dev->gpu->kgd)); 715 kfd2kgd->get_vmem_size(dev->gpu->kgd));
716
717 sysfs_show_32bit_prop(buffer, "fw_version",
718 kfd2kgd->get_fw_version(
719 dev->gpu->kgd,
720 KGD_ENGINE_MEC1));
721
718 } 722 }
719 723
720 ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", 724 ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
@@ -917,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
917 uint32_t i = 0; 921 uint32_t i = 0;
918 922
919 list_for_each_entry(dev, &topology_device_list, list) { 923 list_for_each_entry(dev, &topology_device_list, list) {
920 ret = kfd_build_sysfs_node_entry(dev, 0); 924 ret = kfd_build_sysfs_node_entry(dev, i);
921 if (ret < 0) 925 if (ret < 0)
922 return ret; 926 return ret;
923 i++; 927 i++;
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9c729dd8dd50..96a512208fad 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -45,6 +45,17 @@ enum kgd_memory_pool {
45 KGD_POOL_FRAMEBUFFER = 3, 45 KGD_POOL_FRAMEBUFFER = 3,
46}; 46};
47 47
48enum kgd_engine_type {
49 KGD_ENGINE_PFP = 1,
50 KGD_ENGINE_ME,
51 KGD_ENGINE_CE,
52 KGD_ENGINE_MEC1,
53 KGD_ENGINE_MEC2,
54 KGD_ENGINE_RLC,
55 KGD_ENGINE_SDMA,
56 KGD_ENGINE_MAX
57};
58
48struct kgd2kfd_shared_resources { 59struct kgd2kfd_shared_resources {
49 /* Bit n == 1 means VMID n is available for KFD. */ 60 /* Bit n == 1 means VMID n is available for KFD. */
50 unsigned int compute_vmid_bitmap; 61 unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@ struct kgd2kfd_calls {
137 * 148 *
138 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot. 149 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
139 * 150 *
151 * @get_fw_version: Returns FW versions from the header
152 *
140 * This structure contains function pointers to services that the kgd driver 153 * This structure contains function pointers to services that the kgd driver
141 * provides to amdkfd driver. 154 * provides to amdkfd driver.
142 * 155 *
@@ -170,12 +183,14 @@ struct kfd2kgd_calls {
170 int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 183 int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
171 uint32_t queue_id, uint32_t __user *wptr); 184 uint32_t queue_id, uint32_t __user *wptr);
172 185
173 bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address, 186 bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
174 uint32_t pipe_id, uint32_t queue_id); 187 uint32_t pipe_id, uint32_t queue_id);
175 188
176 int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type, 189 int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
177 unsigned int timeout, uint32_t pipe_id, 190 unsigned int timeout, uint32_t pipe_id,
178 uint32_t queue_id); 191 uint32_t queue_id);
192 uint16_t (*get_fw_version)(struct kgd_dev *kgd,
193 enum kgd_engine_type type);
179}; 194};
180 195
181bool kgd2kfd_init(unsigned interface_version, 196bool kgd2kfd_init(unsigned interface_version,
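
The kgd_kfd_interface.h hunk adds enum kgd_engine_type and a get_fw_version() callback to kfd2kgd_calls; the kfd_topology.c change earlier uses it to expose the MEC1 firmware version through sysfs. A hypothetical kgd-side stub is sketched below; per the comment above, the real implementation returns the version taken from the firmware header, and the demo_* names and constant here are placeholders only:

#include "kgd_kfd_interface.h"

static uint16_t demo_get_fw_version(struct kgd_dev *kgd,
                                    enum kgd_engine_type type)
{
        switch (type) {
        case KGD_ENGINE_MEC1:
                return 42;              /* placeholder version */
        default:
                return 0;
        }
}

static const struct kfd2kgd_calls demo_kfd2kgd = {
        /* ...the other required callbacks are omitted for brevity... */
        .get_fw_version = demo_get_fw_version,
};
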
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4a78a773151c..bbdbe4721573 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
61 struct drm_crtc_state *crtc_state; 61 struct drm_crtc_state *crtc_state;
62 62
63 if (plane->state->crtc) { 63 if (plane->state->crtc) {
64 crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)]; 64 crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
65 65
66 if (WARN_ON(!crtc_state)) 66 if (WARN_ON(!crtc_state))
67 return; 67 return;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f5a5f18efa5b..4d79dad9d44f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
830 * vblank events since the system was booted, including lost events due to 830 * vblank events since the system was booted, including lost events due to
831 * modesetting activity. 831 * modesetting activity.
832 * 832 *
833 * This is the legacy version of drm_crtc_vblank_count().
834 *
833 * Returns: 835 * Returns:
834 * The software vblank counter. 836 * The software vblank counter.
835 */ 837 */
@@ -844,6 +846,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
844EXPORT_SYMBOL(drm_vblank_count); 846EXPORT_SYMBOL(drm_vblank_count);
845 847
846/** 848/**
849 * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
850 * @crtc: which counter to retrieve
851 *
852 * Fetches the "cooked" vblank count value that represents the number of
853 * vblank events since the system was booted, including lost events due to
854 * modesetting activity.
855 *
856 * This is the native KMS version of drm_vblank_count().
857 *
858 * Returns:
859 * The software vblank counter.
860 */
861u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
862{
863 return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
864}
865EXPORT_SYMBOL(drm_crtc_vblank_count);
866
867/**
847 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value 868 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
848 * and the system timestamp corresponding to that vblank counter value. 869 * and the system timestamp corresponding to that vblank counter value.
849 * 870 *
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
904 * 925 *
905 * Updates sequence # and timestamp on event, and sends it to userspace. 926 * Updates sequence # and timestamp on event, and sends it to userspace.
906 * Caller must hold event lock. 927 * Caller must hold event lock.
928 *
929 * This is the legacy version of drm_crtc_send_vblank_event().
907 */ 930 */
908void drm_send_vblank_event(struct drm_device *dev, int crtc, 931void drm_send_vblank_event(struct drm_device *dev, int crtc,
909 struct drm_pending_vblank_event *e) 932 struct drm_pending_vblank_event *e)
@@ -923,6 +946,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
923EXPORT_SYMBOL(drm_send_vblank_event); 946EXPORT_SYMBOL(drm_send_vblank_event);
924 947
925/** 948/**
949 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
950 * @crtc: the source CRTC of the vblank event
951 * @e: the event to send
952 *
953 * Updates sequence # and timestamp on event, and sends it to userspace.
954 * Caller must hold event lock.
955 *
956 * This is the native KMS version of drm_send_vblank_event().
957 */
958void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
959 struct drm_pending_vblank_event *e)
960{
961 drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
962}
963EXPORT_SYMBOL(drm_crtc_send_vblank_event);
964
965/**
926 * drm_vblank_enable - enable the vblank interrupt on a CRTC 966 * drm_vblank_enable - enable the vblank interrupt on a CRTC
927 * @dev: DRM device 967 * @dev: DRM device
928 * @crtc: CRTC in question 968 * @crtc: CRTC in question
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1594 * 1634 *
1595 * Drivers should call this routine in their vblank interrupt handlers to 1635 * Drivers should call this routine in their vblank interrupt handlers to
1596 * update the vblank counter and send any signals that may be pending. 1636 * update the vblank counter and send any signals that may be pending.
1637 *
1638 * This is the legacy version of drm_crtc_handle_vblank().
1597 */ 1639 */
1598bool drm_handle_vblank(struct drm_device *dev, int crtc) 1640bool drm_handle_vblank(struct drm_device *dev, int crtc)
1599{ 1641{
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1670 return true; 1712 return true;
1671} 1713}
1672EXPORT_SYMBOL(drm_handle_vblank); 1714EXPORT_SYMBOL(drm_handle_vblank);
1715
1716/**
1717 * drm_crtc_handle_vblank - handle a vblank event
1718 * @crtc: where this event occurred
1719 *
1720 * Drivers should call this routine in their vblank interrupt handlers to
1721 * update the vblank counter and send any signals that may be pending.
1722 *
1723 * This is the native KMS version of drm_handle_vblank().
1724 *
1725 * Returns:
1726 * True if the event was successfully handled, false on failure.
1727 */
1728bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
1729{
1730 return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
1731}
1732EXPORT_SYMBOL(drm_crtc_handle_vblank);
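
drm_irq.c above gains struct drm_crtc based wrappers, drm_crtc_vblank_count(), drm_crtc_send_vblank_event() and drm_crtc_handle_vblank(), around the legacy (dev, crtc index) calls. Below is a hedged sketch of how a driver's vblank interrupt path might use them; struct demo_crtc and its pending_event field are hypothetical driver state, not DRM API:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

struct demo_crtc {
        struct drm_crtc base;
        struct drm_pending_vblank_event *pending_event;
};

static irqreturn_t demo_vblank_irq(int irq, void *arg)
{
        struct demo_crtc *dcrtc = arg;
        struct drm_device *dev = dcrtc->base.dev;
        unsigned long flags;

        /* bump the cooked vblank counter and wake waiters */
        drm_crtc_handle_vblank(&dcrtc->base);

        /* complete a pending page-flip event, if any; the event lock
         * must be held around drm_crtc_send_vblank_event() */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (dcrtc->pending_event) {
                drm_crtc_send_vblank_event(&dcrtc->base, dcrtc->pending_event);
                dcrtc->pending_event = NULL;
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        return IRQ_HANDLED;
}
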
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f990ab4c3efb..574057cd1d09 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -811,6 +811,8 @@ int i915_reset(struct drm_device *dev)
811 if (!i915.reset) 811 if (!i915.reset)
812 return 0; 812 return 0;
813 813
814 intel_reset_gt_powersave(dev);
815
814 mutex_lock(&dev->struct_mutex); 816 mutex_lock(&dev->struct_mutex);
815 817
816 i915_gem_reset(dev); 818 i915_gem_reset(dev);
@@ -880,7 +882,7 @@ int i915_reset(struct drm_device *dev)
880 * of re-init after reset. 882 * of re-init after reset.
881 */ 883 */
882 if (INTEL_INFO(dev)->gen > 5) 884 if (INTEL_INFO(dev)->gen > 5)
883 intel_reset_gt_powersave(dev); 885 intel_enable_gt_powersave(dev);
884 } else { 886 } else {
885 mutex_unlock(&dev->struct_mutex); 887 mutex_unlock(&dev->struct_mutex);
886 } 888 }
@@ -1584,7 +1586,7 @@ static struct drm_driver driver = {
1584 .gem_prime_import = i915_gem_prime_import, 1586 .gem_prime_import = i915_gem_prime_import,
1585 1587
1586 .dumb_create = i915_gem_dumb_create, 1588 .dumb_create = i915_gem_dumb_create,
1587 .dumb_map_offset = i915_gem_dumb_map_offset, 1589 .dumb_map_offset = i915_gem_mmap_gtt,
1588 .dumb_destroy = drm_gem_dumb_destroy, 1590 .dumb_destroy = drm_gem_dumb_destroy,
1589 .ioctls = i915_ioctls, 1591 .ioctls = i915_ioctls,
1590 .fops = &i915_driver_fops, 1592 .fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 63bcda5541ec..e9f891c432f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1756,8 +1756,6 @@ struct drm_i915_private {
1756 */ 1756 */
1757 struct workqueue_struct *dp_wq; 1757 struct workqueue_struct *dp_wq;
1758 1758
1759 uint32_t bios_vgacntr;
1760
1761 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1759 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1762 struct { 1760 struct {
1763 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, 1761 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -2501,9 +2499,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
2501int i915_gem_dumb_create(struct drm_file *file_priv, 2499int i915_gem_dumb_create(struct drm_file *file_priv,
2502 struct drm_device *dev, 2500 struct drm_device *dev,
2503 struct drm_mode_create_dumb *args); 2501 struct drm_mode_create_dumb *args);
2504int i915_gem_dumb_map_offset(struct drm_file *file_priv, 2502int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
2505 struct drm_device *dev, uint32_t handle, 2503 uint32_t handle, uint64_t *offset);
2506 uint64_t *offset);
2507/** 2504/**
2508 * Returns true if seq1 is later than seq2. 2505 * Returns true if seq1 is later than seq2.
2509 */ 2506 */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a9faea626db..c11603b4cf1d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -401,7 +401,6 @@ static int
401i915_gem_create(struct drm_file *file, 401i915_gem_create(struct drm_file *file,
402 struct drm_device *dev, 402 struct drm_device *dev,
403 uint64_t size, 403 uint64_t size,
404 bool dumb,
405 uint32_t *handle_p) 404 uint32_t *handle_p)
406{ 405{
407 struct drm_i915_gem_object *obj; 406 struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@ i915_gem_create(struct drm_file *file,
417 if (obj == NULL) 416 if (obj == NULL)
418 return -ENOMEM; 417 return -ENOMEM;
419 418
420 obj->base.dumb = dumb;
421 ret = drm_gem_handle_create(file, &obj->base, &handle); 419 ret = drm_gem_handle_create(file, &obj->base, &handle);
422 /* drop reference from allocate - handle holds it now */ 420 /* drop reference from allocate - handle holds it now */
423 drm_gem_object_unreference_unlocked(&obj->base); 421 drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file,
437 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); 435 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
438 args->size = args->pitch * args->height; 436 args->size = args->pitch * args->height;
439 return i915_gem_create(file, dev, 437 return i915_gem_create(file, dev,
440 args->size, true, &args->handle); 438 args->size, &args->handle);
441} 439}
442 440
443/** 441/**
@@ -450,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
450 struct drm_i915_gem_create *args = data; 448 struct drm_i915_gem_create *args = data;
451 449
452 return i915_gem_create(file, dev, 450 return i915_gem_create(file, dev,
453 args->size, false, &args->handle); 451 args->size, &args->handle);
454} 452}
455 453
456static inline int 454static inline int
@@ -1050,6 +1048,7 @@ int
1050i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1048i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1051 struct drm_file *file) 1049 struct drm_file *file)
1052{ 1050{
1051 struct drm_i915_private *dev_priv = dev->dev_private;
1053 struct drm_i915_gem_pwrite *args = data; 1052 struct drm_i915_gem_pwrite *args = data;
1054 struct drm_i915_gem_object *obj; 1053 struct drm_i915_gem_object *obj;
1055 int ret; 1054 int ret;
@@ -1069,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1069 return -EFAULT; 1068 return -EFAULT;
1070 } 1069 }
1071 1070
1071 intel_runtime_pm_get(dev_priv);
1072
1072 ret = i915_mutex_lock_interruptible(dev); 1073 ret = i915_mutex_lock_interruptible(dev);
1073 if (ret) 1074 if (ret)
1074 return ret; 1075 goto put_rpm;
1075 1076
1076 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1077 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1077 if (&obj->base == NULL) { 1078 if (&obj->base == NULL) {
@@ -1123,6 +1124,9 @@ out:
1123 drm_gem_object_unreference(&obj->base); 1124 drm_gem_object_unreference(&obj->base);
1124unlock: 1125unlock:
1125 mutex_unlock(&dev->struct_mutex); 1126 mutex_unlock(&dev->struct_mutex);
1127put_rpm:
1128 intel_runtime_pm_put(dev_priv);
1129
1126 return ret; 1130 return ret;
1127} 1131}
1128 1132
@@ -1840,10 +1844,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1840 drm_gem_free_mmap_offset(&obj->base); 1844 drm_gem_free_mmap_offset(&obj->base);
1841} 1845}
1842 1846
1843static int 1847int
1844i915_gem_mmap_gtt(struct drm_file *file, 1848i915_gem_mmap_gtt(struct drm_file *file,
1845 struct drm_device *dev, 1849 struct drm_device *dev,
1846 uint32_t handle, bool dumb, 1850 uint32_t handle,
1847 uint64_t *offset) 1851 uint64_t *offset)
1848{ 1852{
1849 struct drm_i915_private *dev_priv = dev->dev_private; 1853 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1864,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
1860 goto unlock; 1864 goto unlock;
1861 } 1865 }
1862 1866
1863 /*
1864 * We don't allow dumb mmaps on objects created using another
1865 * interface.
1866 */
1867 WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
1868 "Illegal dumb map of accelerated buffer.\n");
1869
1870 if (obj->base.size > dev_priv->gtt.mappable_end) { 1867 if (obj->base.size > dev_priv->gtt.mappable_end) {
1871 ret = -E2BIG; 1868 ret = -E2BIG;
1872 goto out; 1869 goto out;
@@ -1891,15 +1888,6 @@ unlock:
1891 return ret; 1888 return ret;
1892} 1889}
1893 1890
1894int
1895i915_gem_dumb_map_offset(struct drm_file *file,
1896 struct drm_device *dev,
1897 uint32_t handle,
1898 uint64_t *offset)
1899{
1900 return i915_gem_mmap_gtt(file, dev, handle, true, offset);
1901}
1902
1903/** 1891/**
1904 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 1892 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1905 * @dev: DRM device 1893 * @dev: DRM device
@@ -1921,7 +1909,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1921{ 1909{
1922 struct drm_i915_gem_mmap_gtt *args = data; 1910 struct drm_i915_gem_mmap_gtt *args = data;
1923 1911
1924 return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset); 1912 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1925} 1913}
1926 1914
1927static inline int 1915static inline int
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d17ff435f276..d011ec82ef1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -473,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring,
473 u32 hw_flags) 473 u32 hw_flags)
474{ 474{
475 u32 flags = hw_flags | MI_MM_SPACE_GTT; 475 u32 flags = hw_flags | MI_MM_SPACE_GTT;
476 int ret; 476 const int num_rings =
477 /* Use an extended w/a on ivb+ if signalling from other rings */
478 i915_semaphore_is_enabled(ring->dev) ?
479 hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
480 0;
481 int len, i, ret;
477 482
478 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB 483 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
479 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value 484 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +495,31 @@ mi_set_context(struct intel_engine_cs *ring,
490 if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8) 495 if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
491 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); 496 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
492 497
493 ret = intel_ring_begin(ring, 6); 498
499 len = 4;
500 if (INTEL_INFO(ring->dev)->gen >= 7)
501 len += 2 + (num_rings ? 4*num_rings + 2 : 0);
502
503 ret = intel_ring_begin(ring, len);
494 if (ret) 504 if (ret)
495 return ret; 505 return ret;
496 506
497 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ 507 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
498 if (INTEL_INFO(ring->dev)->gen >= 7) 508 if (INTEL_INFO(ring->dev)->gen >= 7) {
499 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); 509 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
500 else 510 if (num_rings) {
501 intel_ring_emit(ring, MI_NOOP); 511 struct intel_engine_cs *signaller;
512
513 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
514 for_each_ring(signaller, to_i915(ring->dev), i) {
515 if (signaller == ring)
516 continue;
517
518 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
519 intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
520 }
521 }
522 }
502 523
503 intel_ring_emit(ring, MI_NOOP); 524 intel_ring_emit(ring, MI_NOOP);
504 intel_ring_emit(ring, MI_SET_CONTEXT); 525 intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,21 @@ mi_set_context(struct intel_engine_cs *ring,
510 */ 531 */
511 intel_ring_emit(ring, MI_NOOP); 532 intel_ring_emit(ring, MI_NOOP);
512 533
513 if (INTEL_INFO(ring->dev)->gen >= 7) 534 if (INTEL_INFO(ring->dev)->gen >= 7) {
535 if (num_rings) {
536 struct intel_engine_cs *signaller;
537
538 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
539 for_each_ring(signaller, to_i915(ring->dev), i) {
540 if (signaller == ring)
541 continue;
542
543 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
544 intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
545 }
546 }
514 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); 547 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
515 else 548 }
516 intel_ring_emit(ring, MI_NOOP);
517 549
518 intel_ring_advance(ring); 550 intel_ring_advance(ring);
519 551
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f06027ba3ee5..11738316394a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
121 goto err; 121 goto err;
122 } 122 }
123 123
124 WARN_ONCE(obj->base.dumb,
125 "GPU use of dumb buffer is illegal.\n");
126
127 drm_gem_object_reference(&obj->base); 124 drm_gem_object_reference(&obj->base);
128 list_add_tail(&obj->obj_exec_link, &objects); 125 list_add_tail(&obj->obj_exec_link, &objects);
129 } 126 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 981834b0f9b6..d0d3dfbe6d2a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -281,10 +281,14 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
281 struct drm_i915_private *dev_priv = dev->dev_private; 281 struct drm_i915_private *dev_priv = dev->dev_private;
282 282
283 spin_lock_irq(&dev_priv->irq_lock); 283 spin_lock_irq(&dev_priv->irq_lock);
284
284 WARN_ON(dev_priv->rps.pm_iir); 285 WARN_ON(dev_priv->rps.pm_iir);
285 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 286 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
286 dev_priv->rps.interrupts_enabled = true; 287 dev_priv->rps.interrupts_enabled = true;
288 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
289 dev_priv->pm_rps_events);
287 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 290 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
291
288 spin_unlock_irq(&dev_priv->irq_lock); 292 spin_unlock_irq(&dev_priv->irq_lock);
289} 293}
290 294
@@ -3307,8 +3311,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3307 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3311 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3308 3312
3309 if (INTEL_INFO(dev)->gen >= 6) { 3313 if (INTEL_INFO(dev)->gen >= 6) {
3310 pm_irqs |= dev_priv->pm_rps_events; 3314 /*
3311 3315 * RPS interrupts will get enabled/disabled on demand when RPS
3316 * itself is enabled/disabled.
3317 */
3312 if (HAS_VEBOX(dev)) 3318 if (HAS_VEBOX(dev))
3313 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3319 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3314 3320
@@ -3520,7 +3526,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3520 dev_priv->pm_irq_mask = 0xffffffff; 3526 dev_priv->pm_irq_mask = 0xffffffff;
3521 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3527 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3522 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3528 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3523 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events); 3529 /*
3530 * RPS interrupts will get enabled/disabled on demand when RPS itself
3531 * is enabled/disabled.
3532 */
3533 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3524 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3534 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3525} 3535}
3526 3536
@@ -3609,7 +3619,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3609 3619
3610 vlv_display_irq_reset(dev_priv); 3620 vlv_display_irq_reset(dev_priv);
3611 3621
3612 dev_priv->irq_mask = 0; 3622 dev_priv->irq_mask = ~0;
3613} 3623}
3614 3624
3615static void valleyview_irq_uninstall(struct drm_device *dev) 3625static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -3715,8 +3725,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
3715 if ((iir & flip_pending) == 0) 3725 if ((iir & flip_pending) == 0)
3716 goto check_page_flip; 3726 goto check_page_flip;
3717 3727
3718 intel_prepare_page_flip(dev, plane);
3719
3720 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3728 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3721 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3729 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3722 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3730 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3726,6 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
3726 if (I915_READ16(ISR) & flip_pending) 3734 if (I915_READ16(ISR) & flip_pending)
3727 goto check_page_flip; 3735 goto check_page_flip;
3728 3736
3737 intel_prepare_page_flip(dev, plane);
3729 intel_finish_page_flip(dev, pipe); 3738 intel_finish_page_flip(dev, pipe);
3730 return true; 3739 return true;
3731 3740
@@ -3897,8 +3906,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
3897 if ((iir & flip_pending) == 0) 3906 if ((iir & flip_pending) == 0)
3898 goto check_page_flip; 3907 goto check_page_flip;
3899 3908
3900 intel_prepare_page_flip(dev, plane);
3901
3902 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3909 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3903 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3910 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3904 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3911 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3908,6 +3915,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
3908 if (I915_READ(ISR) & flip_pending) 3915 if (I915_READ(ISR) & flip_pending)
3909 goto check_page_flip; 3916 goto check_page_flip;
3910 3917
3918 intel_prepare_page_flip(dev, plane);
3911 intel_finish_page_flip(dev, pipe); 3919 intel_finish_page_flip(dev, pipe);
3912 return true; 3920 return true;
3913 3921
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index eefdc238f70b..172de3b3433b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -395,6 +395,7 @@
395#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) 395#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
396#define PIPE_CONTROL_CS_STALL (1<<20) 396#define PIPE_CONTROL_CS_STALL (1<<20)
397#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) 397#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
398#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
398#define PIPE_CONTROL_QW_WRITE (1<<14) 399#define PIPE_CONTROL_QW_WRITE (1<<14)
399#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) 400#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
400#define PIPE_CONTROL_DEPTH_STALL (1<<13) 401#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1128,6 +1129,7 @@ enum punit_power_well {
1128#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) 1129#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
1129#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) 1130#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
1130#define GEN6_NOSYNC 0 1131#define GEN6_NOSYNC 0
1132#define RING_PSMI_CTL(base) ((base)+0x50)
1131#define RING_MAX_IDLE(base) ((base)+0x54) 1133#define RING_MAX_IDLE(base) ((base)+0x54)
1132#define RING_HWS_PGA(base) ((base)+0x80) 1134#define RING_HWS_PGA(base) ((base)+0x80)
1133#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 1135#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1458,6 +1460,7 @@ enum punit_power_well {
1458#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 1460#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
1459 1461
1460#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 1462#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
1463#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
1461#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) 1464#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
1462#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) 1465#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
1463 1466
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fb3e3d429191..e2af1383b179 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
13057 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 13057 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
13058 udelay(300); 13058 udelay(300);
13059 13059
13060 /* 13060 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
13061 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
13062 * from S3 without preserving (some of?) the other bits.
13063 */
13064 I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
13065 POSTING_READ(vga_reg); 13061 POSTING_READ(vga_reg);
13066} 13062}
13067 13063
@@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)
13146 13142
13147 intel_shared_dpll_init(dev); 13143 intel_shared_dpll_init(dev);
13148 13144
13149 /* save the BIOS value before clobbering it */
13150 dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
13151 /* Just disable it once at startup */ 13145 /* Just disable it once at startup */
13152 i915_disable_vga(dev); 13146 i915_disable_vga(dev);
13153 intel_setup_outputs(dev); 13147 intel_setup_outputs(dev);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1f4b56e273c8..964b28e3c630 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6191,6 +6191,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
6191 valleyview_cleanup_gt_powersave(dev); 6191 valleyview_cleanup_gt_powersave(dev);
6192} 6192}
6193 6193
6194static void gen6_suspend_rps(struct drm_device *dev)
6195{
6196 struct drm_i915_private *dev_priv = dev->dev_private;
6197
6198 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6199
6200 /*
6201 * TODO: disable RPS interrupts on GEN9+ too once RPS support
6202 * is added for it.
6203 */
6204 if (INTEL_INFO(dev)->gen < 9)
6205 gen6_disable_rps_interrupts(dev);
6206}
6207
6194/** 6208/**
6195 * intel_suspend_gt_powersave - suspend PM work and helper threads 6209 * intel_suspend_gt_powersave - suspend PM work and helper threads
6196 * @dev: drm device 6210 * @dev: drm device
@@ -6206,14 +6220,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
6206 if (INTEL_INFO(dev)->gen < 6) 6220 if (INTEL_INFO(dev)->gen < 6)
6207 return; 6221 return;
6208 6222
6209 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 6223 gen6_suspend_rps(dev);
6210
6211 /*
6212 * TODO: disable RPS interrupts on GEN9+ too once RPS support
6213 * is added for it.
6214 */
6215 if (INTEL_INFO(dev)->gen < 9)
6216 gen6_disable_rps_interrupts(dev);
6217 6224
6218 /* Force GPU to min freq during suspend */ 6225 /* Force GPU to min freq during suspend */
6219 gen6_rps_idle(dev_priv); 6226 gen6_rps_idle(dev_priv);
@@ -6316,8 +6323,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
6316{ 6323{
6317 struct drm_i915_private *dev_priv = dev->dev_private; 6324 struct drm_i915_private *dev_priv = dev->dev_private;
6318 6325
6326 if (INTEL_INFO(dev)->gen < 6)
6327 return;
6328
6329 gen6_suspend_rps(dev);
6319 dev_priv->rps.enabled = false; 6330 dev_priv->rps.enabled = false;
6320 intel_enable_gt_powersave(dev);
6321} 6331}
6322 6332
6323static void ibx_init_clock_gating(struct drm_device *dev) 6333static void ibx_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9f445e9a75d1..c7bc93d28d84 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
362 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 362 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
363 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 363 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
364 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 364 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
365 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
365 /* 366 /*
366 * TLB invalidate requires a post-sync write. 367 * TLB invalidate requires a post-sync write.
367 */ 368 */
368 flags |= PIPE_CONTROL_QW_WRITE; 369 flags |= PIPE_CONTROL_QW_WRITE;
369 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 370 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
370 371
372 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
373
371 /* Workaround: we must issue a pipe_control with CS-stall bit 374 /* Workaround: we must issue a pipe_control with CS-stall bit
372 * set before a pipe_control command that has the state cache 375 * set before a pipe_control command that has the state cache
373 * invalidate bit set. */ 376 * invalidate bit set. */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index f5a78d53e297..ac6da7102fbb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
615 vlv_power_sequencer_reset(dev_priv); 615 vlv_power_sequencer_reset(dev_priv);
616} 616}
617 617
618static void check_power_well_state(struct drm_i915_private *dev_priv,
619 struct i915_power_well *power_well)
620{
621 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
622
623 if (power_well->always_on || !i915.disable_power_well) {
624 if (!enabled)
625 goto mismatch;
626
627 return;
628 }
629
630 if (enabled != (power_well->count > 0))
631 goto mismatch;
632
633 return;
634
635mismatch:
636 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
637 power_well->name, power_well->always_on, enabled,
638 power_well->count, i915.disable_power_well);
639}
640
641/** 618/**
642 * intel_display_power_get - grab a power domain reference 619 * intel_display_power_get - grab a power domain reference
643 * @dev_priv: i915 device instance 620 * @dev_priv: i915 device instance
@@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
669 power_well->ops->enable(dev_priv, power_well); 646 power_well->ops->enable(dev_priv, power_well);
670 power_well->hw_enabled = true; 647 power_well->hw_enabled = true;
671 } 648 }
672
673 check_power_well_state(dev_priv, power_well);
674 } 649 }
675 650
676 power_domains->domain_use_count[domain]++; 651 power_domains->domain_use_count[domain]++;
@@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
709 power_well->hw_enabled = false; 684 power_well->hw_enabled = false;
710 power_well->ops->disable(dev_priv, power_well); 685 power_well->ops->disable(dev_priv, power_well);
711 } 686 }
712
713 check_power_well_state(dev_priv, power_well);
714 } 687 }
715 688
716 mutex_unlock(&power_domains->lock); 689 mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index aa873048308b..94a5bee69fe7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
386 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); 386 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
387 drm_gem_object_unreference(gpu->memptrs_bo); 387 drm_gem_object_unreference(gpu->memptrs_bo);
388 } 388 }
389 if (gpu->pm4) 389 release_firmware(gpu->pm4);
390 release_firmware(gpu->pm4); 390 release_firmware(gpu->pfp);
391 if (gpu->pfp)
392 release_firmware(gpu->pfp);
393 msm_gpu_cleanup(&gpu->base); 391 msm_gpu_cleanup(&gpu->base);
394} 392}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index fbebb0405d76..b4e70e0e3cfa 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
141 uint32_t hpd_ctrl; 141 uint32_t hpd_ctrl;
142 int i, ret; 142 int i, ret;
143 143
144 for (i = 0; i < config->hpd_reg_cnt; i++) {
145 ret = regulator_enable(hdmi->hpd_regs[i]);
146 if (ret) {
147 dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
148 config->hpd_reg_names[i], ret);
149 goto fail;
150 }
151 }
152
144 ret = gpio_config(hdmi, true); 153 ret = gpio_config(hdmi, true);
145 if (ret) { 154 if (ret) {
146 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret); 155 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
164 } 173 }
165 } 174 }
166 175
167 for (i = 0; i < config->hpd_reg_cnt; i++) {
168 ret = regulator_enable(hdmi->hpd_regs[i]);
169 if (ret) {
170 dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
171 config->hpd_reg_names[i], ret);
172 goto fail;
173 }
174 }
175
176 hdmi_set_mode(hdmi, false); 176 hdmi_set_mode(hdmi, false);
177 phy->funcs->reset(phy); 177 phy->funcs->reset(phy);
178 hdmi_set_mode(hdmi, true); 178 hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ fail:
200 return ret; 200 return ret;
201} 201}
202 202
203static int hdp_disable(struct hdmi_connector *hdmi_connector) 203static void hdp_disable(struct hdmi_connector *hdmi_connector)
204{ 204{
205 struct hdmi *hdmi = hdmi_connector->hdmi; 205 struct hdmi *hdmi = hdmi_connector->hdmi;
206 const struct hdmi_platform_config *config = hdmi->config; 206 const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
212 212
213 hdmi_set_mode(hdmi, false); 213 hdmi_set_mode(hdmi, false);
214 214
215 for (i = 0; i < config->hpd_reg_cnt; i++) {
216 ret = regulator_disable(hdmi->hpd_regs[i]);
217 if (ret) {
218 dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
219 config->hpd_reg_names[i], ret);
220 goto fail;
221 }
222 }
223
224 for (i = 0; i < config->hpd_clk_cnt; i++) 215 for (i = 0; i < config->hpd_clk_cnt; i++)
225 clk_disable_unprepare(hdmi->hpd_clks[i]); 216 clk_disable_unprepare(hdmi->hpd_clks[i]);
226 217
227 ret = gpio_config(hdmi, false);
228 if (ret) {
229 dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
230 goto fail;
231 }
232
233 return 0;
234
235fail:
236 return ret;
218 ret = gpio_config(hdmi, false);
219 if (ret)
220 dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
221
222 for (i = 0; i < config->hpd_reg_cnt; i++) {
223 ret = regulator_disable(hdmi->hpd_regs[i]);
224 if (ret)
225 dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
226 config->hpd_reg_names[i], ret);
227 }
237} 228}
238 229
239static void 230static void
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
260 (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) { 251 (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
261 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED); 252 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
262 253
263 DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl); 254 /* ack & disable (temporarily) HPD events: */
264
265 /* ack the irq: */
266 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 255 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
267 hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); 256 HDMI_HPD_INT_CTRL_INT_ACK);
257
258 DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
268 259
269 /* detect disconnect if we are connected or vice versa: */ 260 /* detect disconnect if we are connected or vice versa: */
270 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; 261 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
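The hpd_enable()/hdp_disable() hunks above move the HPD regulator enable to the front of the bring-up sequence and its disable to the very end of teardown, and the teardown path now only warns via dev_warn() instead of bailing out through goto fail. A small sketch of that mirrored-ordering idea, with invented helpers rather than the msm driver API:

/* Sketch of symmetric bring-up/teardown ordering: enable A then B then C,
 * disable C then B then A, and never abort mid-teardown, only warn.
 * All step names here are made up for illustration.
 */
#include <stdio.h>

static int enable_regulators(void)   { puts("regulators on");    return 0; }
static int config_gpios(void)        { puts("gpios configured"); return 0; }
static int enable_clocks(void)       { puts("clocks on");        return 0; }

static int disable_clocks(void)      { puts("clocks off");       return 0; }
static int teardown_gpios(void)      { puts("gpios released");   return 0; }
static int disable_regulators(void)  { puts("regulators off");   return 0; }

static int hpd_up(void)
{
	int ret;

	ret = enable_regulators();
	if (ret)
		return ret;                 /* nothing else to unwind yet */
	ret = config_gpios();
	if (ret)
		goto err_regs;
	ret = enable_clocks();
	if (ret)
		goto err_gpios;
	return 0;

err_gpios:
	teardown_gpios();
err_regs:
	disable_regulators();
	return ret;
}

static void hpd_down(void)
{
	/* reverse order of hpd_up(); errors are only reported */
	if (disable_clocks())
		fprintf(stderr, "warning: clocks refused to stop\n");
	if (teardown_gpios())
		fprintf(stderr, "warning: gpio teardown failed\n");
	if (disable_regulators())
		fprintf(stderr, "warning: regulator disable failed\n");
}

int main(void)
{
	if (hpd_up() == 0)
		hpd_down();
	return 0;
}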
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index a7672e100d8b..3449213f1e76 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
331 struct drm_crtc_state *state) 331 struct drm_crtc_state *state)
332{ 332{
333 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 333 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
334 struct drm_device *dev = crtc->dev;
335
336 DBG("%s: check", mdp4_crtc->name); 334 DBG("%s: check", mdp4_crtc->name);
337
338 if (mdp4_crtc->event) {
339 dev_err(dev->dev, "already pending flip!\n");
340 return -EBUSY;
341 }
342
343 // TODO anything else to check? 335 // TODO anything else to check?
344
345 return 0; 336 return 0;
346} 337}
347 338
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
357 struct drm_device *dev = crtc->dev; 348 struct drm_device *dev = crtc->dev;
358 unsigned long flags; 349 unsigned long flags;
359 350
360 DBG("%s: flush", mdp4_crtc->name); 351 DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
361 352
362 WARN_ON(mdp4_crtc->event); 353 WARN_ON(mdp4_crtc->event);
363 354
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 0e9a2e3a82d7..f021f960a8a2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
303 303
304 DBG("%s: check", mdp5_crtc->name); 304 DBG("%s: check", mdp5_crtc->name);
305 305
306 if (mdp5_crtc->event) {
307 dev_err(dev->dev, "already pending flip!\n");
308 return -EBUSY;
309 }
310
311 /* request a free CTL, if none is already allocated for this CRTC */ 306 /* request a free CTL, if none is already allocated for this CRTC */
312 if (state->enable && !mdp5_crtc->ctl) { 307 if (state->enable && !mdp5_crtc->ctl) {
313 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc); 308 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
364 struct drm_device *dev = crtc->dev; 359 struct drm_device *dev = crtc->dev;
365 unsigned long flags; 360 unsigned long flags;
366 361
367 DBG("%s: flush", mdp5_crtc->name); 362 DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
368 363
369 WARN_ON(mdp5_crtc->event); 364 WARN_ON(mdp5_crtc->event);
370 365
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
460 /* now that we know what irq's we want: */ 455 /* now that we know what irq's we want: */
461 mdp5_crtc->err.irqmask = intf2err(intf); 456 mdp5_crtc->err.irqmask = intf2err(intf);
462 mdp5_crtc->vblank.irqmask = intf2vblank(intf); 457 mdp5_crtc->vblank.irqmask = intf2vblank(intf);
463 458 mdp_irq_update(&mdp5_kms->base);
464 /* when called from modeset_init(), skip the rest until later: */
465 if (!mdp5_kms)
466 return;
467 459
468 spin_lock_irqsave(&mdp5_kms->resource_lock, flags); 460 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
469 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); 461 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index a11f1b80c488..9f01a4f21af2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
216 goto fail; 216 goto fail;
217 } 217 }
218 218
219 /* NOTE: the vsync and error irq's are actually associated with
220 * the INTF/encoder.. the easiest way to deal with this (ie. what
221 * we do now) is assume a fixed relationship between crtc's and
222 * encoders. I'm not sure if there is ever a need to more freely
223 * assign crtcs to encoders, but if there is then we need to take
224 * care of error and vblank irq's that the crtc has registered,
225 * and also update user-requested vblank_mask.
226 */
227 encoder->possible_crtcs = BIT(0);
228 mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
219 encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
229
230 priv->encoders[priv->num_encoders++] = encoder; 220 priv->encoders[priv->num_encoders++] = encoder;
231 221
232 /* Construct bridge/connector for HDMI: */ 222 /* Construct bridge/connector for HDMI: */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 03455b64a245..2a731722d840 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
42 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask); 42 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
43} 43}
44 44
45static void update_irq_unlocked(struct mdp_kms *mdp_kms)
45/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
46 * link changes, this must be called to figure out the new global irqmask
47 */
48void mdp_irq_update(struct mdp_kms *mdp_kms)
46{ 49{
47 unsigned long flags; 50 unsigned long flags;
48 spin_lock_irqsave(&list_lock, flags); 51 spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
122 spin_unlock_irqrestore(&list_lock, flags); 125 spin_unlock_irqrestore(&list_lock, flags);
123 126
124 if (needs_update) 127 if (needs_update)
125 update_irq_unlocked(mdp_kms); 128 mdp_irq_update(mdp_kms);
126} 129}
127 130
128void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) 131void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
141 spin_unlock_irqrestore(&list_lock, flags); 144 spin_unlock_irqrestore(&list_lock, flags);
142 145
143 if (needs_update) 146 if (needs_update)
144 update_irq_unlocked(mdp_kms); 147 mdp_irq_update(mdp_kms);
145} 148}
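mdp_irq_update() (the renamed update_irq_unlocked() above) recomputes the single hardware irqmask whenever any consumer's mask changes, essentially OR-ing together the masks of all registered mdp_irq users before writing the result out through set_irqmask(). The helper body is not shown in the hunk, so the following is only a stand-alone model of that aggregation step, with illustrative structures rather than the real mdp_kms ones:

/* Model of recomputing a global irq mask as the OR of all registered
 * consumers' masks; not the actual mdp_kms structures.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_CONSUMERS 8

struct consumer {
	uint32_t irqmask;
	int registered;
};

struct irq_state {
	struct consumer consumers[MAX_CONSUMERS];
	uint32_t vblank_mask;     /* assumed extra contributor, for illustration */
	uint32_t hw_mask;         /* what would be written to the register */
};

static void irq_update(struct irq_state *s)
{
	uint32_t mask = s->vblank_mask;

	for (int i = 0; i < MAX_CONSUMERS; i++)
		if (s->consumers[i].registered)
			mask |= s->consumers[i].irqmask;

	s->hw_mask = mask;        /* stands in for set_irqmask() */
	printf("hw irqmask = 0x%08x\n", mask);
}

int main(void)
{
	struct irq_state s = { .vblank_mask = 0x1 };

	s.consumers[0] = (struct consumer){ .irqmask = 0x10, .registered = 1 };
	irq_update(&s);                       /* 0x11 */

	s.consumers[0].irqmask = 0x20;        /* e.g. crtc moved to another intf */
	irq_update(&s);                       /* 0x21 */
	return 0;
}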
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 99557b5ad4fd..b268ce95d394 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
75void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask); 75void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
76void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq); 76void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
77void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq); 77void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
78 78void mdp_irq_update(struct mdp_kms *mdp_kms);
79 79
80/* 80/*
81 * pixel format helpers: 81 * pixel format helpers:
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0de412e13dc..191968256c58 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -23,10 +23,41 @@ struct msm_commit {
23 struct drm_atomic_state *state; 23 struct drm_atomic_state *state;
24 uint32_t fence; 24 uint32_t fence;
25 struct msm_fence_cb fence_cb; 25 struct msm_fence_cb fence_cb;
26 uint32_t crtc_mask;
26}; 27};
27 28
28static void fence_cb(struct msm_fence_cb *cb); 29static void fence_cb(struct msm_fence_cb *cb);
29 30
31/* block until specified crtcs are no longer pending update, and
32 * atomically mark them as pending update
33 */
34static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
35{
36 int ret;
37
38 spin_lock(&priv->pending_crtcs_event.lock);
39 ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
40 !(priv->pending_crtcs & crtc_mask));
41 if (ret == 0) {
42 DBG("start: %08x", crtc_mask);
43 priv->pending_crtcs |= crtc_mask;
44 }
45 spin_unlock(&priv->pending_crtcs_event.lock);
46
47 return ret;
48}
49
50/* clear specified crtcs (no longer pending update)
51 */
52static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
53{
54 spin_lock(&priv->pending_crtcs_event.lock);
55 DBG("end: %08x", crtc_mask);
56 priv->pending_crtcs &= ~crtc_mask;
57 wake_up_all_locked(&priv->pending_crtcs_event);
58 spin_unlock(&priv->pending_crtcs_event.lock);
59}
60
30static struct msm_commit *new_commit(struct drm_atomic_state *state) 61static struct msm_commit *new_commit(struct drm_atomic_state *state)
31{ 62{
32 struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL); 63 struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)
58 89
59 drm_atomic_helper_commit_post_planes(dev, state); 90 drm_atomic_helper_commit_post_planes(dev, state);
60 91
92 /* NOTE: _wait_for_vblanks() only waits for vblank on
93 * enabled CRTCs. So we end up faulting when disabling
94 * due to (potentially) unref'ing the outgoing fb's
95 * before the vblank when the disable has latched.
96 *
97 * But if it did wait on disabled (or newly disabled)
98 * CRTCs, that would be racy (ie. we could have missed
99 * the irq. We need some way to poll for pipe shut
100 * down. Or just live with occasionally hitting the
101 * timeout in the CRTC disable path (which really should
102 * not be critical path)
103 */
104
61 drm_atomic_helper_wait_for_vblanks(dev, state); 105 drm_atomic_helper_wait_for_vblanks(dev, state);
62 106
63 drm_atomic_helper_cleanup_planes(dev, state); 107 drm_atomic_helper_cleanup_planes(dev, state);
64 108
65 drm_atomic_state_free(state); 109 drm_atomic_state_free(state);
66 110
111 end_atomic(dev->dev_private, c->crtc_mask);
112
67 kfree(c); 113 kfree(c);
68} 114}
69 115
@@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
97int msm_atomic_commit(struct drm_device *dev, 143int msm_atomic_commit(struct drm_device *dev,
98 struct drm_atomic_state *state, bool async) 144 struct drm_atomic_state *state, bool async)
99{ 145{
100 struct msm_commit *c;
101 int nplanes = dev->mode_config.num_total_plane; 146 int nplanes = dev->mode_config.num_total_plane;
147 int ncrtcs = dev->mode_config.num_crtc;
148 struct msm_commit *c;
102 int i, ret; 149 int i, ret;
103 150
104 ret = drm_atomic_helper_prepare_planes(dev, state); 151 ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
106 return ret; 153 return ret;
107 154
108 c = new_commit(state); 155 c = new_commit(state);
156 if (!c)
157 return -ENOMEM;
158
159 /*
160 * Figure out what crtcs we have:
161 */
162 for (i = 0; i < ncrtcs; i++) {
163 struct drm_crtc *crtc = state->crtcs[i];
164 if (!crtc)
165 continue;
166 c->crtc_mask |= (1 << drm_crtc_index(crtc));
167 }
109 168
110 /* 169 /*
111 * Figure out what fence to wait for: 170 * Figure out what fence to wait for:
@@ -122,6 +181,14 @@ int msm_atomic_commit(struct drm_device *dev,
122 } 181 }
123 182
124 /* 183 /*
184 * Wait for pending updates on any of the same crtc's and then
185 * mark our set of crtc's as busy:
186 */
187 ret = start_atomic(dev->dev_private, c->crtc_mask);
188 if (ret)
189 return ret;
190
191 /*
125 * This is the point of no return - everything below never fails except 192 * This is the point of no return - everything below never fails except
126 * when the hw goes bonghits. Which means we can commit the new state on 193 * when the hw goes bonghits. Which means we can commit the new state on
127 * the software side now. 194 * the software side now.
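start_atomic()/end_atomic() in the msm_atomic.c hunks serialize async commits per CRTC: a commit blocks until none of its CRTCs are marked pending, claims their bits, and clears them (waking any waiters) once the commit completes. Below is a userspace analogue of that mask-based gate, using a pthread mutex and condition variable in place of the kernel waitqueue; it is a sketch of the pattern, not the kernel primitives:

/* Userspace analogue of start_atomic()/end_atomic(): block while any of
 * our crtc bits are pending, then claim them; clear and wake on completion.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static uint32_t pending_crtcs;

static void start_atomic(uint32_t crtc_mask)
{
	pthread_mutex_lock(&lock);
	while (pending_crtcs & crtc_mask)      /* overlap: wait our turn */
		pthread_cond_wait(&cond, &lock);
	pending_crtcs |= crtc_mask;
	printf("start: %08x (pending now %08x)\n", crtc_mask, pending_crtcs);
	pthread_mutex_unlock(&lock);
}

static void end_atomic(uint32_t crtc_mask)
{
	pthread_mutex_lock(&lock);
	pending_crtcs &= ~crtc_mask;
	printf("end:   %08x (pending now %08x)\n", crtc_mask, pending_crtcs);
	pthread_cond_broadcast(&cond);         /* wake_up_all equivalent */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	start_atomic(1u << 0);      /* commit touching crtc 0 */
	start_atomic(1u << 1);      /* disjoint crtc, does not block */
	end_atomic(1u << 1);
	end_atomic(1u << 0);
	return 0;
}

The kernel version additionally lets the wait be interrupted by a signal (wait_event_interruptible_locked), which this sketch leaves out.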
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c795217e1bfc..9a61546a0b05 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
193 193
194 priv->wq = alloc_ordered_workqueue("msm", 0); 194 priv->wq = alloc_ordered_workqueue("msm", 0);
195 init_waitqueue_head(&priv->fence_event); 195 init_waitqueue_head(&priv->fence_event);
196 init_waitqueue_head(&priv->pending_crtcs_event);
196 197
197 INIT_LIST_HEAD(&priv->inactive_list); 198 INIT_LIST_HEAD(&priv->inactive_list);
198 INIT_LIST_HEAD(&priv->fence_cbs); 199 INIT_LIST_HEAD(&priv->fence_cbs);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 136303818436..b69ef2d5a26c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -96,6 +96,10 @@ struct msm_drm_private {
96 /* callbacks deferred until bo is inactive: */ 96 /* callbacks deferred until bo is inactive: */
97 struct list_head fence_cbs; 97 struct list_head fence_cbs;
98 98
99 /* crtcs pending async atomic updates: */
100 uint32_t pending_crtcs;
101 wait_queue_head_t pending_crtcs_event;
102
99 /* registered MMUs: */ 103 /* registered MMUs: */
100 unsigned int num_mmus; 104 unsigned int num_mmus;
101 struct msm_mmu *mmus[NUM_DOMAINS]; 105 struct msm_mmu *mmus[NUM_DOMAINS];
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 94d55e526b4e..1f3af13ccede 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -190,8 +190,7 @@ fail_unlock:
190fail: 190fail:
191 191
192 if (ret) { 192 if (ret) {
193 if (fbi) 193 framebuffer_release(fbi);
194 framebuffer_release(fbi);
195 if (fb) { 194 if (fb) {
196 drm_framebuffer_unregister_private(fb); 195 drm_framebuffer_unregister_private(fb);
197 drm_framebuffer_remove(fb); 196 drm_framebuffer_remove(fb);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4a6f0e49d5b5..49dea4fb55ac 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
535 drm_free_large(msm_obj->pages); 535 drm_free_large(msm_obj->pages);
536 536
537 } else { 537 } else {
538 if (msm_obj->vaddr) 538 vunmap(msm_obj->vaddr);
539 vunmap(msm_obj->vaddr);
540 put_pages(obj); 539 put_pages(obj);
541 } 540 }
542 541
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index ff2b434b3db4..760947e380c9 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -26,7 +26,7 @@
26void 26void
27nvkm_event_put(struct nvkm_event *event, u32 types, int index) 27nvkm_event_put(struct nvkm_event *event, u32 types, int index)
28{ 28{
29 BUG_ON(!spin_is_locked(&event->refs_lock)); 29 assert_spin_locked(&event->refs_lock);
30 while (types) { 30 while (types) {
31 int type = __ffs(types); types &= ~(1 << type); 31 int type = __ffs(types); types &= ~(1 << type);
32 if (--event->refs[index * event->types_nr + type] == 0) { 32 if (--event->refs[index * event->types_nr + type] == 0) {
@@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
39void 39void
40nvkm_event_get(struct nvkm_event *event, u32 types, int index) 40nvkm_event_get(struct nvkm_event *event, u32 types, int index)
41{ 41{
42 BUG_ON(!spin_is_locked(&event->refs_lock)); 42 assert_spin_locked(&event->refs_lock);
43 while (types) { 43 while (types) {
44 int type = __ffs(types); types &= ~(1 << type); 44 int type = __ffs(types); types &= ~(1 << type);
45 if (++event->refs[index * event->types_nr + type] == 1) { 45 if (++event->refs[index * event->types_nr + type] == 1) {
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
index d1bcde55e9d7..839a32577680 100644
--- a/drivers/gpu/drm/nouveau/core/core/notify.c
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
98 struct nvkm_event *event = notify->event; 98 struct nvkm_event *event = notify->event;
99 unsigned long flags; 99 unsigned long flags;
100 100
101 BUG_ON(!spin_is_locked(&event->list_lock)); 101 assert_spin_locked(&event->list_lock);
102 BUG_ON(size != notify->size); 102 BUG_ON(size != notify->size);
103 103
104 spin_lock_irqsave(&event->refs_lock, flags); 104 spin_lock_irqsave(&event->refs_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 674da1f095b2..732922690653 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
249 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 249 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
250 device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass; 250 device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
251 break; 251 break;
252 case 0x106:
253 device->cname = "GK208B";
254 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
255 device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
256 device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
257 device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
258 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
259 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
260 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
261 device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
262 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
263 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
264 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
265 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
266 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
267 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
268 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
269 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
270 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
271 device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
272 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
273 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
274 device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
275 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
276 device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
277 device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
278 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
279 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
280 device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
281 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
282 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
283 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
284 break;
252 case 0x108: 285 case 0x108:
253 device->cname = "GK208"; 286 device->cname = "GK208";
254 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 287 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
index 5e58bba0dd5c..a7a890fad1e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
@@ -44,8 +44,10 @@ static void
44pramin_fini(void *data) 44pramin_fini(void *data)
45{ 45{
46 struct priv *priv = data; 46 struct priv *priv = data;
47 nv_wr32(priv->bios, 0x001700, priv->bar0); 47 if (priv) {
48 kfree(priv); 48 nv_wr32(priv->bios, 0x001700, priv->bar0);
49 kfree(priv);
50 }
49} 51}
50 52
51static void * 53static void *
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
index 00f2ca7e44a5..033a8e999497 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -24,34 +24,71 @@
24 24
25#include "nv50.h" 25#include "nv50.h"
26 26
27struct nvaa_ram_priv {
28 struct nouveau_ram base;
29 u64 poller_base;
30};
31
27static int 32static int
28nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 33nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
29 struct nouveau_oclass *oclass, void *data, u32 datasize, 34 struct nouveau_oclass *oclass, void *data, u32 datasize,
30 struct nouveau_object **pobject) 35 struct nouveau_object **pobject)
31{ 36{
32 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 37 u32 rsvd_head = ( 256 * 1024); /* vga memory */
33 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 38 u32 rsvd_tail = (1024 * 1024); /* vbios etc */
34 struct nouveau_fb *pfb = nouveau_fb(parent); 39 struct nouveau_fb *pfb = nouveau_fb(parent);
35 struct nouveau_ram *ram; 40 struct nvaa_ram_priv *priv;
36 int ret; 41 int ret;
37 42
38 ret = nouveau_ram_create(parent, engine, oclass, &ram); 43 ret = nouveau_ram_create(parent, engine, oclass, &priv);
39 *pobject = nv_object(ram); 44 *pobject = nv_object(priv);
40 if (ret) 45 if (ret)
41 return ret; 46 return ret;
42 47
43 ram->size = nv_rd32(pfb, 0x10020c); 48 priv->base.type = NV_MEM_TYPE_STOLEN;
44 ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32); 49 priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
50 priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;
45 51
46 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) - 52 rsvd_tail += 0x1000;
47 (rsvd_head + rsvd_tail), 1); 53 priv->poller_base = priv->base.size - rsvd_tail;
54
55 ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
56 (priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
57 1);
48 if (ret) 58 if (ret)
49 return ret; 59 return ret;
50 60
51 ram->type = NV_MEM_TYPE_STOLEN; 61 priv->base.get = nv50_ram_get;
52 ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; 62 priv->base.put = nv50_ram_put;
53 ram->get = nv50_ram_get; 63 return 0;
54 ram->put = nv50_ram_put; 64}
65
66static int
67nvaa_ram_init(struct nouveau_object *object)
68{
69 struct nouveau_fb *pfb = nouveau_fb(object);
70 struct nvaa_ram_priv *priv = (void *)object;
71 int ret;
72 u64 dniso, hostnb, flush;
73
74 ret = nouveau_ram_init(&priv->base);
75 if (ret)
76 return ret;
77
78 dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
79 hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
80 flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
81
82 /* Enable NISO poller for various clients and set their associated
83 * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
84 */
85 nv_wr32(pfb, 0x100c18, dniso);
86 nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
87 nv_wr32(pfb, 0x100c1c, hostnb);
88 nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
89 nv_wr32(pfb, 0x100c24, flush);
90 nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
91
55 return 0; 92 return 0;
56} 93}
57 94
@@ -60,7 +97,7 @@ nvaa_ram_oclass = {
60 .ofuncs = &(struct nouveau_ofuncs) { 97 .ofuncs = &(struct nouveau_ofuncs) {
61 .ctor = nvaa_ram_ctor, 98 .ctor = nvaa_ram_ctor,
62 .dtor = _nouveau_ram_dtor, 99 .dtor = _nouveau_ram_dtor,
63 .init = _nouveau_ram_init, 100 .init = nvaa_ram_init,
64 .fini = _nouveau_ram_fini, 101 .fini = _nouveau_ram_fini,
65 }, 102 },
66}; 103};
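nvaa_ram_ctor() above now keeps the head/tail reservations in bytes, carves an extra 4 KiB page off the tail for the NISO poller, and hands the remaining range to nouveau_mm_init() in 4 KiB units; nvaa_ram_init() then derives the three poller read offsets from poller_base. A quick check of that arithmetic for an assumed 256 MiB of stolen memory (the size is made up purely for illustration):

/* Reproduces the reservation math from nvaa_ram_ctor()/nvaa_ram_init()
 * for an assumed VRAM size; purely arithmetic, no hardware access.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size      = 256ull << 20;        /* assumed stolen size */
	uint64_t rsvd_head = 256  * 1024;         /* vga memory */
	uint64_t rsvd_tail = 1024 * 1024;         /* vbios etc */

	rsvd_tail += 0x1000;                      /* one page for the poller */
	uint64_t poller_base = size - rsvd_tail;

	/* range handed to the allocator, in 4 KiB pages */
	uint64_t first_page = rsvd_head >> 12;
	uint64_t npages     = (size - (rsvd_head + rsvd_tail)) >> 12;

	/* poller read offsets, encoded as ((size - addr) >> 5) - 1 */
	uint64_t dniso  = ((size - (poller_base + 0x00)) >> 5) - 1;
	uint64_t hostnb = ((size - (poller_base + 0x20)) >> 5) - 1;
	uint64_t flush  = ((size - (poller_base + 0x40)) >> 5) - 1;

	printf("poller_base = 0x%" PRIx64 "\n", poller_base);
	printf("mm range    = page %" PRIu64 ", %" PRIu64 " pages\n",
	       first_page, npages);
	printf("dniso=0x%" PRIx64 " hostnb=0x%" PRIx64 " flush=0x%" PRIx64 "\n",
	       dniso, hostnb, flush);
	return 0;
}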
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
index a75c35ccf25c..165401c4045c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -24,13 +24,6 @@
24 24
25#include "nv04.h" 25#include "nv04.h"
26 26
27static void
28nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
29{
30 struct nv04_mc_priv *priv = (void *)pmc;
31 nv_wr08(priv, 0x088050, 0xff);
32}
33
34struct nouveau_oclass * 27struct nouveau_oclass *
35nv4c_mc_oclass = &(struct nouveau_mc_oclass) { 28nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
36 .base.handle = NV_SUBDEV(MC, 0x4c), 29 .base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
41 .fini = _nouveau_mc_fini, 34 .fini = _nouveau_mc_fini,
42 }, 35 },
43 .intr = nv04_mc_intr, 36 .intr = nv04_mc_intr,
44 .msi_rearm = nv4c_mc_msi_rearm,
45}.base; 37}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 21ec561edc99..bba2960d3dfb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1572 * so use the DMA API for them. 1572 * so use the DMA API for them.
1573 */ 1573 */
1574 if (!nv_device_is_cpu_coherent(device) && 1574 if (!nv_device_is_cpu_coherent(device) &&
1575 ttm->caching_state == tt_uncached) 1575 ttm->caching_state == tt_uncached) {
1576 ttm_dma_unpopulate(ttm_dma, dev->dev); 1576 ttm_dma_unpopulate(ttm_dma, dev->dev);
1577 return;
1578 }
1577 1579
1578#if __OS_HAS_AGP 1580#if __OS_HAS_AGP
1579 if (drm->agp.stat == ENABLED) { 1581 if (drm->agp.stat == ENABLED) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 5d93902a91ab..f8042433752b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
876 if (ret) 876 if (ret)
877 return ret; 877 return ret;
878 878
879 bo->gem.dumb = true;
880 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle); 879 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
881 drm_gem_object_unreference_unlocked(&bo->gem); 880 drm_gem_object_unreference_unlocked(&bo->gem);
882 return ret; 881 return ret;
@@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
892 gem = drm_gem_object_lookup(dev, file_priv, handle); 891 gem = drm_gem_object_lookup(dev, file_priv, handle);
893 if (gem) { 892 if (gem) {
894 struct nouveau_bo *bo = nouveau_gem_object(gem); 893 struct nouveau_bo *bo = nouveau_gem_object(gem);
895
896 /*
897 * We don't allow dumb mmaps on objects created using another
898 * interface.
899 */
900 WARN_ONCE(!(gem->dumb || gem->import_attach),
901 "Illegal dumb map of accelerated buffer.\n");
902
903 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); 894 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
904 drm_gem_object_unreference_unlocked(gem); 895 drm_gem_object_unreference_unlocked(gem);
905 return 0; 896 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 28d51a22a4bf..bf0f9e21d714 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -36,7 +36,14 @@ void
36nouveau_gem_object_del(struct drm_gem_object *gem) 36nouveau_gem_object_del(struct drm_gem_object *gem)
37{ 37{
38 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 38 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
39 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
39 struct ttm_buffer_object *bo = &nvbo->bo; 40 struct ttm_buffer_object *bo = &nvbo->bo;
41 struct device *dev = drm->dev->dev;
42 int ret;
43
44 ret = pm_runtime_get_sync(dev);
45 if (WARN_ON(ret < 0 && ret != -EACCES))
46 return;
40 47
41 if (gem->import_attach) 48 if (gem->import_attach)
42 drm_prime_gem_destroy(gem, nvbo->bo.sg); 49 drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
46 /* reset filp so nouveau_bo_del_ttm() can test for it */ 53 /* reset filp so nouveau_bo_del_ttm() can test for it */
47 gem->filp = NULL; 54 gem->filp = NULL;
48 ttm_bo_unref(&bo); 55 ttm_bo_unref(&bo);
56
57 pm_runtime_mark_last_busy(dev);
58 pm_runtime_put_autosuspend(dev);
49} 59}
50 60
51int 61int
@@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
53{ 63{
54 struct nouveau_cli *cli = nouveau_cli(file_priv); 64 struct nouveau_cli *cli = nouveau_cli(file_priv);
55 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 65 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
66 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
56 struct nouveau_vma *vma; 67 struct nouveau_vma *vma;
68 struct device *dev = drm->dev->dev;
57 int ret; 69 int ret;
58 70
59 if (!cli->vm) 71 if (!cli->vm)
@@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
71 goto out; 83 goto out;
72 } 84 }
73 85
86 ret = pm_runtime_get_sync(dev);
87 if (ret < 0 && ret != -EACCES)
88 goto out;
89
74 ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); 90 ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
75 if (ret) { 91 if (ret)
76 kfree(vma); 92 kfree(vma);
77 goto out; 93
78 } 94 pm_runtime_mark_last_busy(dev);
95 pm_runtime_put_autosuspend(dev);
79 } else { 96 } else {
80 vma->refcount++; 97 vma->refcount++;
81 } 98 }
@@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
129{ 146{
130 struct nouveau_cli *cli = nouveau_cli(file_priv); 147 struct nouveau_cli *cli = nouveau_cli(file_priv);
131 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 148 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
149 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
150 struct device *dev = drm->dev->dev;
132 struct nouveau_vma *vma; 151 struct nouveau_vma *vma;
133 int ret; 152 int ret;
134 153
@@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
141 160
142 vma = nouveau_bo_vma_find(nvbo, cli->vm); 161 vma = nouveau_bo_vma_find(nvbo, cli->vm);
143 if (vma) { 162 if (vma) {
144 if (--vma->refcount == 0) 163 if (--vma->refcount == 0) {
145 nouveau_gem_object_unmap(nvbo, vma); 164 ret = pm_runtime_get_sync(dev);
165 if (!WARN_ON(ret < 0 && ret != -EACCES)) {
166 nouveau_gem_object_unmap(nvbo, vma);
167 pm_runtime_mark_last_busy(dev);
168 pm_runtime_put_autosuspend(dev);
169 }
170 }
146 } 171 }
147 ttm_bo_unreserve(&nvbo->bo); 172 ttm_bo_unreserve(&nvbo->bo);
148} 173}
@@ -444,9 +469,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
444 list_for_each_entry(nvbo, list, entry) { 469 list_for_each_entry(nvbo, list, entry) {
445 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; 470 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
446 471
447 WARN_ONCE(nvbo->gem.dumb,
448 "GPU use of dumb buffer is illegal.\n");
449
450 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, 472 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
451 b->write_domains, 473 b->write_domains,
452 b->valid_domains); 474 b->valid_domains);
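The nouveau_gem hunks wrap object open, close and delete in pm_runtime_get_sync()/pm_runtime_put_autosuspend() and treat -EACCES (runtime PM disabled for the device, i.e. the GPU is always powered) as a non-error. A minimal userspace model of that "wake the device, do the work, let it autosuspend" bracket follows; rpm_get()/rpm_put() are invented stand-ins, not the real runtime-PM API:

/* Stand-in model of the runtime-PM bracket used in the hunks above.
 * rpm_get()/rpm_put() are invented helpers, not the kernel API.
 */
#include <errno.h>
#include <stdio.h>

static int rpm_enabled = 1;   /* pretend runtime PM may be disabled */
static int usage_count;

static int rpm_get(void)
{
	usage_count++;
	if (!rpm_enabled)
		return -EACCES;   /* runtime PM disabled: device is always on */
	if (usage_count == 1)
		puts("device resumed");
	return 0;
}

static void rpm_put(void)
{
	if (--usage_count == 0 && rpm_enabled)
		puts("device may autosuspend");
}

static int do_gpu_object_work(void)
{
	int ret = rpm_get();

	if (ret < 0 && ret != -EACCES) {
		rpm_put();        /* give back the reference and bail */
		return ret;
	}

	puts("touching GPU object (VM map/unmap, etc.)");

	rpm_put();            /* pm_runtime_put_autosuspend() equivalent */
	return 0;
}

int main(void)
{
	do_gpu_object_work();
	rpm_enabled = 0;          /* -EACCES path: work still proceeds */
	do_gpu_object_work();
	return 0;
}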
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 753a6def61e7..3d1cfcb96b6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,7 @@
28#include "nouveau_ttm.h" 28#include "nouveau_ttm.h"
29#include "nouveau_gem.h" 29#include "nouveau_gem.h"
30 30
31#include "drm_legacy.h"
31static int 32static int
32nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 33nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
33{ 34{
@@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
281 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); 282 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
282 283
283 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 284 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
284 return -EINVAL; 285 return drm_legacy_mmap(filp, vma);
285 286
286 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); 287 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
287} 288}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d59ec491dbb9..ed644a4f6f57 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1851 return pll; 1851 return pll;
1852 } 1852 }
1853 /* otherwise, pick one of the plls */ 1853 /* otherwise, pick one of the plls */
1854 if ((rdev->family == CHIP_KAVERI) || 1854 if ((rdev->family == CHIP_KABINI) ||
1855 (rdev->family == CHIP_KABINI) ||
1856 (rdev->family == CHIP_MULLINS)) { 1855 (rdev->family == CHIP_MULLINS)) {
1857 /* KB/KV/ML has PPLL1 and PPLL2 */ 1856 /* KB/ML has PPLL1 and PPLL2 */
1858 pll_in_use = radeon_get_pll_use_mask(crtc); 1857 pll_in_use = radeon_get_pll_use_mask(crtc);
1859 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1858 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1860 return ATOM_PPLL2; 1859 return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1863 DRM_ERROR("unable to allocate a PPLL\n"); 1862 DRM_ERROR("unable to allocate a PPLL\n");
1864 return ATOM_PPLL_INVALID; 1863 return ATOM_PPLL_INVALID;
1865 } else { 1864 } else {
1866 /* CI has PPLL0, PPLL1, and PPLL2 */ 1865 /* CI/KV has PPLL0, PPLL1, and PPLL2 */
1867 pll_in_use = radeon_get_pll_use_mask(crtc); 1866 pll_in_use = radeon_get_pll_use_mask(crtc);
1868 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1867 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1869 return ATOM_PPLL2; 1868 return ATOM_PPLL2;
@@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
2155 case ATOM_PPLL0: 2154 case ATOM_PPLL0:
2156 /* disable the ppll */ 2155 /* disable the ppll */
2157 if ((rdev->family == CHIP_ARUBA) || 2156 if ((rdev->family == CHIP_ARUBA) ||
2157 (rdev->family == CHIP_KAVERI) ||
2158 (rdev->family == CHIP_BONAIRE) || 2158 (rdev->family == CHIP_BONAIRE) ||
2159 (rdev->family == CHIP_HAWAII)) 2159 (rdev->family == CHIP_HAWAII))
2160 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 2160 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 11ba9d21b89b..db42a670f995 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
492 struct radeon_connector_atom_dig *dig_connector; 492 struct radeon_connector_atom_dig *dig_connector;
493 int dp_clock; 493 int dp_clock;
494 494
495 if ((mode->clock > 340000) &&
496 (!radeon_connector_is_dp12_capable(connector)))
497 return MODE_CLOCK_HIGH;
498
495 if (!radeon_connector->con_priv) 499 if (!radeon_connector->con_priv)
496 return MODE_CLOCK_HIGH; 500 return MODE_CLOCK_HIGH;
497 dig_connector = radeon_connector->con_priv; 501 dig_connector = radeon_connector->con_priv;
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index ba85986febea..03003f8a6de6 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2156,4 +2156,6 @@
2156#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu 2156#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
2157#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u 2157#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
2158 2158
2159#define IH_VMID_0_LUT 0x3D40u
2160
2159#endif 2161#endif
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 2fe8cfc966d9..bafdf92a5732 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
103 } 103 }
104 104
105 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 105 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
106 if (sad_count < 0) { 106 if (sad_count <= 0) {
107 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 107 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
108 return; 108 return;
109 } 109 }
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9b42001295ba..e3e9c10cfba9 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
2745 pi->enable_auto_thermal_throttling = true; 2745 pi->enable_auto_thermal_throttling = true;
2746 pi->disable_nb_ps3_in_battery = false; 2746 pi->disable_nb_ps3_in_battery = false;
2747 if (radeon_bapm == -1) { 2747 if (radeon_bapm == -1) {
2748 /* There are stability issues reported on with
2749 * bapm enabled on an asrock system.
2750 */
2751 if (rdev->pdev->subsystem_vendor == 0x1849)
2752 pi->bapm_enable = false;
2753 else
2754 pi->bapm_enable = true;
2748 /* only enable bapm on KB, ML by default */
2749 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2750 pi->bapm_enable = true;
2751 else
2752 pi->bapm_enable = false;
2755 } else if (radeon_bapm == 0) { 2753 } else if (radeon_bapm == 0) {
2756 pi->bapm_enable = false; 2754 pi->bapm_enable = false;
2757 } else { 2755 } else {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe48f229043e..a46f73737994 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
394 return r; 394 return r;
395} 395}
396 396
397static int radeon_mode_mmap(struct drm_file *filp, 397int radeon_mode_dumb_mmap(struct drm_file *filp,
398 struct drm_device *dev, 398 struct drm_device *dev,
399 uint32_t handle, bool dumb, 399 uint32_t handle, uint64_t *offset_p)
400 uint64_t *offset_p)
401{ 400{
402 struct drm_gem_object *gobj; 401 struct drm_gem_object *gobj;
403 struct radeon_bo *robj; 402 struct radeon_bo *robj;
@@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
406 if (gobj == NULL) { 405 if (gobj == NULL) {
407 return -ENOENT; 406 return -ENOENT;
408 } 407 }
409
410 /*
411 * We don't allow dumb mmaps on objects created using another
412 * interface.
413 */
414 WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
415 "Illegal dumb map of GPU buffer.\n");
416
417 robj = gem_to_radeon_bo(gobj); 408 robj = gem_to_radeon_bo(gobj);
418 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { 409 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
419 drm_gem_object_unreference_unlocked(gobj); 410 drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp,
424 return 0; 415 return 0;
425} 416}
426 417
427int radeon_mode_dumb_mmap(struct drm_file *filp,
428 struct drm_device *dev,
429 uint32_t handle, uint64_t *offset_p)
430{
431 return radeon_mode_mmap(filp, dev, handle, true, offset_p);
432}
433
434int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, 418int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
435 struct drm_file *filp) 419 struct drm_file *filp)
436{ 420{
437 struct drm_radeon_gem_mmap *args = data; 421 struct drm_radeon_gem_mmap *args = data;
438 422
439 return radeon_mode_mmap(filp, dev, args->handle, false, 423 return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
440 &args->addr_ptr);
441} 424}
442 425
443int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 426int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
763 return -ENOMEM; 746 return -ENOMEM;
764 747
765 r = drm_gem_handle_create(file_priv, gobj, &handle); 748 r = drm_gem_handle_create(file_priv, gobj, &handle);
766 gobj->dumb = true;
767 /* drop reference from allocate - handle holds it now */ 749 /* drop reference from allocate - handle holds it now */
768 drm_gem_object_unreference_unlocked(gobj); 750 drm_gem_object_unreference_unlocked(gobj);
769 if (r) { 751 if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 065d02068ec3..8bf87f1203cc 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -28,6 +28,8 @@
28#include "cikd.h" 28#include "cikd.h"
29#include "cik_reg.h" 29#include "cik_reg.h"
30#include "radeon_kfd.h" 30#include "radeon_kfd.h"
31#include "radeon_ucode.h"
32#include <linux/firmware.h>
31 33
32#define CIK_PIPE_PER_MEC (4) 34#define CIK_PIPE_PER_MEC (4)
33 35
@@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
49static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); 51static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
50 52
51static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); 53static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
54static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
52 55
53/* 56/*
54 * Register access functions 57 * Register access functions
@@ -69,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
69static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 72static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
70 uint32_t queue_id, uint32_t __user *wptr); 73 uint32_t queue_id, uint32_t __user *wptr);
71 74
72static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, 75static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
73 uint32_t pipe_id, uint32_t queue_id); 76 uint32_t pipe_id, uint32_t queue_id);
74 77
75static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, 78static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@@ -89,14 +92,16 @@ static const struct kfd2kgd_calls kfd2kgd = {
89 .init_memory = kgd_init_memory, 92 .init_memory = kgd_init_memory,
90 .init_pipeline = kgd_init_pipeline, 93 .init_pipeline = kgd_init_pipeline,
91 .hqd_load = kgd_hqd_load, 94 .hqd_load = kgd_hqd_load,
92 .hqd_is_occupies = kgd_hqd_is_occupies, 95 .hqd_is_occupied = kgd_hqd_is_occupied,
93 .hqd_destroy = kgd_hqd_destroy, 96 .hqd_destroy = kgd_hqd_destroy,
97 .get_fw_version = get_fw_version
94}; 98};
95 99
96static const struct kgd2kfd_calls *kgd2kfd; 100static const struct kgd2kfd_calls *kgd2kfd;
97 101
98bool radeon_kfd_init(void) 102bool radeon_kfd_init(void)
99{ 103{
104#if defined(CONFIG_HSA_AMD_MODULE)
100 bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*, 105 bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
101 const struct kgd2kfd_calls**); 106 const struct kgd2kfd_calls**);
102 107
@@ -113,6 +118,17 @@ bool radeon_kfd_init(void)
113 } 118 }
114 119
115 return true; 120 return true;
121#elif defined(CONFIG_HSA_AMD)
122 if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
123 kgd2kfd = NULL;
124
125 return false;
126 }
127
128 return true;
129#else
130 return false;
131#endif
116} 132}
117 133
118void radeon_kfd_fini(void) 134void radeon_kfd_fini(void)
@@ -374,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
374 cpu_relax(); 390 cpu_relax();
375 write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid); 391 write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
376 392
393 /* Mapping vmid to pasid also for IH block */
394 write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
395 pasid_mapping);
396
377 return 0; 397 return 0;
378} 398}
379 399
@@ -513,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
513 return 0; 533 return 0;
514} 534}
515 535
516static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, 536static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
517 uint32_t pipe_id, uint32_t queue_id) 537 uint32_t pipe_id, uint32_t queue_id)
518{ 538{
519 uint32_t act; 539 uint32_t act;
@@ -552,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
552 if (timeout == 0) { 572 if (timeout == 0) {
553 pr_err("kfd: cp queue preemption time out (%dms)\n", 573 pr_err("kfd: cp queue preemption time out (%dms)\n",
554 temp); 574 temp);
575 release_queue(kgd);
555 return -ETIME; 576 return -ETIME;
556 } 577 }
557 msleep(20); 578 msleep(20);
@@ -561,3 +582,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
561 release_queue(kgd); 582 release_queue(kgd);
562 return 0; 583 return 0;
563} 584}
585
586static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
587{
588 struct radeon_device *rdev = (struct radeon_device *) kgd;
589 const union radeon_firmware_header *hdr;
590
591 BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
592
593 switch (type) {
594 case KGD_ENGINE_PFP:
595 hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
596 break;
597
598 case KGD_ENGINE_ME:
599 hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
600 break;
601
602 case KGD_ENGINE_CE:
603 hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
604 break;
605
606 case KGD_ENGINE_MEC1:
607 hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
608 break;
609
610 case KGD_ENGINE_MEC2:
611 hdr = (const union radeon_firmware_header *)
612 rdev->mec2_fw->data;
613 break;
614
615 case KGD_ENGINE_RLC:
616 hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
617 break;
618
619 case KGD_ENGINE_SDMA:
620 hdr = (const union radeon_firmware_header *)
621 rdev->sdma_fw->data;
622 break;
623
624 default:
625 return 0;
626 }
627
628 if (hdr == NULL)
629 return 0;
630
631 /* Only 12 bits in use */
632 return hdr->common.ucode_version;
633}
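get_fw_version() above simply picks the firmware image that matches the requested engine and returns the version field from its common header. A compact sketch of that dispatch is shown below; the structures are simplified stand-ins, not the radeon_ucode.h definitions:

/* Simplified dispatch-on-engine-type firmware version lookup; the types
 * here are illustrative, not the radeon firmware headers.
 */
#include <stdint.h>
#include <stdio.h>

enum engine_type { ENGINE_PFP, ENGINE_ME, ENGINE_MEC1, ENGINE_MAX };

struct fw_header { uint32_t ucode_version; };

struct device_fw {
	const struct fw_header *fw[ENGINE_MAX];
};

static uint16_t get_fw_version(const struct device_fw *dev, enum engine_type type)
{
	const struct fw_header *hdr;

	switch (type) {
	case ENGINE_PFP:  hdr = dev->fw[ENGINE_PFP];  break;
	case ENGINE_ME:   hdr = dev->fw[ENGINE_ME];   break;
	case ENGINE_MEC1: hdr = dev->fw[ENGINE_MEC1]; break;
	default:          return 0;                   /* unknown engine */
	}

	if (!hdr)
		return 0;                             /* firmware not loaded */

	return (uint16_t)hdr->ucode_version;          /* only the low bits are used */
}

int main(void)
{
	struct fw_header mec = { .ucode_version = 0x0123 };
	struct device_fw dev = { .fw = { [ENGINE_MEC1] = &mec } };

	printf("MEC1 fw version: 0x%04x\n", get_fw_version(&dev, ENGINE_MEC1));
	printf("PFP  fw version: 0x%04x\n", get_fw_version(&dev, ENGINE_PFP));
	return 0;
}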
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d68223eb469..86fc56434b28 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -529,9 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
529 u32 current_domain = 529 u32 current_domain =
530 radeon_mem_type_to_domain(bo->tbo.mem.mem_type); 530 radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
531 531
532 WARN_ONCE(bo->gem_base.dumb,
533 "GPU use of dumb buffer is illegal.\n");
534
535 /* Check if this buffer will be moved and don't move it 532 /* Check if this buffer will be moved and don't move it
536 * if we have moved too many buffers for this IB already. 533 * if we have moved too many buffers for this IB already.
537 * 534 *
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 535403e0c8a2..15aee723db77 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
1703 u32 format; 1703 u32 format;
1704 u32 *buffer; 1704 u32 *buffer;
1705 const u8 __user *data; 1705 const u8 __user *data;
1706 int size, dwords, tex_width, blit_width, spitch; 1706 unsigned int size, dwords, tex_width, blit_width, spitch;
1707 u32 height; 1707 u32 height;
1708 int i; 1708 int i;
1709 u32 texpitch, microtile; 1709 u32 texpitch, microtile;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3367960286a6..978993fa3a36 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
168 const struct tegra_dc_window *window) 168 const struct tegra_dc_window *window)
169{ 169{
170 unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp; 170 unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
171 unsigned long value; 171 unsigned long value, flags;
172 bool yuv, planar; 172 bool yuv, planar;
173 173
174 /* 174 /*
@@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
181 else 181 else
182 bpp = planar ? 1 : 2; 182 bpp = planar ? 1 : 2;
183 183
184 spin_lock_irqsave(&dc->lock, flags);
185
184 value = WINDOW_A_SELECT << index; 186 value = WINDOW_A_SELECT << index;
185 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); 187 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
186 188
@@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
273 275
274 case TEGRA_BO_TILING_MODE_BLOCK: 276 case TEGRA_BO_TILING_MODE_BLOCK:
275 DRM_ERROR("hardware doesn't support block linear mode\n"); 277 DRM_ERROR("hardware doesn't support block linear mode\n");
278 spin_unlock_irqrestore(&dc->lock, flags);
276 return -EINVAL; 279 return -EINVAL;
277 } 280 }
278 281
@@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
331 334
332 tegra_dc_window_commit(dc, index); 335 tegra_dc_window_commit(dc, index);
333 336
337 spin_unlock_irqrestore(&dc->lock, flags);
338
334 return 0; 339 return 0;
335} 340}
336 341
@@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
338{ 343{
339 struct tegra_dc *dc = to_tegra_dc(plane->crtc); 344 struct tegra_dc *dc = to_tegra_dc(plane->crtc);
340 struct tegra_plane *p = to_tegra_plane(plane); 345 struct tegra_plane *p = to_tegra_plane(plane);
346 unsigned long flags;
341 u32 value; 347 u32 value;
342 348
343 if (!plane->crtc) 349 if (!plane->crtc)
344 return 0; 350 return 0;
345 351
352 spin_lock_irqsave(&dc->lock, flags);
353
346 value = WINDOW_A_SELECT << p->index; 354 value = WINDOW_A_SELECT << p->index;
347 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); 355 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
348 356
@@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
352 360
353 tegra_dc_window_commit(dc, p->index); 361 tegra_dc_window_commit(dc, p->index);
354 362
363 spin_unlock_irqrestore(&dc->lock, flags);
364
355 return 0; 365 return 0;
356} 366}
357 367
@@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
699 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); 709 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
700 unsigned int h_offset = 0, v_offset = 0; 710 unsigned int h_offset = 0, v_offset = 0;
701 struct tegra_bo_tiling tiling; 711 struct tegra_bo_tiling tiling;
712 unsigned long value, flags;
702 unsigned int format, swap; 713 unsigned int format, swap;
703 unsigned long value;
704 int err; 714 int err;
705 715
706 err = tegra_fb_get_tiling(fb, &tiling); 716 err = tegra_fb_get_tiling(fb, &tiling);
707 if (err < 0) 717 if (err < 0)
708 return err; 718 return err;
709 719
720 spin_lock_irqsave(&dc->lock, flags);
721
710 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); 722 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
711 723
712 value = fb->offsets[0] + y * fb->pitches[0] + 724 value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
752 764
753 case TEGRA_BO_TILING_MODE_BLOCK: 765 case TEGRA_BO_TILING_MODE_BLOCK:
754 DRM_ERROR("hardware doesn't support block linear mode\n"); 766 DRM_ERROR("hardware doesn't support block linear mode\n");
767 spin_unlock_irqrestore(&dc->lock, flags);
755 return -EINVAL; 768 return -EINVAL;
756 } 769 }
757 770
@@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
778 tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL); 791 tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
779 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); 792 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
780 793
794 spin_unlock_irqrestore(&dc->lock, flags);
795
781 return 0; 796 return 0;
782} 797}
783 798
@@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
814 unsigned long flags, base; 829 unsigned long flags, base;
815 struct tegra_bo *bo; 830 struct tegra_bo *bo;
816 831
817 if (!dc->event) 832 spin_lock_irqsave(&drm->event_lock, flags);
833
834 if (!dc->event) {
835 spin_unlock_irqrestore(&drm->event_lock, flags);
818 return; 836 return;
837 }
819 838
820 bo = tegra_fb_get_plane(crtc->primary->fb, 0); 839 bo = tegra_fb_get_plane(crtc->primary->fb, 0);
821 840
841 spin_lock_irqsave(&dc->lock, flags);
842
822 /* check if new start address has been latched */ 843 /* check if new start address has been latched */
844 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
823 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); 845 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
824 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR); 846 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
825 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); 847 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
826 848
849 spin_unlock_irqrestore(&dc->lock, flags);
850
827 if (base == bo->paddr + crtc->primary->fb->offsets[0]) { 851 if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
828 spin_lock_irqsave(&drm->event_lock, flags); 852 drm_crtc_send_vblank_event(crtc, dc->event);
829 drm_send_vblank_event(drm, dc->pipe, dc->event); 853 drm_crtc_vblank_put(crtc);
830 drm_vblank_put(drm, dc->pipe);
831 dc->event = NULL; 854 dc->event = NULL;
832 spin_unlock_irqrestore(&drm->event_lock, flags);
833 } 855 }
856
857 spin_unlock_irqrestore(&drm->event_lock, flags);
834} 858}
835 859
836void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) 860void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
843 867
844 if (dc->event && dc->event->base.file_priv == file) { 868 if (dc->event && dc->event->base.file_priv == file) {
845 dc->event->base.destroy(&dc->event->base); 869 dc->event->base.destroy(&dc->event->base);
846 drm_vblank_put(drm, dc->pipe); 870 drm_crtc_vblank_put(crtc);
847 dc->event = NULL; 871 dc->event = NULL;
848 } 872 }
849 873
@@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
853static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 877static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
854 struct drm_pending_vblank_event *event, uint32_t page_flip_flags) 878 struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
855{ 879{
880 unsigned int pipe = drm_crtc_index(crtc);
856 struct tegra_dc *dc = to_tegra_dc(crtc); 881 struct tegra_dc *dc = to_tegra_dc(crtc);
857 struct drm_device *drm = crtc->dev;
858 882
859 if (dc->event) 883 if (dc->event)
860 return -EBUSY; 884 return -EBUSY;
861 885
862 if (event) { 886 if (event) {
863 event->pipe = dc->pipe; 887 event->pipe = pipe;
864 dc->event = event; 888 dc->event = event;
865 drm_vblank_get(drm, dc->pipe); 889 drm_crtc_vblank_get(crtc);
866 } 890 }
867 891
868 tegra_dc_set_base(dc, 0, 0, fb); 892 tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
1127 /* 1151 /*
1128 dev_dbg(dc->dev, "%s(): vertical blank\n", __func__); 1152 dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
1129 */ 1153 */
1130 drm_handle_vblank(dc->base.dev, dc->pipe); 1154 drm_crtc_handle_vblank(&dc->base);
1131 tegra_dc_finish_page_flip(dc); 1155 tegra_dc_finish_page_flip(dc);
1132 } 1156 }
1133 1157
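The drivers/gpu/drm/tegra/dc.c hunks above widen the locking around page-flip completion and switch from the legacy per-pipe drm_vblank_*() calls to the per-CRTC helpers. The pattern reduces to the sketch below; the helper name is hypothetical and the event pointer is assumed to be owned by the caller, as dc->event is in the driver.

/*
 * Minimal sketch of the completion pattern used above: complete a
 * pending page-flip event under drm->event_lock and drop the vblank
 * reference taken when the flip was queued.
 */
static void example_finish_flip(struct drm_crtc *crtc,
				struct drm_pending_vblank_event **event)
{
	struct drm_device *drm = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);

	if (*event) {
		drm_crtc_send_vblank_event(crtc, *event);
		drm_crtc_vblank_put(crtc); /* pairs with drm_crtc_vblank_get() at flip time */
		*event = NULL;
	}

	spin_unlock_irqrestore(&drm->event_lock, flags);
}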
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e549afeece1f..d4f827593dfa 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
694 .llseek = noop_llseek, 694 .llseek = noop_llseek,
695}; 695};
696 696
697static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe) 697static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
698 unsigned int pipe)
698{ 699{
699 struct drm_crtc *crtc; 700 struct drm_crtc *crtc;
700 701
701 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) { 702 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
702 struct tegra_dc *dc = to_tegra_dc(crtc); 703 if (pipe == drm_crtc_index(crtc))
703
704 if (dc->pipe == pipe)
705 return crtc; 704 return crtc;
706 } 705 }
707 706
708 return NULL; 707 return NULL;
709} 708}
710 709
711static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc) 710static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
712{ 711{
712 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
713
714 if (!crtc)
715 return 0;
716
713 /* TODO: implement real hardware counter using syncpoints */ 717 /* TODO: implement real hardware counter using syncpoints */
714 return drm_vblank_count(dev, crtc); 718 return drm_crtc_vblank_count(crtc);
715} 719}
716 720
717static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe) 721static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index da32086cbeaf..8777b7f75791 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
216 } 216 }
217} 217}
218 218
219static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo, 219static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
220 size_t size)
221{ 220{
221 struct scatterlist *s;
222 struct sg_table *sgt;
223 unsigned int i;
224
222 bo->pages = drm_gem_get_pages(&bo->gem); 225 bo->pages = drm_gem_get_pages(&bo->gem);
223 if (IS_ERR(bo->pages)) 226 if (IS_ERR(bo->pages))
224 return PTR_ERR(bo->pages); 227 return PTR_ERR(bo->pages);
225 228
226 bo->num_pages = size >> PAGE_SHIFT; 229 bo->num_pages = bo->gem.size >> PAGE_SHIFT;
227 230
228 bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); 231 sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
229 if (IS_ERR(bo->sgt)) { 232 if (IS_ERR(sgt))
230 drm_gem_put_pages(&bo->gem, bo->pages, false, false); 233 goto put_pages;
231 return PTR_ERR(bo->sgt); 234
235 /*
236 * Fake up the SG table so that dma_map_sg() can be used to flush the
237 * pages associated with it. Note that this relies on the fact that
238	 * the DMA API doesn't hook into the IOMMU on Tegra, so mapping is
239 * only cache maintenance.
240 *
241	 * TODO: Replace this with drm_clflush_sg() once it can be implemented
242 * without relying on symbols that are not exported.
243 */
244 for_each_sg(sgt->sgl, s, sgt->nents, i)
245 sg_dma_address(s) = sg_phys(s);
246
247 if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
248 sgt = ERR_PTR(-ENOMEM);
249 goto release_sgt;
232 } 250 }
233 251
252 bo->sgt = sgt;
253
234 return 0; 254 return 0;
255
256release_sgt:
257 sg_free_table(sgt);
258 kfree(sgt);
259put_pages:
260 drm_gem_put_pages(&bo->gem, bo->pages, false, false);
261 return PTR_ERR(sgt);
235} 262}
236 263
237static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo, 264static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
238 size_t size)
239{ 265{
240 struct tegra_drm *tegra = drm->dev_private; 266 struct tegra_drm *tegra = drm->dev_private;
241 int err; 267 int err;
242 268
243 if (tegra->domain) { 269 if (tegra->domain) {
244 err = tegra_bo_get_pages(drm, bo, size); 270 err = tegra_bo_get_pages(drm, bo);
245 if (err < 0) 271 if (err < 0)
246 return err; 272 return err;
247 273
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
251 return err; 277 return err;
252 } 278 }
253 } else { 279 } else {
280 size_t size = bo->gem.size;
281
254 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, 282 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
255 GFP_KERNEL | __GFP_NOWARN); 283 GFP_KERNEL | __GFP_NOWARN);
256 if (!bo->vaddr) { 284 if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
274 if (IS_ERR(bo)) 302 if (IS_ERR(bo))
275 return bo; 303 return bo;
276 304
277 err = tegra_bo_alloc(drm, bo, size); 305 err = tegra_bo_alloc(drm, bo);
278 if (err < 0) 306 if (err < 0)
279 goto release; 307 goto release;
280 308
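The drivers/gpu/drm/tegra/gem.c change above builds a scatterlist for the freshly allocated pages and uses dma_map_sg() purely for cache maintenance, unwinding with gotos on failure. A condensed sketch of that flush-and-unwind step, with a hypothetical helper name:

/*
 * Point each sg entry at its physical address so that dma_map_sg()
 * only performs cache maintenance (no IOMMU translation is set up by
 * the DMA API here), then unwind in reverse order on failure.
 */
static int example_flush_pages(struct drm_device *drm, struct tegra_bo *bo,
			       struct sg_table *sgt)
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
		sg_free_table(sgt);
		kfree(sgt);
		return -ENOMEM;
	}

	bo->sgt = sgt;
	return 0;
}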
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 230b6f887cd8..dfdc26970022 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -27,7 +27,8 @@ if HID
27 27
28config HID_BATTERY_STRENGTH 28config HID_BATTERY_STRENGTH
29 bool "Battery level reporting for HID devices" 29 bool "Battery level reporting for HID devices"
30 depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY 30 depends on HID
31 select POWER_SUPPLY
31 default n 32 default n
32 ---help--- 33 ---help---
33 This option adds support of reporting battery strength (for HID devices 34 This option adds support of reporting battery strength (for HID devices
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c3d0ac1a0988..8b638792cb43 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1805 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, 1805 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1806 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, 1806 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
1807 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, 1807 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
1808 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
1808 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 1809 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
1809 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1810 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
1810 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, 1811 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7460f3402298..9243359c1821 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -526,6 +526,7 @@
526#define USB_DEVICE_ID_KYE_GPEN_560 0x5003 526#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
527#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 527#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
528#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 528#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
529#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
529#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 530#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
530 531
531#define USB_VENDOR_ID_LABTEC 0x1020 532#define USB_VENDOR_ID_LABTEC 0x1020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e0a0f06ac5ef..9505605b6e22 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
312 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), 312 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
313 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, 313 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
314 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 314 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
315 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
316 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
317 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
315 USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), 318 USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
316 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, 319 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
317 {} 320 {}
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b92bf01a1ae8..158fcf577fae 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
323 } 323 }
324 break; 324 break;
325 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: 325 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
326 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
326 if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) { 327 if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
327 rdesc = mousepen_i608x_rdesc_fixed; 328 rdesc = mousepen_i608x_rdesc_fixed;
328 *rsize = sizeof(mousepen_i608x_rdesc_fixed); 329 *rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
415 switch (id->product) { 416 switch (id->product) {
416 case USB_DEVICE_ID_KYE_EASYPEN_I405X: 417 case USB_DEVICE_ID_KYE_EASYPEN_I405X:
417 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: 418 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
419 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
418 case USB_DEVICE_ID_KYE_EASYPEN_M610X: 420 case USB_DEVICE_ID_KYE_EASYPEN_M610X:
419 ret = kye_tablet_enable(hdev); 421 ret = kye_tablet_enable(hdev);
420 if (ret) { 422 if (ret) {
@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = {
446 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 448 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
447 USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, 449 USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
448 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 450 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
451 USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
452 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
449 USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 453 USB_DEVICE_ID_KYE_EASYPEN_M610X) },
450 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 454 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
451 USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, 455 USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index c917ab61aafa..5bc6d80d5be7 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
962 962
963 switch (data[0]) { 963 switch (data[0]) {
964 case REPORT_ID_DJ_SHORT: 964 case REPORT_ID_DJ_SHORT:
965 if (size != DJREPORT_SHORT_LENGTH) {
966 dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
967 return false;
968 }
965 return logi_dj_dj_event(hdev, report, data, size); 969 return logi_dj_dj_event(hdev, report, data, size);
966 case REPORT_ID_HIDPP_SHORT: 970 case REPORT_ID_HIDPP_SHORT:
967 /* intentional fallthrough */ 971 if (size != HIDPP_REPORT_SHORT_LENGTH) {
972 dev_err(&hdev->dev,
973 "Short HID++ report of bad size (%d)", size);
974 return false;
975 }
976 return logi_dj_hidpp_event(hdev, report, data, size);
968 case REPORT_ID_HIDPP_LONG: 977 case REPORT_ID_HIDPP_LONG:
978 if (size != HIDPP_REPORT_LONG_LENGTH) {
979 dev_err(&hdev->dev,
980 "Long HID++ report of bad size (%d)", size);
981 return false;
982 }
969 return logi_dj_hidpp_event(hdev, report, data, size); 983 return logi_dj_hidpp_event(hdev, report, data, size);
970 } 984 }
971 985
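The hid-logitech-dj.c hunk above adds a size check per raw report ID before the payload is parsed. The guard boils down to the sketch below; the helper name is made up, while the IDs and lengths are the driver-local constants already used above.

/* Reject a raw report whose size does not match its report ID. */
static bool example_report_size_ok(struct hid_device *hdev, u8 id, int size)
{
	int expected;

	switch (id) {
	case REPORT_ID_DJ_SHORT:
		expected = DJREPORT_SHORT_LENGTH;
		break;
	case REPORT_ID_HIDPP_SHORT:
		expected = HIDPP_REPORT_SHORT_LENGTH;
		break;
	case REPORT_ID_HIDPP_LONG:
		expected = HIDPP_REPORT_LONG_LENGTH;
		break;
	default:
		return true; /* other IDs are validated elsewhere */
	}

	if (size != expected) {
		dev_err(&hdev->dev, "report 0x%02x of bad size (%d)\n", id, size);
		return false;
	}

	return true;
}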
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 2f420c0b6609..a93cefe0e522 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
282 (report->rap.sub_id == 0x41); 282 (report->rap.sub_id == 0x41);
283} 283}
284 284
285/**
286	 * hidpp_prefix_name() prefixes the given name with "Logitech " if needed.
287 */
288static void hidpp_prefix_name(char **name, int name_length)
289{
290#define PREFIX_LENGTH 9 /* "Logitech " */
291
292 int new_length;
293 char *new_name;
294
295 if (name_length > PREFIX_LENGTH &&
296 strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0)
297	 /* The prefix is already in the name */
298 return;
299
300 new_length = PREFIX_LENGTH + name_length;
301 new_name = kzalloc(new_length, GFP_KERNEL);
302 if (!new_name)
303 return;
304
305 snprintf(new_name, new_length, "Logitech %s", *name);
306
307 kfree(*name);
308
309 *name = new_name;
310}
311
285/* -------------------------------------------------------------------------- */ 312/* -------------------------------------------------------------------------- */
286/* HIDP++ 1.0 commands */ 313/* HIDP++ 1.0 commands */
287/* -------------------------------------------------------------------------- */ 314/* -------------------------------------------------------------------------- */
@@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
321 return NULL; 348 return NULL;
322 349
323 memcpy(name, &response.rap.params[2], len); 350 memcpy(name, &response.rap.params[2], len);
351
352 /* include the terminating '\0' */
353 hidpp_prefix_name(&name, len + 1);
354
324 return name; 355 return name;
325} 356}
326 357
@@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
498 index += ret; 529 index += ret;
499 } 530 }
500 531
532 /* include the terminating '\0' */
533 hidpp_prefix_name(&name, __name_length + 1);
534
501 return name; 535 return name;
502} 536}
503 537
@@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
794 828
795 switch (data[0]) { 829 switch (data[0]) {
796 case 0x02: 830 case 0x02:
831 if (size < 2) {
832 hid_err(hdev, "Received HID report of bad size (%d)",
833 size);
834 return 1;
835 }
797 if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) { 836 if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
798 input_event(wd->input, EV_KEY, BTN_LEFT, 837 input_event(wd->input, EV_KEY, BTN_LEFT,
799 !!(data[1] & 0x01)); 838 !!(data[1] & 0x01));
800 input_event(wd->input, EV_KEY, BTN_RIGHT, 839 input_event(wd->input, EV_KEY, BTN_RIGHT,
801 !!(data[1] & 0x02)); 840 !!(data[1] & 0x02));
802 input_sync(wd->input); 841 input_sync(wd->input);
842 return 0;
803 } else { 843 } else {
804 if (size < 21) 844 if (size < 21)
805 return 1; 845 return 1;
806 return wtp_mouse_raw_xy_event(hidpp, &data[7]); 846 return wtp_mouse_raw_xy_event(hidpp, &data[7]);
807 } 847 }
808 case REPORT_ID_HIDPP_LONG: 848 case REPORT_ID_HIDPP_LONG:
849 /* size is already checked in hidpp_raw_event. */
809 if ((report->fap.feature_index != wd->mt_feature_index) || 850 if ((report->fap.feature_index != wd->mt_feature_index) ||
810 (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY)) 851 (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
811 return 1; 852 return 1;
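hidpp_prefix_name() above open-codes the prefixing with kzalloc() + snprintf(); the same result could be obtained with kasprintf(), as in this sketch (helper name hypothetical, not the driver's code):

/* Prepend "Logitech " to *name unless it is already there. */
static void example_prefix_name(char **name)
{
	char *prefixed;

	if (strncmp(*name, "Logitech ", 9) == 0)
		return; /* already prefixed */

	prefixed = kasprintf(GFP_KERNEL, "Logitech %s", *name);
	if (!prefixed)
		return; /* keep the unprefixed name on allocation failure */

	kfree(*name);
	*name = prefixed;
}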
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 1a07e07d99a0..47d7e74231e5 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -35,6 +35,8 @@ static struct class *pyra_class;
35static void profile_activated(struct pyra_device *pyra, 35static void profile_activated(struct pyra_device *pyra,
36 unsigned int new_profile) 36 unsigned int new_profile)
37{ 37{
38 if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
39 return;
38 pyra->actual_profile = new_profile; 40 pyra->actual_profile = new_profile;
39 pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; 41 pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
40} 42}
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
257 if (off != 0 || count != PYRA_SIZE_SETTINGS) 259 if (off != 0 || count != PYRA_SIZE_SETTINGS)
258 return -EINVAL; 260 return -EINVAL;
259 261
260 mutex_lock(&pyra->pyra_lock);
261
262 settings = (struct pyra_settings const *)buf; 262 settings = (struct pyra_settings const *)buf;
263 if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
264 return -EINVAL;
265
266 mutex_lock(&pyra->pyra_lock);
263 267
264 retval = pyra_set_settings(usb_dev, settings); 268 retval = pyra_set_settings(usb_dev, settings);
265 if (retval) { 269 if (retval) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d32037cbf9db..d43e967e7533 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid)
706 706
707static void i2c_hid_stop(struct hid_device *hid) 707static void i2c_hid_stop(struct hid_device *hid)
708{ 708{
709 struct i2c_client *client = hid->driver_data;
710 struct i2c_hid *ihid = i2c_get_clientdata(client);
711
712 hid->claimed = 0; 709 hid->claimed = 0;
713
714 i2c_hid_free_buffers(ihid);
715} 710}
716 711
717static int i2c_hid_open(struct hid_device *hid) 712static int i2c_hid_open(struct hid_device *hid)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dc89be90b35e..b27b3d33ebab 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
124 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, 124 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
125 { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, 125 { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
126 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, 126 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
127 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
127 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, 128 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
128 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, 129 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
129 { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS }, 130 { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index e37412da15f5..b99de00e57b8 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val)
143 case ad7998: 143 case ad7998:
144 return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG, 144 return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
145 val); 145 val);
146 default: 146 case ad7992:
147 case ad7993:
148 case ad7994:
147 return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG, 149 return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
148 val); 150 val);
151 default:
152 /* Will be written when doing a conversion */
153 st->config = val;
154 return 0;
149 } 155 }
150} 156}
151 157
@@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st)
155 case ad7997: 161 case ad7997:
156 case ad7998: 162 case ad7998:
157 return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG); 163 return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
158 default: 164 case ad7992:
165 case ad7993:
166 case ad7994:
159 return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG); 167 return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
168 default:
169 /* No readback support */
170 return st->config;
160 } 171 }
161} 172}
162 173
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 866fe904cba2..90c8cb727cc7 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -449,6 +449,9 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
449 if (val2 == NULL) 449 if (val2 == NULL)
450 val2 = &unused; 450 val2 = &unused;
451 451
452	 if (!iio_channel_has_info(chan->channel, info))
453 return -EINVAL;
454
452 if (chan->indio_dev->info->read_raw_multi) { 455 if (chan->indio_dev->info->read_raw_multi) {
453 ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev, 456 ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
454 chan->channel, INDIO_MAX_RAW_ELEMENTS, 457 chan->channel, INDIO_MAX_RAW_ELEMENTS,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 57ecc5b204f3..9117b7a2d5f8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1114,7 +1114,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
1114 struct mlx4_dev *dev = to_mdev(qp->device)->dev; 1114 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1115 int err = 0; 1115 int err = 0;
1116 1116
1117 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1117 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1118 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1118 return 0; /* do nothing */ 1119 return 0; /* do nothing */
1119 1120
1120 ib_flow = flow_attr + 1; 1121 ib_flow = flow_attr + 1;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 8afa28e4570e..18d4b2c8fe55 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,6 +28,13 @@
28#include <linux/cdev.h> 28#include <linux/cdev.h>
29#include "input-compat.h" 29#include "input-compat.h"
30 30
31enum evdev_clock_type {
32 EV_CLK_REAL = 0,
33 EV_CLK_MONO,
34 EV_CLK_BOOT,
35 EV_CLK_MAX
36};
37
31struct evdev { 38struct evdev {
32 int open; 39 int open;
33 struct input_handle handle; 40 struct input_handle handle;
@@ -49,12 +56,32 @@ struct evdev_client {
49 struct fasync_struct *fasync; 56 struct fasync_struct *fasync;
50 struct evdev *evdev; 57 struct evdev *evdev;
51 struct list_head node; 58 struct list_head node;
52 int clkid; 59 int clk_type;
53 bool revoked; 60 bool revoked;
54 unsigned int bufsize; 61 unsigned int bufsize;
55 struct input_event buffer[]; 62 struct input_event buffer[];
56}; 63};
57 64
65static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
66{
67 switch (clkid) {
68
69 case CLOCK_REALTIME:
70 client->clk_type = EV_CLK_REAL;
71 break;
72 case CLOCK_MONOTONIC:
73 client->clk_type = EV_CLK_MONO;
74 break;
75 case CLOCK_BOOTTIME:
76 client->clk_type = EV_CLK_BOOT;
77 break;
78 default:
79 return -EINVAL;
80 }
81
82 return 0;
83}
84
58/* flush queued events of type @type, caller must hold client->buffer_lock */ 85/* flush queued events of type @type, caller must hold client->buffer_lock */
59static void __evdev_flush_queue(struct evdev_client *client, unsigned int type) 86static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
60{ 87{
@@ -108,8 +135,11 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
108 struct input_event ev; 135 struct input_event ev;
109 ktime_t time; 136 ktime_t time;
110 137
111 time = (client->clkid == CLOCK_MONOTONIC) ? 138 time = client->clk_type == EV_CLK_REAL ?
112 ktime_get() : ktime_get_real(); 139 ktime_get_real() :
140 client->clk_type == EV_CLK_MONO ?
141 ktime_get() :
142 ktime_get_boottime();
113 143
114 ev.time = ktime_to_timeval(time); 144 ev.time = ktime_to_timeval(time);
115 ev.type = EV_SYN; 145 ev.type = EV_SYN;
@@ -159,7 +189,7 @@ static void __pass_event(struct evdev_client *client,
159 189
160static void evdev_pass_values(struct evdev_client *client, 190static void evdev_pass_values(struct evdev_client *client,
161 const struct input_value *vals, unsigned int count, 191 const struct input_value *vals, unsigned int count,
162 ktime_t mono, ktime_t real) 192 ktime_t *ev_time)
163{ 193{
164 struct evdev *evdev = client->evdev; 194 struct evdev *evdev = client->evdev;
165 const struct input_value *v; 195 const struct input_value *v;
@@ -169,8 +199,7 @@ static void evdev_pass_values(struct evdev_client *client,
169 if (client->revoked) 199 if (client->revoked)
170 return; 200 return;
171 201
172 event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ? 202 event.time = ktime_to_timeval(ev_time[client->clk_type]);
173 mono : real);
174 203
175 /* Interrupts are disabled, just acquire the lock. */ 204 /* Interrupts are disabled, just acquire the lock. */
176 spin_lock(&client->buffer_lock); 205 spin_lock(&client->buffer_lock);
@@ -198,21 +227,22 @@ static void evdev_events(struct input_handle *handle,
198{ 227{
199 struct evdev *evdev = handle->private; 228 struct evdev *evdev = handle->private;
200 struct evdev_client *client; 229 struct evdev_client *client;
201 ktime_t time_mono, time_real; 230 ktime_t ev_time[EV_CLK_MAX];
202 231
203 time_mono = ktime_get(); 232 ev_time[EV_CLK_MONO] = ktime_get();
204 time_real = ktime_mono_to_real(time_mono); 233 ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
234 ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
235 TK_OFFS_BOOT);
205 236
206 rcu_read_lock(); 237 rcu_read_lock();
207 238
208 client = rcu_dereference(evdev->grab); 239 client = rcu_dereference(evdev->grab);
209 240
210 if (client) 241 if (client)
211 evdev_pass_values(client, vals, count, time_mono, time_real); 242 evdev_pass_values(client, vals, count, ev_time);
212 else 243 else
213 list_for_each_entry_rcu(client, &evdev->client_list, node) 244 list_for_each_entry_rcu(client, &evdev->client_list, node)
214 evdev_pass_values(client, vals, count, 245 evdev_pass_values(client, vals, count, ev_time);
215 time_mono, time_real);
216 246
217 rcu_read_unlock(); 247 rcu_read_unlock();
218} 248}
@@ -877,10 +907,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
877 case EVIOCSCLOCKID: 907 case EVIOCSCLOCKID:
878 if (copy_from_user(&i, p, sizeof(unsigned int))) 908 if (copy_from_user(&i, p, sizeof(unsigned int)))
879 return -EFAULT; 909 return -EFAULT;
880 if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME) 910
881 return -EINVAL; 911 return evdev_set_clk_type(client, i);
882 client->clkid = i;
883 return 0;
884 912
885 case EVIOCGKEYCODE: 913 case EVIOCGKEYCODE:
886 return evdev_handle_get_keycode(dev, p); 914 return evdev_handle_get_keycode(dev, p);
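The evdev change above generalizes EVIOCSCLOCKID from CLOCK_MONOTONIC/CLOCK_REALTIME to a clk_type index that also covers CLOCK_BOOTTIME. From userspace the new clock is requested the same way as before; a minimal sketch (the device node path is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <linux/input.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7 /* fallback for old libc headers */
#endif

int main(void)
{
	int clkid = CLOCK_BOOTTIME;
	int fd = open("/dev/input/event0", O_RDONLY); /* example node */

	if (fd < 0)
		return 1;

	/* Kernels without this change reject CLOCK_BOOTTIME with EINVAL. */
	if (ioctl(fd, EVIOCSCLOCKID, &clkid) < 0)
		perror("EVIOCSCLOCKID");

	return 0;
}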
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 04217c2e345c..213e3a1903ee 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1974,18 +1974,22 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
1974 1974
1975 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ 1975 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
1976 1976
1977 for (i = 0; i < ABS_CNT; i++) { 1977 if (test_bit(EV_ABS, dev->evbit)) {
1978 if (test_bit(i, dev->absbit)) { 1978 for (i = 0; i < ABS_CNT; i++) {
1979 if (input_is_mt_axis(i)) 1979 if (test_bit(i, dev->absbit)) {
1980 events += mt_slots; 1980 if (input_is_mt_axis(i))
1981 else 1981 events += mt_slots;
1982 events++; 1982 else
1983 events++;
1984 }
1983 } 1985 }
1984 } 1986 }
1985 1987
1986 for (i = 0; i < REL_CNT; i++) 1988 if (test_bit(EV_REL, dev->evbit)) {
1987 if (test_bit(i, dev->relbit)) 1989 for (i = 0; i < REL_CNT; i++)
1988 events++; 1990 if (test_bit(i, dev->relbit))
1991 events++;
1992 }
1989 1993
1990 /* Make room for KEY and MSC events */ 1994 /* Make room for KEY and MSC events */
1991 events += 7; 1995 events += 7;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 96ee26c555e0..a5d9b3f3c871 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -559,6 +559,7 @@ config KEYBOARD_SH_KEYSC
559config KEYBOARD_STMPE 559config KEYBOARD_STMPE
560 tristate "STMPE keypad support" 560 tristate "STMPE keypad support"
561 depends on MFD_STMPE 561 depends on MFD_STMPE
562 depends on OF
562 select INPUT_MATRIXKMAP 563 select INPUT_MATRIXKMAP
563 help 564 help
564 Say Y here if you want to use the keypad controller on STMPE I/O 565 Say Y here if you want to use the keypad controller on STMPE I/O
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index d4dd78a7d56b..883d6aed5b9a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -35,9 +35,13 @@
35struct gpio_button_data { 35struct gpio_button_data {
36 const struct gpio_keys_button *button; 36 const struct gpio_keys_button *button;
37 struct input_dev *input; 37 struct input_dev *input;
38 struct timer_list timer; 38
39 struct work_struct work; 39 struct timer_list release_timer;
40 unsigned int timer_debounce; /* in msecs */ 40 unsigned int release_delay; /* in msecs, for IRQ-only buttons */
41
42 struct delayed_work work;
43 unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
44
41 unsigned int irq; 45 unsigned int irq;
42 spinlock_t lock; 46 spinlock_t lock;
43 bool disabled; 47 bool disabled;
@@ -116,11 +120,14 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
116{ 120{
117 if (!bdata->disabled) { 121 if (!bdata->disabled) {
118 /* 122 /*
119 * Disable IRQ and possible debouncing timer. 123 * Disable IRQ and associated timer/work structure.
120 */ 124 */
121 disable_irq(bdata->irq); 125 disable_irq(bdata->irq);
122 if (bdata->timer_debounce) 126
123 del_timer_sync(&bdata->timer); 127 if (gpio_is_valid(bdata->button->gpio))
128 cancel_delayed_work_sync(&bdata->work);
129 else
130 del_timer_sync(&bdata->release_timer);
124 131
125 bdata->disabled = true; 132 bdata->disabled = true;
126 } 133 }
@@ -343,7 +350,7 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
343static void gpio_keys_gpio_work_func(struct work_struct *work) 350static void gpio_keys_gpio_work_func(struct work_struct *work)
344{ 351{
345 struct gpio_button_data *bdata = 352 struct gpio_button_data *bdata =
346 container_of(work, struct gpio_button_data, work); 353 container_of(work, struct gpio_button_data, work.work);
347 354
348 gpio_keys_gpio_report_event(bdata); 355 gpio_keys_gpio_report_event(bdata);
349 356
@@ -351,13 +358,6 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
351 pm_relax(bdata->input->dev.parent); 358 pm_relax(bdata->input->dev.parent);
352} 359}
353 360
354static void gpio_keys_gpio_timer(unsigned long _data)
355{
356 struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
357
358 schedule_work(&bdata->work);
359}
360
361static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id) 361static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
362{ 362{
363 struct gpio_button_data *bdata = dev_id; 363 struct gpio_button_data *bdata = dev_id;
@@ -366,11 +366,10 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
366 366
367 if (bdata->button->wakeup) 367 if (bdata->button->wakeup)
368 pm_stay_awake(bdata->input->dev.parent); 368 pm_stay_awake(bdata->input->dev.parent);
369 if (bdata->timer_debounce) 369
370 mod_timer(&bdata->timer, 370 mod_delayed_work(system_wq,
371 jiffies + msecs_to_jiffies(bdata->timer_debounce)); 371 &bdata->work,
372 else 372 msecs_to_jiffies(bdata->software_debounce));
373 schedule_work(&bdata->work);
374 373
375 return IRQ_HANDLED; 374 return IRQ_HANDLED;
376} 375}
@@ -408,7 +407,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
408 input_event(input, EV_KEY, button->code, 1); 407 input_event(input, EV_KEY, button->code, 1);
409 input_sync(input); 408 input_sync(input);
410 409
411 if (!bdata->timer_debounce) { 410 if (!bdata->release_delay) {
412 input_event(input, EV_KEY, button->code, 0); 411 input_event(input, EV_KEY, button->code, 0);
413 input_sync(input); 412 input_sync(input);
414 goto out; 413 goto out;
@@ -417,9 +416,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
417 bdata->key_pressed = true; 416 bdata->key_pressed = true;
418 } 417 }
419 418
420 if (bdata->timer_debounce) 419 if (bdata->release_delay)
421 mod_timer(&bdata->timer, 420 mod_timer(&bdata->release_timer,
422 jiffies + msecs_to_jiffies(bdata->timer_debounce)); 421 jiffies + msecs_to_jiffies(bdata->release_delay));
423out: 422out:
424 spin_unlock_irqrestore(&bdata->lock, flags); 423 spin_unlock_irqrestore(&bdata->lock, flags);
425 return IRQ_HANDLED; 424 return IRQ_HANDLED;
@@ -429,10 +428,10 @@ static void gpio_keys_quiesce_key(void *data)
429{ 428{
430 struct gpio_button_data *bdata = data; 429 struct gpio_button_data *bdata = data;
431 430
432 if (bdata->timer_debounce) 431 if (gpio_is_valid(bdata->button->gpio))
433 del_timer_sync(&bdata->timer); 432 cancel_delayed_work_sync(&bdata->work);
434 433 else
435 cancel_work_sync(&bdata->work); 434 del_timer_sync(&bdata->release_timer);
436} 435}
437 436
438static int gpio_keys_setup_key(struct platform_device *pdev, 437static int gpio_keys_setup_key(struct platform_device *pdev,
@@ -466,23 +465,25 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
466 button->debounce_interval * 1000); 465 button->debounce_interval * 1000);
467 /* use timer if gpiolib doesn't provide debounce */ 466 /* use timer if gpiolib doesn't provide debounce */
468 if (error < 0) 467 if (error < 0)
469 bdata->timer_debounce = 468 bdata->software_debounce =
470 button->debounce_interval; 469 button->debounce_interval;
471 } 470 }
472 471
473 irq = gpio_to_irq(button->gpio); 472 if (button->irq) {
474 if (irq < 0) { 473 bdata->irq = button->irq;
475 error = irq; 474 } else {
476 dev_err(dev, 475 irq = gpio_to_irq(button->gpio);
477 "Unable to get irq number for GPIO %d, error %d\n", 476 if (irq < 0) {
478 button->gpio, error); 477 error = irq;
479 return error; 478 dev_err(dev,
479 "Unable to get irq number for GPIO %d, error %d\n",
480 button->gpio, error);
481 return error;
482 }
483 bdata->irq = irq;
480 } 484 }
481 bdata->irq = irq;
482 485
483 INIT_WORK(&bdata->work, gpio_keys_gpio_work_func); 486 INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
484 setup_timer(&bdata->timer,
485 gpio_keys_gpio_timer, (unsigned long)bdata);
486 487
487 isr = gpio_keys_gpio_isr; 488 isr = gpio_keys_gpio_isr;
488 irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; 489 irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -499,8 +500,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
499 return -EINVAL; 500 return -EINVAL;
500 } 501 }
501 502
502 bdata->timer_debounce = button->debounce_interval; 503 bdata->release_delay = button->debounce_interval;
503 setup_timer(&bdata->timer, 504 setup_timer(&bdata->release_timer,
504 gpio_keys_irq_timer, (unsigned long)bdata); 505 gpio_keys_irq_timer, (unsigned long)bdata);
505 506
506 isr = gpio_keys_irq_isr; 507 isr = gpio_keys_irq_isr;
@@ -510,7 +511,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
510 input_set_capability(input, button->type ?: EV_KEY, button->code); 511 input_set_capability(input, button->type ?: EV_KEY, button->code);
511 512
512 /* 513 /*
513 * Install custom action to cancel debounce timer and 514 * Install custom action to cancel release timer and
514 * workqueue item. 515 * workqueue item.
515 */ 516 */
516 error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata); 517 error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
@@ -618,33 +619,30 @@ gpio_keys_get_devtree_pdata(struct device *dev)
618 619
619 i = 0; 620 i = 0;
620 for_each_child_of_node(node, pp) { 621 for_each_child_of_node(node, pp) {
621 int gpio = -1;
622 enum of_gpio_flags flags; 622 enum of_gpio_flags flags;
623 623
624 button = &pdata->buttons[i++]; 624 button = &pdata->buttons[i++];
625 625
626 if (!of_find_property(pp, "gpios", NULL)) { 626 button->gpio = of_get_gpio_flags(pp, 0, &flags);
627 button->irq = irq_of_parse_and_map(pp, 0); 627 if (button->gpio < 0) {
628 if (button->irq == 0) { 628 error = button->gpio;
629 i--; 629 if (error != -ENOENT) {
630 pdata->nbuttons--;
631 dev_warn(dev, "Found button without gpios or irqs\n");
632 continue;
633 }
634 } else {
635 gpio = of_get_gpio_flags(pp, 0, &flags);
636 if (gpio < 0) {
637 error = gpio;
638 if (error != -EPROBE_DEFER) 630 if (error != -EPROBE_DEFER)
639 dev_err(dev, 631 dev_err(dev,
640 "Failed to get gpio flags, error: %d\n", 632 "Failed to get gpio flags, error: %d\n",
641 error); 633 error);
642 return ERR_PTR(error); 634 return ERR_PTR(error);
643 } 635 }
636 } else {
637 button->active_low = flags & OF_GPIO_ACTIVE_LOW;
644 } 638 }
645 639
646 button->gpio = gpio; 640 button->irq = irq_of_parse_and_map(pp, 0);
647 button->active_low = flags & OF_GPIO_ACTIVE_LOW; 641
642 if (!gpio_is_valid(button->gpio) && !button->irq) {
643 dev_err(dev, "Found button without gpios or irqs\n");
644 return ERR_PTR(-EINVAL);
645 }
648 646
649 if (of_property_read_u32(pp, "linux,code", &button->code)) { 647 if (of_property_read_u32(pp, "linux,code", &button->code)) {
650 dev_err(dev, "Button without keycode: 0x%x\n", 648 dev_err(dev, "Button without keycode: 0x%x\n",
@@ -659,6 +657,8 @@ gpio_keys_get_devtree_pdata(struct device *dev)
659 657
660 button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL); 658 button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
661 659
660 button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
661
662 if (of_property_read_u32(pp, "debounce-interval", 662 if (of_property_read_u32(pp, "debounce-interval",
663 &button->debounce_interval)) 663 &button->debounce_interval))
664 button->debounce_interval = 5; 664 button->debounce_interval = 5;
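The gpio_keys.c rework above replaces the timer + work pair used for GPIO debouncing with a single delayed_work, so the ISR only has to (re)arm one item. The core of that pattern, with illustrative names rather than the driver's own:

struct example_button {
	struct delayed_work work;        /* INIT_DELAYED_WORK()'d at setup time */
	unsigned int software_debounce;  /* debounce interval in ms */
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_button *b = dev_id;

	/* A zero delay schedules the work immediately, like the old else branch. */
	mod_delayed_work(system_wq, &b->work,
			 msecs_to_jiffies(b->software_debounce));

	return IRQ_HANDLED;
}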
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 610a8af795a1..5b152f25a8e1 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -473,7 +473,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
473 if (error) 473 if (error)
474 goto bail1; 474 goto bail1;
475 475
476 init_completion(&dev->cmd_done); 476 reinit_completion(&dev->cmd_done);
477 serio_write(serio, 0); 477 serio_write(serio, 0);
478 serio_write(serio, 0); 478 serio_write(serio, 0);
479 serio_write(serio, HIL_PKT_CMD >> 8); 479 serio_write(serio, HIL_PKT_CMD >> 8);
@@ -482,7 +482,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
482 if (error) 482 if (error)
483 goto bail1; 483 goto bail1;
484 484
485 init_completion(&dev->cmd_done); 485 reinit_completion(&dev->cmd_done);
486 serio_write(serio, 0); 486 serio_write(serio, 0);
487 serio_write(serio, 0); 487 serio_write(serio, 0);
488 serio_write(serio, HIL_PKT_CMD >> 8); 488 serio_write(serio, HIL_PKT_CMD >> 8);
@@ -491,7 +491,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
491 if (error) 491 if (error)
492 goto bail1; 492 goto bail1;
493 493
494 init_completion(&dev->cmd_done); 494 reinit_completion(&dev->cmd_done);
495 serio_write(serio, 0); 495 serio_write(serio, 0);
496 serio_write(serio, 0); 496 serio_write(serio, 0);
497 serio_write(serio, HIL_PKT_CMD >> 8); 497 serio_write(serio, HIL_PKT_CMD >> 8);
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ef5e67fb567e..fe6e3f22eed7 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -45,13 +45,14 @@
45#define STMPE_KEYPAD_MAX_ROWS 8 45#define STMPE_KEYPAD_MAX_ROWS 8
46#define STMPE_KEYPAD_MAX_COLS 8 46#define STMPE_KEYPAD_MAX_COLS 8
47#define STMPE_KEYPAD_ROW_SHIFT 3 47#define STMPE_KEYPAD_ROW_SHIFT 3
48#define STMPE_KEYPAD_KEYMAP_SIZE \ 48#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
49 (STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS) 49 (STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
50 50
51/** 51/**
52 * struct stmpe_keypad_variant - model-specific attributes 52 * struct stmpe_keypad_variant - model-specific attributes
53 * @auto_increment: whether the KPC_DATA_BYTE register address 53 * @auto_increment: whether the KPC_DATA_BYTE register address
54 * auto-increments on multiple read 54 * auto-increments on multiple read
55 * @set_pullup: whether the pins need to have their pull-ups set
55 * @num_data: number of data bytes 56 * @num_data: number of data bytes
56 * @num_normal_data: number of normal keys' data bytes 57 * @num_normal_data: number of normal keys' data bytes
57 * @max_cols: maximum number of columns supported 58 * @max_cols: maximum number of columns supported
@@ -61,6 +62,7 @@
61 */ 62 */
62struct stmpe_keypad_variant { 63struct stmpe_keypad_variant {
63 bool auto_increment; 64 bool auto_increment;
65 bool set_pullup;
64 int num_data; 66 int num_data;
65 int num_normal_data; 67 int num_normal_data;
66 int max_cols; 68 int max_cols;
@@ -81,6 +83,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
81 }, 83 },
82 [STMPE2401] = { 84 [STMPE2401] = {
83 .auto_increment = false, 85 .auto_increment = false,
86 .set_pullup = true,
84 .num_data = 3, 87 .num_data = 3,
85 .num_normal_data = 2, 88 .num_normal_data = 2,
86 .max_cols = 8, 89 .max_cols = 8,
@@ -90,6 +93,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
90 }, 93 },
91 [STMPE2403] = { 94 [STMPE2403] = {
92 .auto_increment = true, 95 .auto_increment = true,
96 .set_pullup = true,
93 .num_data = 5, 97 .num_data = 5,
94 .num_normal_data = 3, 98 .num_normal_data = 3,
95 .max_cols = 8, 99 .max_cols = 8,
@@ -99,16 +103,30 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
99 }, 103 },
100}; 104};
101 105
106/**
107 * struct stmpe_keypad - STMPE keypad state container
108 * @stmpe: pointer to parent STMPE device
109 * @input: spawned input device
110 * @variant: STMPE variant
111 * @debounce_ms: debounce interval, in ms. Maximum is
112 * %STMPE_KEYPAD_MAX_DEBOUNCE.
113 * @scan_count: number of key scanning cycles to confirm key data.
114 * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
115 * @no_autorepeat: disable key autorepeat
116 * @rows: bitmask for the rows
117 * @cols: bitmask for the columns
118 * @keymap: the keymap
119 */
102struct stmpe_keypad { 120struct stmpe_keypad {
103 struct stmpe *stmpe; 121 struct stmpe *stmpe;
104 struct input_dev *input; 122 struct input_dev *input;
105 const struct stmpe_keypad_variant *variant; 123 const struct stmpe_keypad_variant *variant;
106 const struct stmpe_keypad_platform_data *plat; 124 unsigned int debounce_ms;
107 125 unsigned int scan_count;
126 bool no_autorepeat;
108 unsigned int rows; 127 unsigned int rows;
109 unsigned int cols; 128 unsigned int cols;
110 129 unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
111 unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
112}; 130};
113 131
114static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data) 132static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
@@ -171,7 +189,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
171 unsigned int col_gpios = variant->col_gpios; 189 unsigned int col_gpios = variant->col_gpios;
172 unsigned int row_gpios = variant->row_gpios; 190 unsigned int row_gpios = variant->row_gpios;
173 struct stmpe *stmpe = keypad->stmpe; 191 struct stmpe *stmpe = keypad->stmpe;
192 u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
174 unsigned int pins = 0; 193 unsigned int pins = 0;
194 unsigned int pu_pins = 0;
195 int ret;
175 int i; 196 int i;
176 197
177 /* 198 /*
@@ -188,8 +209,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
188 for (i = 0; i < variant->max_cols; i++) { 209 for (i = 0; i < variant->max_cols; i++) {
189 int num = __ffs(col_gpios); 210 int num = __ffs(col_gpios);
190 211
191 if (keypad->cols & (1 << i)) 212 if (keypad->cols & (1 << i)) {
192 pins |= 1 << num; 213 pins |= 1 << num;
214 pu_pins |= 1 << num;
215 }
193 216
194 col_gpios &= ~(1 << num); 217 col_gpios &= ~(1 << num);
195 } 218 }
@@ -203,20 +226,43 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
203 row_gpios &= ~(1 << num); 226 row_gpios &= ~(1 << num);
204 } 227 }
205 228
206 return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD); 229 ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
230 if (ret)
231 return ret;
232
233 /*
234 * On STMPE24xx, set pin bias to pull-up on all keypad input
235 * pins (columns), this incidentally happen to be maximum 8 pins
236 * and placed at GPIO0-7 so only the LSB of the pull up register
237 * ever needs to be written.
238 */
239 if (variant->set_pullup) {
240 u8 val;
241
242 ret = stmpe_reg_read(stmpe, pureg);
243 if (ret)
244 return ret;
245
246 /* Do not touch unused pins, may be used for GPIO */
247 val = ret & ~pu_pins;
248 val |= pu_pins;
249
250 ret = stmpe_reg_write(stmpe, pureg, val);
251 }
252
253 return 0;
207} 254}
208 255
209static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad) 256static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
210{ 257{
211 const struct stmpe_keypad_platform_data *plat = keypad->plat;
212 const struct stmpe_keypad_variant *variant = keypad->variant; 258 const struct stmpe_keypad_variant *variant = keypad->variant;
213 struct stmpe *stmpe = keypad->stmpe; 259 struct stmpe *stmpe = keypad->stmpe;
214 int ret; 260 int ret;
215 261
216 if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE) 262 if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
217 return -EINVAL; 263 return -EINVAL;
218 264
219 if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT) 265 if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
220 return -EINVAL; 266 return -EINVAL;
221 267
222 ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD); 268 ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
@@ -245,7 +291,7 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
245 291
246 ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB, 292 ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
247 STMPE_KPC_CTRL_MSB_SCAN_COUNT, 293 STMPE_KPC_CTRL_MSB_SCAN_COUNT,
248 plat->scan_count << 4); 294 keypad->scan_count << 4);
249 if (ret < 0) 295 if (ret < 0)
250 return ret; 296 return ret;
251 297
@@ -253,17 +299,18 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
253 STMPE_KPC_CTRL_LSB_SCAN | 299 STMPE_KPC_CTRL_LSB_SCAN |
254 STMPE_KPC_CTRL_LSB_DEBOUNCE, 300 STMPE_KPC_CTRL_LSB_DEBOUNCE,
255 STMPE_KPC_CTRL_LSB_SCAN | 301 STMPE_KPC_CTRL_LSB_SCAN |
256 (plat->debounce_ms << 1)); 302 (keypad->debounce_ms << 1));
257} 303}
258 304
259static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad) 305static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
306 u32 used_rows, u32 used_cols)
260{ 307{
261 int row, col; 308 int row, col;
262 309
263 for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) { 310 for (row = 0; row < used_rows; row++) {
264 for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) { 311 for (col = 0; col < used_cols; col++) {
265 int code = MATRIX_SCAN_CODE(row, col, 312 int code = MATRIX_SCAN_CODE(row, col,
266 STMPE_KEYPAD_ROW_SHIFT); 313 STMPE_KEYPAD_ROW_SHIFT);
267 if (keypad->keymap[code] != KEY_RESERVED) { 314 if (keypad->keymap[code] != KEY_RESERVED) {
268 keypad->rows |= 1 << row; 315 keypad->rows |= 1 << row;
269 keypad->cols |= 1 << col; 316 keypad->cols |= 1 << col;
@@ -272,51 +319,17 @@ static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
272 } 319 }
273} 320}
274 321
275#ifdef CONFIG_OF
276static const struct stmpe_keypad_platform_data *
277stmpe_keypad_of_probe(struct device *dev)
278{
279 struct device_node *np = dev->of_node;
280 struct stmpe_keypad_platform_data *plat;
281
282 if (!np)
283 return ERR_PTR(-ENODEV);
284
285 plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
286 if (!plat)
287 return ERR_PTR(-ENOMEM);
288
289 of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
290 of_property_read_u32(np, "st,scan-count", &plat->scan_count);
291
292 plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
293
294 return plat;
295}
296#else
297static inline const struct stmpe_keypad_platform_data *
298stmpe_keypad_of_probe(struct device *dev)
299{
300 return ERR_PTR(-EINVAL);
301}
302#endif
303
304static int stmpe_keypad_probe(struct platform_device *pdev) 322static int stmpe_keypad_probe(struct platform_device *pdev)
305{ 323{
306 struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent); 324 struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
307 const struct stmpe_keypad_platform_data *plat; 325 struct device_node *np = pdev->dev.of_node;
308 struct stmpe_keypad *keypad; 326 struct stmpe_keypad *keypad;
309 struct input_dev *input; 327 struct input_dev *input;
328 u32 rows;
329 u32 cols;
310 int error; 330 int error;
311 int irq; 331 int irq;
312 332
313 plat = stmpe->pdata->keypad;
314 if (!plat) {
315 plat = stmpe_keypad_of_probe(&pdev->dev);
316 if (IS_ERR(plat))
317 return PTR_ERR(plat);
318 }
319
320 irq = platform_get_irq(pdev, 0); 333 irq = platform_get_irq(pdev, 0);
321 if (irq < 0) 334 if (irq < 0)
322 return irq; 335 return irq;
@@ -326,6 +339,13 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
326 if (!keypad) 339 if (!keypad)
327 return -ENOMEM; 340 return -ENOMEM;
328 341
342 keypad->stmpe = stmpe;
343 keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
344
345 of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
346 of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
347 keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
348
329 input = devm_input_allocate_device(&pdev->dev); 349 input = devm_input_allocate_device(&pdev->dev);
330 if (!input) 350 if (!input)
331 return -ENOMEM; 351 return -ENOMEM;
@@ -334,23 +354,22 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
334 input->id.bustype = BUS_I2C; 354 input->id.bustype = BUS_I2C;
335 input->dev.parent = &pdev->dev; 355 input->dev.parent = &pdev->dev;
336 356
337 error = matrix_keypad_build_keymap(plat->keymap_data, NULL, 357 error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
338 STMPE_KEYPAD_MAX_ROWS, 358 if (error)
339 STMPE_KEYPAD_MAX_COLS, 359 return error;
360
361 error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
340 keypad->keymap, input); 362 keypad->keymap, input);
341 if (error) 363 if (error)
342 return error; 364 return error;
343 365
344 input_set_capability(input, EV_MSC, MSC_SCAN); 366 input_set_capability(input, EV_MSC, MSC_SCAN);
345 if (!plat->no_autorepeat) 367 if (!keypad->no_autorepeat)
346 __set_bit(EV_REP, input->evbit); 368 __set_bit(EV_REP, input->evbit);
347 369
348 stmpe_keypad_fill_used_pins(keypad); 370 stmpe_keypad_fill_used_pins(keypad, rows, cols);
349 371
350 keypad->stmpe = stmpe;
351 keypad->plat = plat;
352 keypad->input = input; 372 keypad->input = input;
353 keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
354 373
355 error = stmpe_keypad_chip_init(keypad); 374 error = stmpe_keypad_chip_init(keypad);
356 if (error < 0) 375 if (error < 0)
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d125a019383f..d88d73d83552 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -881,6 +881,34 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
881 unsigned char *pkt, 881 unsigned char *pkt,
882 unsigned char pkt_id) 882 unsigned char pkt_id)
883{ 883{
884 /*
885 * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
886 * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
887 * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
888 * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
889 * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
890 * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
891 * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
892 * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
893 * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
894 * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
895 * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
896 * L: Left button
897 * R / M: Non-clickpads: Right / Middle button
898 * Clickpads: When > 2 fingers are down, and some fingers
899 * are in the button area, then the 2 coordinates reported
900 * are for fingers outside the button area and these report
901 * extra fingers being present in the right / left button
902 * area. Note these fingers are not added to the F field!
903 * so if a TWO packet is received and R = 1 then there are
904 * 3 fingers down, etc.
905 * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
906 * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
907 * otherwise byte 0 bit 4 must be set and byte 0/4/5 are
908 * in NEW fmt
909 * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
910 */
911
884 mt[0].x = ((pkt[2] & 0x80) << 4); 912 mt[0].x = ((pkt[2] & 0x80) << 4);
885 mt[0].x |= ((pkt[2] & 0x3F) << 5); 913 mt[0].x |= ((pkt[2] & 0x3F) << 5);
886 mt[0].x |= ((pkt[3] & 0x30) >> 1); 914 mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -919,18 +947,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
919 947
920static int alps_get_mt_count(struct input_mt_pos *mt) 948static int alps_get_mt_count(struct input_mt_pos *mt)
921{ 949{
922 int i; 950 int i, fingers = 0;
923 951
924 for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++) 952 for (i = 0; i < MAX_TOUCHES; i++) {
925 /* empty */; 953 if (mt[i].x != 0 || mt[i].y != 0)
954 fingers++;
955 }
926 956
927 return i; 957 return fingers;
928} 958}
929 959
930static int alps_decode_packet_v7(struct alps_fields *f, 960static int alps_decode_packet_v7(struct alps_fields *f,
931 unsigned char *p, 961 unsigned char *p,
932 struct psmouse *psmouse) 962 struct psmouse *psmouse)
933{ 963{
964 struct alps_data *priv = psmouse->private;
934 unsigned char pkt_id; 965 unsigned char pkt_id;
935 966
936 pkt_id = alps_get_packet_id_v7(p); 967 pkt_id = alps_get_packet_id_v7(p);
@@ -938,19 +969,52 @@ static int alps_decode_packet_v7(struct alps_fields *f,
938 return 0; 969 return 0;
939 if (pkt_id == V7_PACKET_ID_UNKNOWN) 970 if (pkt_id == V7_PACKET_ID_UNKNOWN)
940 return -1; 971 return -1;
972 /*
973 * NEW packets are send to indicate a discontinuity in the finger
974 * coordinate reporting. Specifically a finger may have moved from
975 * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
976 * us.
977 *
978 * NEW packets have 3 problems:
979 * 1) They do not contain middle / right button info (on non clickpads)
980 * this can be worked around by preserving the old button state
981	 * 2) They do not contain an accurate finger count, and they are
982	 * typically sent when the number of fingers changes. We cannot use
983 * the old finger count as that may mismatch with the amount of
984 * touch coordinates we've available in the NEW packet
985 * 3) Their x data for the second touch is inaccurate leading to
986 * a possible jump of the x coordinate by 16 units when the first
987 * non NEW packet comes in
988 * Since problems 2 & 3 cannot be worked around, just ignore them.
989 */
990 if (pkt_id == V7_PACKET_ID_NEW)
991 return 1;
941 992
942 alps_get_finger_coordinate_v7(f->mt, p, pkt_id); 993 alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
943 994
944 if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) { 995 if (pkt_id == V7_PACKET_ID_TWO)
945 f->left = (p[0] & 0x80) >> 7; 996 f->fingers = alps_get_mt_count(f->mt);
997 else /* pkt_id == V7_PACKET_ID_MULTI */
998 f->fingers = 3 + (p[5] & 0x03);
999
1000 f->left = (p[0] & 0x80) >> 7;
1001 if (priv->flags & ALPS_BUTTONPAD) {
1002 if (p[0] & 0x20)
1003 f->fingers++;
1004 if (p[0] & 0x10)
1005 f->fingers++;
1006 } else {
946 f->right = (p[0] & 0x20) >> 5; 1007 f->right = (p[0] & 0x20) >> 5;
947 f->middle = (p[0] & 0x10) >> 4; 1008 f->middle = (p[0] & 0x10) >> 4;
948 } 1009 }
949 1010
950	 if (pkt_id == V7_PACKET_ID_TWO)	1011	 /* Sometimes a single touch is reported in mt[1] rather than mt[0] */
951 f->fingers = alps_get_mt_count(f->mt); 1012 if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
952 else if (pkt_id == V7_PACKET_ID_MULTI) 1013 f->mt[0].x = f->mt[1].x;
953 f->fingers = 3 + (p[5] & 0x03); 1014 f->mt[0].y = f->mt[1].y;
1015 f->mt[1].x = 0;
1016 f->mt[1].y = 0;
1017 }
954 1018
955 return 0; 1019 return 0;
956} 1020}
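The alps.c counting fix above matters because, per the comment added in the hunk, a single v7 touch may be reported in mt[1] while mt[0] stays empty; the old loop stopped at the first empty slot and therefore undercounted. A sketch of the corrected count (MAX_TOUCHES is the driver's constant):

static int example_count_touches(const struct input_mt_pos *mt)
{
	int i, fingers = 0;

	/* Count every non-empty slot instead of stopping at the first gap. */
	for (i = 0; i < MAX_TOUCHES; i++)
		if (mt[i].x != 0 || mt[i].y != 0)
			fingers++;

	return fingers;
}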
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index f2b978026407..77ecf6d32237 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1520,6 +1520,8 @@ static int elantech_set_properties(struct elantech_data *etd)
1520 case 7: 1520 case 7:
1521 case 8: 1521 case 8:
1522 case 9: 1522 case 9:
1523 case 10:
1524 case 13:
1523 etd->hw_version = 4; 1525 etd->hw_version = 4;
1524 break; 1526 break;
1525 default: 1527 default:
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 30c8b6998808..354d47ecd66a 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -227,6 +227,7 @@ TRACKPOINT_INT_ATTR(thresh, TP_THRESH, TP_DEF_THRESH);
227TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH); 227TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
228TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME); 228TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
229TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV); 229TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
230TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
230 231
231TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0, 232TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
232 TP_DEF_PTSON); 233 TP_DEF_PTSON);
@@ -246,6 +247,7 @@ static struct attribute *trackpoint_attrs[] = {
246 &psmouse_attr_upthresh.dattr.attr, 247 &psmouse_attr_upthresh.dattr.attr,
247 &psmouse_attr_ztime.dattr.attr, 248 &psmouse_attr_ztime.dattr.attr,
248 &psmouse_attr_jenks.dattr.attr, 249 &psmouse_attr_jenks.dattr.attr,
250 &psmouse_attr_drift_time.dattr.attr,
249 &psmouse_attr_press_to_select.dattr.attr, 251 &psmouse_attr_press_to_select.dattr.attr,
250 &psmouse_attr_skipback.dattr.attr, 252 &psmouse_attr_skipback.dattr.attr,
251 &psmouse_attr_ext_dev.dattr.attr, 253 &psmouse_attr_ext_dev.dattr.attr,
@@ -312,6 +314,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
312 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh); 314 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
313 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime); 315 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
314 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks); 316 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
317 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);
315 318
316 /* toggles */ 319 /* toggles */
317 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select); 320 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);
@@ -332,6 +335,7 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
332 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh); 335 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
333 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime); 336 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
334 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks); 337 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
338 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
335 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia); 339 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);
336 340
337 /* toggles */ 341 /* toggles */
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index ecd0547964a5..5617ed3a7d7a 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -70,6 +70,9 @@
70#define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */ 70#define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */
71#define TP_Z_TIME 0x5E /* How sharp of a press */ 71#define TP_Z_TIME 0x5E /* How sharp of a press */
72#define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */ 72#define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */
73#define TP_DRIFT_TIME 0x5F /* How long a 'hands off' condition */
74 /* must last (x*107ms) for drift */
75 /* correction to occur */
73 76
74/* 77/*
75 * Toggling Flag bits 78 * Toggling Flag bits
@@ -120,6 +123,7 @@
120#define TP_DEF_UP_THRESH 0xFF 123#define TP_DEF_UP_THRESH 0xFF
121#define TP_DEF_Z_TIME 0x26 124#define TP_DEF_Z_TIME 0x26
122#define TP_DEF_JENKS_CURV 0x87 125#define TP_DEF_JENKS_CURV 0x87
126#define TP_DEF_DRIFT_TIME 0x05
123 127
124/* Toggles */ 128/* Toggles */
125#define TP_DEF_MB 0x00 129#define TP_DEF_MB 0x00
@@ -137,6 +141,7 @@ struct trackpoint_data
137 unsigned char draghys, mindrag; 141 unsigned char draghys, mindrag;
138 unsigned char thresh, upthresh; 142 unsigned char thresh, upthresh;
139 unsigned char ztime, jenks; 143 unsigned char ztime, jenks;
144 unsigned char drift_time;
140 145
141 /* toggles */ 146 /* toggles */
142 unsigned char press_to_select; 147 unsigned char press_to_select;
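
As a small worked example of the unit noted in the TP_DRIFT_TIME comment above (each register step is roughly 107 ms): with the new default TP_DEF_DRIFT_TIME of 0x05 the 'hands off' delay comes to about 535 ms. The helper name below is made up for the sketch.

/* Approximate 'hands off' delay for a given drift_time register value,
 * using the x*107ms unit from the TP_DRIFT_TIME comment. */
static unsigned int drift_time_to_ms(unsigned char drift_time)
{
	return drift_time * 107U;	/* 0x05 -> ~535 ms */
}

At run time the same register is exposed through the drift_time sysfs attribute added in trackpoint.c above.
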
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index c66d1b53843e..764857b4e268 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -415,6 +415,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
415 }, 415 },
416 }, 416 },
417 { 417 {
418 /* Acer Aspire 7738 */
419 .matches = {
420 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
421 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
422 },
423 },
424 {
418 /* Gericom Bellagio */ 425 /* Gericom Bellagio */
419 .matches = { 426 .matches = {
420 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), 427 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
@@ -745,6 +752,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
745 { } 752 { }
746}; 753};
747 754
755/*
 756 * Some laptops need a keyboard reset before probing for the trackpad to get
 757 * it detected, initialised and finally working.
758 */
759static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
760 {
761 /* Gigabyte P35 v2 - Elantech touchpad */
762 .matches = {
763 DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
764 DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
765 },
766 },
767 {
768 /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
769 .matches = {
770 DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
771 DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
772 },
773 },
774 {
775 /* Gigabyte P34 - Elantech touchpad */
776 .matches = {
777 DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
778 DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
779 },
780 },
781 { }
782};
783
748#endif /* CONFIG_X86 */ 784#endif /* CONFIG_X86 */
749 785
750#ifdef CONFIG_PNP 786#ifdef CONFIG_PNP
@@ -1040,6 +1076,9 @@ static int __init i8042_platform_init(void)
1040 if (dmi_check_system(i8042_dmi_dritek_table)) 1076 if (dmi_check_system(i8042_dmi_dritek_table))
1041 i8042_dritek = true; 1077 i8042_dritek = true;
1042 1078
1079 if (dmi_check_system(i8042_dmi_kbdreset_table))
1080 i8042_kbdreset = true;
1081
1043 /* 1082 /*
1044 * A20 was already enabled during early kernel init. But some buggy 1083 * A20 was already enabled during early kernel init. But some buggy
1045 * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to 1084 * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 924e4bf357fb..986a71c614b0 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -67,6 +67,10 @@ static bool i8042_notimeout;
67module_param_named(notimeout, i8042_notimeout, bool, 0); 67module_param_named(notimeout, i8042_notimeout, bool, 0);
68MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042"); 68MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
69 69
70static bool i8042_kbdreset;
71module_param_named(kbdreset, i8042_kbdreset, bool, 0);
72MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
73
70#ifdef CONFIG_X86 74#ifdef CONFIG_X86
71static bool i8042_dritek; 75static bool i8042_dritek;
72module_param_named(dritek, i8042_dritek, bool, 0); 76module_param_named(dritek, i8042_dritek, bool, 0);
@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
790 return -1; 794 return -1;
791 795
792/* 796/*
797 * Reset keyboard (needed on some laptops to successfully detect
798 * touchpad, e.g., some Gigabyte laptop models with Elantech
799 * touchpads).
800 */
801 if (i8042_kbdreset) {
802 pr_warn("Attempting to reset device connected to KBD port\n");
803 i8042_kbd_write(NULL, (unsigned char) 0xff);
804 }
805
806/*
793 * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and 807 * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
794 * used it for a PCI card or somethig else. 808 * used it for a PCI card or somethig else.
795 */ 809 */
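
The keyboard reset above is gated either by the new i8042_dmi_kbdreset_table or by the kbdreset module parameter. As a hedged sketch of how a further affected machine would be matched — the vendor and product strings below are placeholders, not real quirks:

#include <linux/init.h>
#include <linux/dmi.h>

/* Hypothetical extra entry in the style of i8042_dmi_kbdreset_table. */
static const struct dmi_system_id __initconst example_kbdreset_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleModel"),
		},
	},
	{ }
};

On hardware not yet covered by the table, the same behaviour can be requested manually by booting with i8042.kbdreset=1 (or kbdreset=1 when i8042 is built as a module).
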
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index bb070206223c..95ee92a91bd2 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -99,13 +99,9 @@
99#define MXT_T6_STATUS_COMSERR (1 << 2) 99#define MXT_T6_STATUS_COMSERR (1 << 2)
100 100
101/* MXT_GEN_POWER_T7 field */ 101/* MXT_GEN_POWER_T7 field */
102struct t7_config { 102#define MXT_POWER_IDLEACQINT 0
103 u8 idle; 103#define MXT_POWER_ACTVACQINT 1
104 u8 active; 104#define MXT_POWER_ACTV2IDLETO 2
105} __packed;
106
107#define MXT_POWER_CFG_RUN 0
108#define MXT_POWER_CFG_DEEPSLEEP 1
109 105
110/* MXT_GEN_ACQUIRE_T8 field */ 106/* MXT_GEN_ACQUIRE_T8 field */
111#define MXT_ACQUIRE_CHRGTIME 0 107#define MXT_ACQUIRE_CHRGTIME 0
@@ -117,6 +113,7 @@ struct t7_config {
117#define MXT_ACQUIRE_ATCHCALSTHR 7 113#define MXT_ACQUIRE_ATCHCALSTHR 7
118 114
119/* MXT_TOUCH_MULTI_T9 field */ 115/* MXT_TOUCH_MULTI_T9 field */
116#define MXT_TOUCH_CTRL 0
120#define MXT_T9_ORIENT 9 117#define MXT_T9_ORIENT 9
121#define MXT_T9_RANGE 18 118#define MXT_T9_RANGE 18
122 119
@@ -256,7 +253,6 @@ struct mxt_data {
256 bool update_input; 253 bool update_input;
257 u8 last_message_count; 254 u8 last_message_count;
258 u8 num_touchids; 255 u8 num_touchids;
259 struct t7_config t7_cfg;
260 256
261 /* Cached parameters from object table */ 257 /* Cached parameters from object table */
262 u16 T5_address; 258 u16 T5_address;
@@ -672,6 +668,20 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
672 data->t6_status = status; 668 data->t6_status = status;
673} 669}
674 670
671static int mxt_write_object(struct mxt_data *data,
672 u8 type, u8 offset, u8 val)
673{
674 struct mxt_object *object;
675 u16 reg;
676
677 object = mxt_get_object(data, type);
678 if (!object || offset >= mxt_obj_size(object))
679 return -EINVAL;
680
681 reg = object->start_address;
682 return mxt_write_reg(data->client, reg + offset, val);
683}
684
675static void mxt_input_button(struct mxt_data *data, u8 *message) 685static void mxt_input_button(struct mxt_data *data, u8 *message)
676{ 686{
677 struct input_dev *input = data->input_dev; 687 struct input_dev *input = data->input_dev;
@@ -1742,60 +1752,6 @@ err_free_object_table:
1742 return error; 1752 return error;
1743} 1753}
1744 1754
1745static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
1746{
1747 struct device *dev = &data->client->dev;
1748 int error;
1749 struct t7_config *new_config;
1750 struct t7_config deepsleep = { .active = 0, .idle = 0 };
1751
1752 if (sleep == MXT_POWER_CFG_DEEPSLEEP)
1753 new_config = &deepsleep;
1754 else
1755 new_config = &data->t7_cfg;
1756
1757 error = __mxt_write_reg(data->client, data->T7_address,
1758 sizeof(data->t7_cfg), new_config);
1759 if (error)
1760 return error;
1761
1762 dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
1763 new_config->active, new_config->idle);
1764
1765 return 0;
1766}
1767
1768static int mxt_init_t7_power_cfg(struct mxt_data *data)
1769{
1770 struct device *dev = &data->client->dev;
1771 int error;
1772 bool retry = false;
1773
1774recheck:
1775 error = __mxt_read_reg(data->client, data->T7_address,
1776 sizeof(data->t7_cfg), &data->t7_cfg);
1777 if (error)
1778 return error;
1779
1780 if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
1781 if (!retry) {
1782 dev_dbg(dev, "T7 cfg zero, resetting\n");
1783 mxt_soft_reset(data);
1784 retry = true;
1785 goto recheck;
1786 } else {
1787 dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
1788 data->t7_cfg.active = 20;
1789 data->t7_cfg.idle = 100;
1790 return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
1791 }
1792 }
1793
1794 dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
1795 data->t7_cfg.active, data->t7_cfg.idle);
1796 return 0;
1797}
1798
1799static int mxt_configure_objects(struct mxt_data *data, 1755static int mxt_configure_objects(struct mxt_data *data,
1800 const struct firmware *cfg) 1756 const struct firmware *cfg)
1801{ 1757{
@@ -1809,12 +1765,6 @@ static int mxt_configure_objects(struct mxt_data *data,
1809 dev_warn(dev, "Error %d updating config\n", error); 1765 dev_warn(dev, "Error %d updating config\n", error);
1810 } 1766 }
1811 1767
1812 error = mxt_init_t7_power_cfg(data);
1813 if (error) {
1814 dev_err(dev, "Failed to initialize power cfg\n");
1815 return error;
1816 }
1817
1818 error = mxt_initialize_t9_input_device(data); 1768 error = mxt_initialize_t9_input_device(data);
1819 if (error) 1769 if (error)
1820 return error; 1770 return error;
@@ -2093,15 +2043,16 @@ static const struct attribute_group mxt_attr_group = {
2093 2043
2094static void mxt_start(struct mxt_data *data) 2044static void mxt_start(struct mxt_data *data)
2095{ 2045{
2096 mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN); 2046 /* Touch enable */
2097 2047 mxt_write_object(data,
2098 /* Recalibrate since chip has been in deep sleep */ 2048 MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
2099 mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
2100} 2049}
2101 2050
2102static void mxt_stop(struct mxt_data *data) 2051static void mxt_stop(struct mxt_data *data)
2103{ 2052{
2104 mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP); 2053 /* Touch disable */
2054 mxt_write_object(data,
2055 MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
2105} 2056}
2106 2057
2107static int mxt_input_open(struct input_dev *dev) 2058static int mxt_input_open(struct input_dev *dev)
@@ -2266,6 +2217,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
2266 struct mxt_data *data = i2c_get_clientdata(client); 2217 struct mxt_data *data = i2c_get_clientdata(client);
2267 struct input_dev *input_dev = data->input_dev; 2218 struct input_dev *input_dev = data->input_dev;
2268 2219
2220 mxt_soft_reset(data);
2221
2269 mutex_lock(&input_dev->mutex); 2222 mutex_lock(&input_dev->mutex);
2270 2223
2271 if (input_dev->users) 2224 if (input_dev->users)
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3793fcc7e5db..d4c24fb7704f 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -850,9 +850,11 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
850} 850}
851 851
852#define EDT_ATTR_CHECKSET(name, reg) \ 852#define EDT_ATTR_CHECKSET(name, reg) \
853do { \
853 if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \ 854 if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \
854 pdata->name <= edt_ft5x06_attr_##name.limit_high) \ 855 pdata->name <= edt_ft5x06_attr_##name.limit_high) \
855 edt_ft5x06_register_write(tsdata, reg, pdata->name) 856 edt_ft5x06_register_write(tsdata, reg, pdata->name); \
857} while (0)
856 858
857#define EDT_GET_PROP(name, reg) { \ 859#define EDT_GET_PROP(name, reg) { \
858 u32 val; \ 860 u32 val; \
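
The do { ... } while (0) wrapper added to EDT_ATTR_CHECKSET above addresses the usual multi-statement-macro pitfall: the macro expands to a bare if, so using it in front of an else would silently bind that else to the macro's hidden if. A generic illustration, not the driver's code — apply() and the value range are placeholders:

/* Unsafe: expands to a bare if; a following "else" silently pairs with it. */
#define SET_IF_VALID_BAD(v)			\
	if ((v) >= 0 && (v) <= 255)		\
		apply(v)

/* Safe: do { } while (0) makes the expansion behave as one statement. */
#define SET_IF_VALID_OK(v)			\
do {						\
	if ((v) >= 0 && (v) <= 255)		\
		apply(v);			\
} while (0)

static int applied;

static void apply(int v)
{
	applied = v;
}

static void example(int have_value, int v)
{
	if (have_value)
		SET_IF_VALID_OK(v);		/* behaves as intended */
	else
		applied = -1;			/* with SET_IF_VALID_BAD this
						 * else would bind to the
						 * macro's inner if */
}
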
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1232336b960e..40dfbc0444c0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb,
4029 if (action != BUS_NOTIFY_REMOVED_DEVICE) 4029 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4030 return 0; 4030 return 0;
4031 4031
4032 /*
4033 * If the device is still attached to a device driver we can't
4034 * tear down the domain yet as DMA mappings may still be in use.
4035 * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
4036 */
4037 if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
4038 return 0;
4039
4040 domain = find_domain(dev); 4032 domain = find_domain(dev);
4041 if (!domain) 4033 if (!domain)
4042 return 0; 4034 return 0;
@@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
4428 domain_remove_one_dev_info(old_domain, dev); 4420 domain_remove_one_dev_info(old_domain, dev);
4429 else 4421 else
4430 domain_remove_dev_info(old_domain); 4422 domain_remove_dev_info(old_domain);
4423
4424 if (!domain_type_is_vm_or_si(old_domain) &&
4425 list_empty(&old_domain->devices))
4426 domain_exit(old_domain);
4431 } 4427 }
4432 } 4428 }
4433 4429
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 68dfb0fd5ee9..748693192c20 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
558 558
559static u64 ipmmu_page_prot(unsigned int prot, u64 type) 559static u64 ipmmu_page_prot(unsigned int prot, u64 type)
560{ 560{
561 u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF 561 u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
562 | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV 562 | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
563 | ARM_VMSA_PTE_NS | type; 563 | ARM_VMSA_PTE_NS | type;
564 564
@@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type)
568 if (prot & IOMMU_CACHE) 568 if (prot & IOMMU_CACHE)
569 pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; 569 pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
570 570
571 if (prot & IOMMU_EXEC) 571 if (prot & IOMMU_NOEXEC)
572 pgprot &= ~ARM_VMSA_PTE_XN; 572 pgprot |= ARM_VMSA_PTE_XN;
573 else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) 573 else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
574 /* If no access create a faulting entry to avoid TLB fills. */ 574 /* If no access create a faulting entry to avoid TLB fills. */
575 pgprot &= ~ARM_VMSA_PTE_PAGE; 575 pgprot &= ~ARM_VMSA_PTE_PAGE;
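
With the hunk above, IPMMU mappings are executable by default and the XN bit is set only when the caller passes IOMMU_NOEXEC. A minimal usage sketch against the generic IOMMU API of this era; the helper name is made up, the flags are the ones used in the hunk:

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Map one page read/write but non-executable; without IOMMU_NOEXEC the
 * resulting mapping is executable by default after this change. */
static int map_noexec_page(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t paddr)
{
	return iommu_map(domain, iova, paddr, SZ_4K,
			 IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC);
}
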
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index b2023af384b9..6a8b1ec4a48a 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
1009 .remove = rk_iommu_remove, 1009 .remove = rk_iommu_remove,
1010 .driver = { 1010 .driver = {
1011 .name = "rk_iommu", 1011 .name = "rk_iommu",
1012 .owner = THIS_MODULE,
1013 .of_match_table = of_match_ptr(rk_iommu_dt_ids), 1012 .of_match_table = of_match_ptr(rk_iommu_dt_ids),
1014 }, 1013 },
1015}; 1014};
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index a82e542ffc21..0b380603a578 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
4880 byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ 4880 byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
4881 byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; 4881 byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
4882 byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; 4882 byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
4883 byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00"; 4883 byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
4884 byte force_mt_info = false; 4884 byte force_mt_info = false;
4885 byte dir; 4885 byte dir;
4886 dword d; 4886 dword d;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 26515c27ea8c..25e419752a7b 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
330 led_dat->sata = 0; 330 led_dat->sata = 0;
331 led_dat->cdev.brightness = LED_OFF; 331 led_dat->cdev.brightness = LED_OFF;
332 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 332 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
333 /*
334 * If available, expose the SATA activity blink capability through
335 * a "sata" sysfs attribute.
336 */
337 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
338 led_dat->cdev.groups = netxbig_led_groups;
339 led_dat->mode_addr = template->mode_addr; 333 led_dat->mode_addr = template->mode_addr;
340 led_dat->mode_val = template->mode_val; 334 led_dat->mode_val = template->mode_val;
341 led_dat->bright_addr = template->bright_addr; 335 led_dat->bright_addr = template->bright_addr;
342 led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1; 336 led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
343 led_dat->timer = pdata->timer; 337 led_dat->timer = pdata->timer;
344 led_dat->num_timer = pdata->num_timer; 338 led_dat->num_timer = pdata->num_timer;
339 /*
340 * If available, expose the SATA activity blink capability through
341 * a "sata" sysfs attribute.
342 */
343 if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
344 led_dat->cdev.groups = netxbig_led_groups;
345 345
346 return led_classdev_register(&pdev->dev, &led_dat->cdev); 346 return led_classdev_register(&pdev->dev, &led_dat->cdev);
347} 347}
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
index f956ef26c0ce..fb7493dcfb79 100644
--- a/drivers/mcb/mcb-internal.h
+++ b/drivers/mcb/mcb-internal.h
@@ -7,6 +7,7 @@
7#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45 7#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45
8#define CHAMELEON_FILENAME_LEN 12 8#define CHAMELEON_FILENAME_LEN 12
9#define CHAMELEONV2_MAGIC 0xabce 9#define CHAMELEONV2_MAGIC 0xabce
10#define CHAM_HEADER_SIZE 0x200
10 11
11enum chameleon_descriptor_type { 12enum chameleon_descriptor_type {
12 CHAMELEON_DTYPE_GENERAL = 0x0, 13 CHAMELEON_DTYPE_GENERAL = 0x0,
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index b59181965643..5e1bd5db02c8 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -17,6 +17,7 @@
17 17
18struct priv { 18struct priv {
19 struct mcb_bus *bus; 19 struct mcb_bus *bus;
20 phys_addr_t mapbase;
20 void __iomem *base; 21 void __iomem *base;
21}; 22};
22 23
@@ -31,8 +32,8 @@ static int mcb_pci_get_irq(struct mcb_device *mdev)
31 32
32static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 33static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
33{ 34{
35 struct resource *res;
34 struct priv *priv; 36 struct priv *priv;
35 phys_addr_t mapbase;
36 int ret; 37 int ret;
37 int num_cells; 38 int num_cells;
38 unsigned long flags; 39 unsigned long flags;
@@ -47,19 +48,21 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
47 return -ENODEV; 48 return -ENODEV;
48 } 49 }
49 50
50 mapbase = pci_resource_start(pdev, 0); 51 priv->mapbase = pci_resource_start(pdev, 0);
51 if (!mapbase) { 52 if (!priv->mapbase) {
52 dev_err(&pdev->dev, "No PCI resource\n"); 53 dev_err(&pdev->dev, "No PCI resource\n");
53 goto err_start; 54 goto err_start;
54 } 55 }
55 56
56 ret = pci_request_region(pdev, 0, KBUILD_MODNAME); 57 res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
57 if (ret) { 58 KBUILD_MODNAME);
58 dev_err(&pdev->dev, "Failed to request PCI BARs\n"); 59 if (IS_ERR(res)) {
60 dev_err(&pdev->dev, "Failed to request PCI memory\n");
61 ret = PTR_ERR(res);
59 goto err_start; 62 goto err_start;
60 } 63 }
61 64
62 priv->base = pci_iomap(pdev, 0, 0); 65 priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE);
63 if (!priv->base) { 66 if (!priv->base) {
64 dev_err(&pdev->dev, "Cannot ioremap\n"); 67 dev_err(&pdev->dev, "Cannot ioremap\n");
65 ret = -ENOMEM; 68 ret = -ENOMEM;
@@ -84,7 +87,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
84 87
85 priv->bus->get_irq = mcb_pci_get_irq; 88 priv->bus->get_irq = mcb_pci_get_irq;
86 89
87 ret = chameleon_parse_cells(priv->bus, mapbase, priv->base); 90 ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
88 if (ret < 0) 91 if (ret < 0)
89 goto err_drvdata; 92 goto err_drvdata;
90 num_cells = ret; 93 num_cells = ret;
@@ -93,8 +96,10 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
93 96
94 mcb_bus_add_devices(priv->bus); 97 mcb_bus_add_devices(priv->bus);
95 98
99 return 0;
100
96err_drvdata: 101err_drvdata:
97 pci_iounmap(pdev, priv->base); 102 iounmap(priv->base);
98err_ioremap: 103err_ioremap:
99 pci_release_region(pdev, 0); 104 pci_release_region(pdev, 0);
100err_start: 105err_start:
@@ -107,6 +112,10 @@ static void mcb_pci_remove(struct pci_dev *pdev)
107 struct priv *priv = pci_get_drvdata(pdev); 112 struct priv *priv = pci_get_drvdata(pdev);
108 113
109 mcb_release_bus(priv->bus); 114 mcb_release_bus(priv->bus);
115
116 iounmap(priv->base);
117 release_region(priv->mapbase, CHAM_HEADER_SIZE);
118 pci_disable_device(pdev);
110} 119}
111 120
112static const struct pci_device_id mcb_pci_tbl[] = { 121static const struct pci_device_id mcb_pci_tbl[] = {
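
The probe path above now claims just the Chameleon header window by hand and stores mapbase so remove() can undo the mapping. A generic sketch of that claim/map/unmap/release pattern; note that request_mem_region() and ioremap() conventionally return NULL on failure rather than ERR_PTR values, so the sketch checks for NULL. All sketch_* names are placeholders.

#include <linux/ioport.h>
#include <linux/io.h>

struct sketch_priv {
	phys_addr_t mapbase;
	void __iomem *base;
};

static int sketch_map_header(struct sketch_priv *priv,
			     phys_addr_t start, resource_size_t len)
{
	if (!request_mem_region(start, len, "sketch"))
		return -EBUSY;

	priv->base = ioremap(start, len);
	if (!priv->base) {
		release_mem_region(start, len);
		return -ENOMEM;
	}

	priv->mapbase = start;
	return 0;
}

static void sketch_unmap_header(struct sketch_priv *priv, resource_size_t len)
{
	iounmap(priv->base);
	release_mem_region(priv->mapbase, len);
}
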
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 8735543eacdb..493478989dbd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1127,6 +1127,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1127 schedule_zero(tc, virt_block, data_dest, cell, bio); 1127 schedule_zero(tc, virt_block, data_dest, cell, bio);
1128} 1128}
1129 1129
1130static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1131
1132static void check_for_space(struct pool *pool)
1133{
1134 int r;
1135 dm_block_t nr_free;
1136
1137 if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
1138 return;
1139
1140 r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
1141 if (r)
1142 return;
1143
1144 if (nr_free)
1145 set_pool_mode(pool, PM_WRITE);
1146}
1147
1130/* 1148/*
1131 * A non-zero return indicates read_only or fail_io mode. 1149 * A non-zero return indicates read_only or fail_io mode.
1132 * Many callers don't care about the return value. 1150 * Many callers don't care about the return value.
@@ -1141,6 +1159,8 @@ static int commit(struct pool *pool)
1141 r = dm_pool_commit_metadata(pool->pmd); 1159 r = dm_pool_commit_metadata(pool->pmd);
1142 if (r) 1160 if (r)
1143 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); 1161 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1162 else
1163 check_for_space(pool);
1144 1164
1145 return r; 1165 return r;
1146} 1166}
@@ -1159,8 +1179,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
1159 } 1179 }
1160} 1180}
1161 1181
1162static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1163
1164static int alloc_data_block(struct thin_c *tc, dm_block_t *result) 1182static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1165{ 1183{
1166 int r; 1184 int r;
@@ -2155,7 +2173,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2155 pool->process_cell = process_cell_read_only; 2173 pool->process_cell = process_cell_read_only;
2156 pool->process_discard_cell = process_discard_cell; 2174 pool->process_discard_cell = process_discard_cell;
2157 pool->process_prepared_mapping = process_prepared_mapping; 2175 pool->process_prepared_mapping = process_prepared_mapping;
2158 pool->process_prepared_discard = process_prepared_discard_passdown; 2176 pool->process_prepared_discard = process_prepared_discard;
2159 2177
2160 if (!pool->pf.error_if_no_space && no_space_timeout) 2178 if (!pool->pf.error_if_no_space && no_space_timeout)
2161 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); 2179 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -3814,6 +3832,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3814 r = -EINVAL; 3832 r = -EINVAL;
3815 goto bad; 3833 goto bad;
3816 } 3834 }
3835 atomic_set(&tc->refcount, 1);
3836 init_completion(&tc->can_destroy);
3817 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); 3837 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
3818 spin_unlock_irqrestore(&tc->pool->lock, flags); 3838 spin_unlock_irqrestore(&tc->pool->lock, flags);
3819 /* 3839 /*
@@ -3826,9 +3846,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3826 3846
3827 dm_put(pool_md); 3847 dm_put(pool_md);
3828 3848
3829 atomic_set(&tc->refcount, 1);
3830 init_completion(&tc->can_destroy);
3831
3832 return 0; 3849 return 0;
3833 3850
3834bad: 3851bad:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4c06585bf165..b98cd9d84435 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -899,7 +899,7 @@ static void disable_write_same(struct mapped_device *md)
899 899
900static void clone_endio(struct bio *bio, int error) 900static void clone_endio(struct bio *bio, int error)
901{ 901{
902 int r = 0; 902 int r = error;
903 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 903 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
904 struct dm_io *io = tio->io; 904 struct dm_io *io = tio->io;
905 struct mapped_device *md = tio->io->md; 905 struct mapped_device *md = tio->io->md;
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index e2f9df1c0c36..2d7fae94c861 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -519,6 +519,7 @@ static const u8 stmpe1601_regs[] = {
519 [STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB, 519 [STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
520 [STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB, 520 [STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
521 [STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB, 521 [STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
522 [STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB,
522 [STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB, 523 [STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
523 [STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB, 524 [STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
524 [STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB, 525 [STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
@@ -667,6 +668,7 @@ static const u8 stmpe1801_regs[] = {
667 [STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW, 668 [STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
668 [STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW, 669 [STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
669 [STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW, 670 [STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
671 [STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW,
670 [STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW, 672 [STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
671 [STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW, 673 [STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
672}; 674};
@@ -750,6 +752,8 @@ static const u8 stmpe24xx_regs[] = {
750 [STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB, 752 [STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
751 [STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB, 753 [STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
752 [STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB, 754 [STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
755 [STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
756 [STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
753 [STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB, 757 [STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
754 [STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB, 758 [STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
755 [STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB, 759 [STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index bee0abf82040..84adb46b3e2f 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -188,6 +188,7 @@ int stmpe_remove(struct stmpe *stmpe);
188#define STMPE1601_REG_GPIO_ED_MSB 0x8A 188#define STMPE1601_REG_GPIO_ED_MSB 0x8A
189#define STMPE1601_REG_GPIO_RE_LSB 0x8D 189#define STMPE1601_REG_GPIO_RE_LSB 0x8D
190#define STMPE1601_REG_GPIO_FE_LSB 0x8F 190#define STMPE1601_REG_GPIO_FE_LSB 0x8F
191#define STMPE1601_REG_GPIO_PU_LSB 0x91
191#define STMPE1601_REG_GPIO_AF_U_MSB 0x92 192#define STMPE1601_REG_GPIO_AF_U_MSB 0x92
192 193
193#define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3) 194#define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3)
@@ -276,6 +277,8 @@ int stmpe_remove(struct stmpe *stmpe);
276#define STMPE24XX_REG_GPEDR_MSB 0x8C 277#define STMPE24XX_REG_GPEDR_MSB 0x8C
277#define STMPE24XX_REG_GPRER_LSB 0x91 278#define STMPE24XX_REG_GPRER_LSB 0x91
278#define STMPE24XX_REG_GPFER_LSB 0x94 279#define STMPE24XX_REG_GPFER_LSB 0x94
280#define STMPE24XX_REG_GPPUR_LSB 0x97
281#define STMPE24XX_REG_GPPDR_LSB 0x9a
279#define STMPE24XX_REG_GPAFR_U_MSB 0x9B 282#define STMPE24XX_REG_GPAFR_U_MSB 0x9B
280 283
281#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3) 284#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b524371..d1b55fe62817 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
100 return 0; 100 return 0;
101} 101}
102 102
103static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
104{
105 struct cxl_context *ctx = vma->vm_file->private_data;
106 unsigned long address = (unsigned long)vmf->virtual_address;
107 u64 area, offset;
108
109 offset = vmf->pgoff << PAGE_SHIFT;
110
111 pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
112 __func__, ctx->pe, address, offset);
113
114 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
115 area = ctx->afu->psn_phys;
116 if (offset > ctx->afu->adapter->ps_size)
117 return VM_FAULT_SIGBUS;
118 } else {
119 area = ctx->psn_phys;
120 if (offset > ctx->psn_size)
121 return VM_FAULT_SIGBUS;
122 }
123
124 mutex_lock(&ctx->status_mutex);
125
126 if (ctx->status != STARTED) {
127 mutex_unlock(&ctx->status_mutex);
128 pr_devel("%s: Context not started, failing problem state access\n", __func__);
129 return VM_FAULT_SIGBUS;
130 }
131
132 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
133
134 mutex_unlock(&ctx->status_mutex);
135
136 return VM_FAULT_NOPAGE;
137}
138
139static const struct vm_operations_struct cxl_mmap_vmops = {
140 .fault = cxl_mmap_fault,
141};
142
103/* 143/*
104 * Map a per-context mmio space into the given vma. 144 * Map a per-context mmio space into the given vma.
105 */ 145 */
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
108 u64 len = vma->vm_end - vma->vm_start; 148 u64 len = vma->vm_end - vma->vm_start;
109 len = min(len, ctx->psn_size); 149 len = min(len, ctx->psn_size);
110 150
111 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { 151 if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
112 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 152 /* make sure there is a valid per process space for this AFU */
113 return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size); 153 if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
114 } 154 pr_devel("AFU doesn't support mmio space\n");
155 return -EINVAL;
156 }
115 157
116 /* make sure there is a valid per process space for this AFU */ 158 /* Can't mmap until the AFU is enabled */
117 if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { 159 if (!ctx->afu->enabled)
118 pr_devel("AFU doesn't support mmio space\n"); 160 return -EBUSY;
119 return -EINVAL;
120 } 161 }
121 162
122 /* Can't mmap until the AFU is enabled */
123 if (!ctx->afu->enabled)
124 return -EBUSY;
125
126 pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, 163 pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
127 ctx->psn_phys, ctx->pe , ctx->master); 164 ctx->psn_phys, ctx->pe , ctx->master);
128 165
166 vma->vm_flags |= VM_IO | VM_PFNMAP;
129 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 167 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
130 return vm_iomap_memory(vma, ctx->psn_phys, len); 168 vma->vm_ops = &cxl_mmap_vmops;
169 return 0;
131} 170}
132 171
133/* 172/*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
150 afu_release_irqs(ctx); 189 afu_release_irqs(ctx);
151 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 190 flush_work(&ctx->fault_work); /* Only needed for dedicated process */
152 wake_up_all(&ctx->wq); 191 wake_up_all(&ctx->wq);
153
154 /* Release Problem State Area mapping */
155 mutex_lock(&ctx->mapping_lock);
156 if (ctx->mapping)
157 unmap_mapping_range(ctx->mapping, 0, 0, 1);
158 mutex_unlock(&ctx->mapping_lock);
159} 192}
160 193
161/* 194/*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
184 * created and torn down after the IDR removed 217 * created and torn down after the IDR removed
185 */ 218 */
186 __detach_context(ctx); 219 __detach_context(ctx);
220
221 /*
222 * We are force detaching - remove any active PSA mappings so
223 * userspace cannot interfere with the card if it comes back.
224 * Easiest way to exercise this is to unbind and rebind the
225 * driver via sysfs while it is in use.
226 */
227 mutex_lock(&ctx->mapping_lock);
228 if (ctx->mapping)
229 unmap_mapping_range(ctx->mapping, 0, 0, 1);
230 mutex_unlock(&ctx->mapping_lock);
187 } 231 }
188 mutex_unlock(&afu->contexts_lock); 232 mutex_unlock(&afu->contexts_lock);
189} 233}
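
The mmap rework above trades a one-shot vm_iomap_memory() for a vm_operations_struct whose fault handler inserts PFNs on demand; that is what lets the force-detach path revoke mappings later with unmap_mapping_range() while the file stays open. A heavily simplified, generic sketch of the pattern — the sketch_* names are placeholders, not the cxl fields:

#include <linux/mm.h>

struct sketch_ctx {
	phys_addr_t area;	/* physical base of the window */
	size_t size;		/* window size */
	bool started;		/* only fault in pages while active */
};

static int sketch_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct sketch_ctx *ctx = vma->vm_private_data;
	unsigned long addr = (unsigned long)vmf->virtual_address;
	u64 offset = (u64)vmf->pgoff << PAGE_SHIFT;

	if (!ctx->started || offset >= ctx->size)
		return VM_FAULT_SIGBUS;

	vm_insert_pfn(vma, addr, (ctx->area + offset) >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct sketch_vmops = {
	.fault = sketch_mmap_fault,
};

static int sketch_mmap(struct sketch_ctx *ctx, struct vm_area_struct *vma)
{
	vma->vm_private_data = ctx;
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &sketch_vmops;
	return 0;
}
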
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e9f2f10dbb37..b15d8113877c 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
140 140
141 pr_devel("%s: pe: %i\n", __func__, ctx->pe); 141 pr_devel("%s: pe: %i\n", __func__, ctx->pe);
142 142
143 mutex_lock(&ctx->status_mutex); 143 /* Do this outside the status_mutex to avoid a circular dependency with
144 if (ctx->status != OPENED) { 144 * the locking in cxl_mmap_fault() */
145 rc = -EIO;
146 goto out;
147 }
148
149 if (copy_from_user(&work, uwork, 145 if (copy_from_user(&work, uwork,
150 sizeof(struct cxl_ioctl_start_work))) { 146 sizeof(struct cxl_ioctl_start_work))) {
151 rc = -EFAULT; 147 rc = -EFAULT;
152 goto out; 148 goto out;
153 } 149 }
154 150
151 mutex_lock(&ctx->status_mutex);
152 if (ctx->status != OPENED) {
153 rc = -EIO;
154 goto out;
155 }
156
155 /* 157 /*
156 * if any of the reserved fields are set or any of the unused 158 * if any of the reserved fields are set or any of the unused
157 * flags are set it's invalid 159 * flags are set it's invalid
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index ff2755062b44..06ff0a2ec960 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
234 struct mei_me_hw *hw = to_me_hw(dev); 234 struct mei_me_hw *hw = to_me_hw(dev);
235 u32 hcsr = mei_hcsr_read(hw); 235 u32 hcsr = mei_hcsr_read(hw);
236 236
237 /* H_RST may be found lit before reset is started,
238 * for example if preceding reset flow hasn't completed.
239 * In that case asserting H_RST will be ignored, therefore
 240 * we need to clear the H_RST bit to start a successful reset sequence.
241 */
242 if ((hcsr & H_RST) == H_RST) {
243 dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
244 hcsr &= ~H_RST;
245 mei_me_reg_write(hw, H_CSR, hcsr);
246 hcsr = mei_hcsr_read(hw);
247 }
248
237 hcsr |= H_RST | H_IG | H_IS; 249 hcsr |= H_RST | H_IG | H_IS;
238 250
239 if (intr_enable) 251 if (intr_enable)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 02ad79229f65..7466ce098e60 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -886,7 +886,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
886 unsigned idx, bus_width = 0; 886 unsigned idx, bus_width = 0;
887 int err = 0; 887 int err = 0;
888 888
889 if (!mmc_can_ext_csd(card) && 889 if (!mmc_can_ext_csd(card) ||
890 !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) 890 !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
891 return 0; 891 return 0;
892 892
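
The && to || change above is a De Morgan fix: bus-width selection needs both EXT_CSD support and a host advertising 4- or 8-bit data, so the early return must trigger when either is missing. A trivial standalone restatement of the guard (names are placeholders):

#include <stdbool.h>

/* Proceed with bus-width selection only when both conditions hold;
 * equivalently, bail out when either one is missing. */
static bool should_select_bus_width(bool can_ext_csd, bool host_has_wide_bus)
{
	if (!can_ext_csd || !host_has_wide_bus)	/* the corrected test */
		return false;

	return true;	/* same as: can_ext_csd && host_has_wide_bus */
}
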
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index e3e56d35f0ee..970314e0aac8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
247 { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, 247 { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd },
248 { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, 248 { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
249 { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, 249 { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
250 { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
250 { "PNP0D40" }, 251 { "PNP0D40" },
251 { }, 252 { },
252}; 253};
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
257 { "INT33BB" }, 258 { "INT33BB" },
258 { "INT33C6" }, 259 { "INT33C6" },
259 { "INT3436" }, 260 { "INT3436" },
261 { "INT344D" },
260 { "PNP0D40" }, 262 { "PNP0D40" },
261 { }, 263 { },
262}; 264};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 03427755b902..4f38554ce679 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
993 .subdevice = PCI_ANY_ID, 993 .subdevice = PCI_ANY_ID,
994 .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, 994 .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
995 }, 995 },
996
997 {
998 .vendor = PCI_VENDOR_ID_INTEL,
999 .device = PCI_DEVICE_ID_INTEL_SPT_EMMC,
1000 .subvendor = PCI_ANY_ID,
1001 .subdevice = PCI_ANY_ID,
1002 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
1003 },
1004
1005 {
1006 .vendor = PCI_VENDOR_ID_INTEL,
1007 .device = PCI_DEVICE_ID_INTEL_SPT_SDIO,
1008 .subvendor = PCI_ANY_ID,
1009 .subdevice = PCI_ANY_ID,
1010 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
1011 },
1012
1013 {
1014 .vendor = PCI_VENDOR_ID_INTEL,
1015 .device = PCI_DEVICE_ID_INTEL_SPT_SD,
1016 .subvendor = PCI_ANY_ID,
1017 .subdevice = PCI_ANY_ID,
1018 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
1019 },
1020
996 { 1021 {
997 .vendor = PCI_VENDOR_ID_O2, 1022 .vendor = PCI_VENDOR_ID_O2,
998 .device = PCI_DEVICE_ID_O2_8120, 1023 .device = PCI_DEVICE_ID_O2_8120,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d57c3d169914..1ec684d06d54 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -21,6 +21,9 @@
21#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 21#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
22#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 22#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
23#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 23#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7
24#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
25#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
26#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
24 27
25/* 28/*
26 * PCI registers 29 * PCI registers
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 45238871192d..ca3424e7ef71 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
300 if (IS_ERR(host)) 300 if (IS_ERR(host))
301 return PTR_ERR(host); 301 return PTR_ERR(host);
302 302
303 if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
304 ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
305 if (ret < 0)
306 goto err_mbus_win;
307 }
308
309
310 pltfm_host = sdhci_priv(host); 303 pltfm_host = sdhci_priv(host);
311 pltfm_host->priv = pxa; 304 pltfm_host->priv = pxa;
312 305
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
325 if (!IS_ERR(pxa->clk_core)) 318 if (!IS_ERR(pxa->clk_core))
326 clk_prepare_enable(pxa->clk_core); 319 clk_prepare_enable(pxa->clk_core);
327 320
321 if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
322 ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
323 if (ret < 0)
324 goto err_mbus_win;
325 }
326
328 /* enable 1/8V DDR capable */ 327 /* enable 1/8V DDR capable */
329 host->mmc->caps |= MMC_CAP_1_8V_DDR; 328 host->mmc->caps |= MMC_CAP_1_8V_DDR;
330 329
@@ -396,11 +395,11 @@ err_add_host:
396 pm_runtime_disable(&pdev->dev); 395 pm_runtime_disable(&pdev->dev);
397err_of_parse: 396err_of_parse:
398err_cd_req: 397err_cd_req:
398err_mbus_win:
399 clk_disable_unprepare(pxa->clk_io); 399 clk_disable_unprepare(pxa->clk_io);
400 if (!IS_ERR(pxa->clk_core)) 400 if (!IS_ERR(pxa->clk_core))
401 clk_disable_unprepare(pxa->clk_core); 401 clk_disable_unprepare(pxa->clk_core);
402err_clk_get: 402err_clk_get:
403err_mbus_win:
404 sdhci_pltfm_free(pdev); 403 sdhci_pltfm_free(pdev);
405 return ret; 404 return ret;
406} 405}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index cbb245b58538..f1a488ee432f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host)
259 259
260 del_timer_sync(&host->tuning_timer); 260 del_timer_sync(&host->tuning_timer);
261 host->flags &= ~SDHCI_NEEDS_RETUNING; 261 host->flags &= ~SDHCI_NEEDS_RETUNING;
262 host->mmc->max_blk_count =
263 (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
264 } 262 }
265 sdhci_enable_card_detection(host); 263 sdhci_enable_card_detection(host);
266} 264}
@@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1273 spin_unlock_irq(&host->lock); 1271 spin_unlock_irq(&host->lock);
1274 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1272 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1275 spin_lock_irq(&host->lock); 1273 spin_lock_irq(&host->lock);
1274
1275 if (mode != MMC_POWER_OFF)
1276 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1277 else
1278 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1279
1276 return; 1280 return;
1277 } 1281 }
1278 1282
@@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1353 1357
1354 sdhci_runtime_pm_get(host); 1358 sdhci_runtime_pm_get(host);
1355 1359
1360 present = mmc_gpio_get_cd(host->mmc);
1361
1356 spin_lock_irqsave(&host->lock, flags); 1362 spin_lock_irqsave(&host->lock, flags);
1357 1363
1358 WARN_ON(host->mrq != NULL); 1364 WARN_ON(host->mrq != NULL);
@@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1381 * zero: cd-gpio is used, and card is removed 1387 * zero: cd-gpio is used, and card is removed
1382 * one: cd-gpio is used, and card is present 1388 * one: cd-gpio is used, and card is present
1383 */ 1389 */
1384 present = mmc_gpio_get_cd(host->mmc);
1385 if (present < 0) { 1390 if (present < 0) {
1386 /* If polling, assume that the card is always present. */ 1391 /* If polling, assume that the card is always present. */
1387 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 1392 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
1880 return !(present_state & SDHCI_DATA_LVL_MASK); 1885 return !(present_state & SDHCI_DATA_LVL_MASK);
1881} 1886}
1882 1887
1888static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1889{
1890 struct sdhci_host *host = mmc_priv(mmc);
1891 unsigned long flags;
1892
1893 spin_lock_irqsave(&host->lock, flags);
1894 host->flags |= SDHCI_HS400_TUNING;
1895 spin_unlock_irqrestore(&host->lock, flags);
1896
1897 return 0;
1898}
1899
1883static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1900static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1884{ 1901{
1885 struct sdhci_host *host = mmc_priv(mmc); 1902 struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1887 int tuning_loop_counter = MAX_TUNING_LOOP; 1904 int tuning_loop_counter = MAX_TUNING_LOOP;
1888 int err = 0; 1905 int err = 0;
1889 unsigned long flags; 1906 unsigned long flags;
1907 unsigned int tuning_count = 0;
1908 bool hs400_tuning;
1890 1909
1891 sdhci_runtime_pm_get(host); 1910 sdhci_runtime_pm_get(host);
1892 spin_lock_irqsave(&host->lock, flags); 1911 spin_lock_irqsave(&host->lock, flags);
1893 1912
1913 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1914 host->flags &= ~SDHCI_HS400_TUNING;
1915
1916 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
1917 tuning_count = host->tuning_count;
1918
1894 /* 1919 /*
1895 * The Host Controller needs tuning only in case of SDR104 mode 1920 * The Host Controller needs tuning only in case of SDR104 mode
1896 * and for SDR50 mode when Use Tuning for SDR50 is set in the 1921 * and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1899 * tuning function has to be executed. 1924 * tuning function has to be executed.
1900 */ 1925 */
1901 switch (host->timing) { 1926 switch (host->timing) {
1927 /* HS400 tuning is done in HS200 mode */
1902 case MMC_TIMING_MMC_HS400: 1928 case MMC_TIMING_MMC_HS400:
1929 err = -EINVAL;
1930 goto out_unlock;
1931
1903 case MMC_TIMING_MMC_HS200: 1932 case MMC_TIMING_MMC_HS200:
1933 /*
1934 * Periodic re-tuning for HS400 is not expected to be needed, so
1935 * disable it here.
1936 */
1937 if (hs400_tuning)
1938 tuning_count = 0;
1939 break;
1940
1904 case MMC_TIMING_UHS_SDR104: 1941 case MMC_TIMING_UHS_SDR104:
1905 break; 1942 break;
1906 1943
@@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1911 /* FALLTHROUGH */ 1948 /* FALLTHROUGH */
1912 1949
1913 default: 1950 default:
1914 spin_unlock_irqrestore(&host->lock, flags); 1951 goto out_unlock;
1915 sdhci_runtime_pm_put(host);
1916 return 0;
1917 } 1952 }
1918 1953
1919 if (host->ops->platform_execute_tuning) { 1954 if (host->ops->platform_execute_tuning) {
@@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2037 } 2072 }
2038 2073
2039out: 2074out:
2040 /* 2075 host->flags &= ~SDHCI_NEEDS_RETUNING;
2041 * If this is the very first time we are here, we start the retuning 2076
2042 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING 2077 if (tuning_count) {
2043 * flag won't be set, we check this condition before actually starting
2044 * the timer.
2045 */
2046 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
2047 (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
2048 host->flags |= SDHCI_USING_RETUNING_TIMER; 2078 host->flags |= SDHCI_USING_RETUNING_TIMER;
2049 mod_timer(&host->tuning_timer, jiffies + 2079 mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
2050 host->tuning_count * HZ);
2051 /* Tuning mode 1 limits the maximum data length to 4MB */
2052 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
2053 } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
2054 host->flags &= ~SDHCI_NEEDS_RETUNING;
2055 /* Reload the new initial value for timer */
2056 mod_timer(&host->tuning_timer, jiffies +
2057 host->tuning_count * HZ);
2058 } 2080 }
2059 2081
2060 /* 2082 /*
@@ -2070,6 +2092,7 @@ out:
2070 2092
2071 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2093 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2072 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2094 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2095out_unlock:
2073 spin_unlock_irqrestore(&host->lock, flags); 2096 spin_unlock_irqrestore(&host->lock, flags);
2074 sdhci_runtime_pm_put(host); 2097 sdhci_runtime_pm_put(host);
2075 2098
@@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
2110{ 2133{
2111 struct sdhci_host *host = mmc_priv(mmc); 2134 struct sdhci_host *host = mmc_priv(mmc);
2112 unsigned long flags; 2135 unsigned long flags;
2136 int present;
2113 2137
2114 /* First check if client has provided their own card event */ 2138 /* First check if client has provided their own card event */
2115 if (host->ops->card_event) 2139 if (host->ops->card_event)
2116 host->ops->card_event(host); 2140 host->ops->card_event(host);
2117 2141
2142 present = sdhci_do_get_cd(host);
2143
2118 spin_lock_irqsave(&host->lock, flags); 2144 spin_lock_irqsave(&host->lock, flags);
2119 2145
2120 /* Check host->mrq first in case we are runtime suspended */ 2146 /* Check host->mrq first in case we are runtime suspended */
2121 if (host->mrq && !sdhci_do_get_cd(host)) { 2147 if (host->mrq && !present) {
2122 pr_err("%s: Card removed during transfer!\n", 2148 pr_err("%s: Card removed during transfer!\n",
2123 mmc_hostname(host->mmc)); 2149 mmc_hostname(host->mmc));
2124 pr_err("%s: Resetting controller.\n", 2150 pr_err("%s: Resetting controller.\n",
@@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = {
2142 .hw_reset = sdhci_hw_reset, 2168 .hw_reset = sdhci_hw_reset,
2143 .enable_sdio_irq = sdhci_enable_sdio_irq, 2169 .enable_sdio_irq = sdhci_enable_sdio_irq,
2144 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2170 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2171 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2145 .execute_tuning = sdhci_execute_tuning, 2172 .execute_tuning = sdhci_execute_tuning,
2146 .card_event = sdhci_card_event, 2173 .card_event = sdhci_card_event,
2147 .card_busy = sdhci_card_busy, 2174 .card_busy = sdhci_card_busy,
@@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host)
3260 mmc->max_segs = SDHCI_MAX_SEGS; 3287 mmc->max_segs = SDHCI_MAX_SEGS;
3261 3288
3262 /* 3289 /*
3263 * Maximum number of sectors in one transfer. Limited by DMA boundary 3290 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3264 * size (512KiB). 3291 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3292 * is less anyway.
3265 */ 3293 */
3266 mmc->max_req_size = 524288; 3294 mmc->max_req_size = 524288;
3267 3295
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 184c434ae305..0dceba1a2ba1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1648,7 +1648,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1648 /* slave is not a slave or master is not master of this slave */ 1648 /* slave is not a slave or master is not master of this slave */
1649 if (!(slave_dev->flags & IFF_SLAVE) || 1649 if (!(slave_dev->flags & IFF_SLAVE) ||
1650 !netdev_has_upper_dev(slave_dev, bond_dev)) { 1650 !netdev_has_upper_dev(slave_dev, bond_dev)) {
1651 netdev_err(bond_dev, "cannot release %s\n", 1651 netdev_dbg(bond_dev, "cannot release %s\n",
1652 slave_dev->name); 1652 slave_dev->name);
1653 return -EINVAL; 1653 return -EINVAL;
1654 } 1654 }
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index a5fefb9059c5..b306210b02b7 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -257,7 +257,6 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
257 struct vringh_kiov *riov = &cfv->ctx.riov; 257 struct vringh_kiov *riov = &cfv->ctx.riov;
258 unsigned int skb_len; 258 unsigned int skb_len;
259 259
260again:
261 do { 260 do {
262 skb = NULL; 261 skb = NULL;
263 262
@@ -322,7 +321,6 @@ exit:
322 napi_schedule_prep(napi)) { 321 napi_schedule_prep(napi)) {
323 vringh_notify_disable_kern(cfv->vr_rx); 322 vringh_notify_disable_kern(cfv->vr_rx);
324 __napi_schedule(napi); 323 __napi_schedule(napi);
325 goto again;
326 } 324 }
327 break; 325 break;
328 326
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index f363972cd77d..e36d10520e24 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable)
103 mask = 1 << raminit->bits.start | 1 << raminit->bits.done; 103 mask = 1 << raminit->bits.start | 1 << raminit->bits.done;
104 regmap_read(raminit->syscon, raminit->reg, &ctrl); 104 regmap_read(raminit->syscon, raminit->reg, &ctrl);
105 105
106 /* We clear the done and start bit first. The start bit is 106 /* We clear the start bit first. The start bit is
 107 * looking at the 0 -> 1 transition, but is not self clearing; 107 * looking at the 0 -> 1 transition, but is not self clearing;
108 * And we clear the init done bit as well.
109 * NOTE: DONE must be written with 1 to clear it. 108 * NOTE: DONE must be written with 1 to clear it.
109 * We can't clear the DONE bit here using regmap_update_bits()
110 * as it will bypass the write if initial condition is START:0 DONE:1
111 * e.g. on DRA7 which needs START pulse.
110 */ 112 */
111 ctrl &= ~(1 << raminit->bits.start); 113 ctrl &= ~mask; /* START = 0, DONE = 0 */
112 ctrl |= 1 << raminit->bits.done; 114 regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
113 regmap_write(raminit->syscon, raminit->reg, ctrl);
114 115
115 ctrl &= ~(1 << raminit->bits.done); 116 /* check if START bit is 0. Ignore DONE bit for now
116 c_can_hw_raminit_wait_syscon(priv, mask, ctrl); 117 * as it can be either 0 or 1.
118 */
119 c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl);
117 120
118 if (enable) { 121 if (enable) {
119 /* Set start bit and wait for the done bit. */ 122 /* Clear DONE bit & set START bit. */
120 ctrl |= 1 << raminit->bits.start; 123 ctrl |= 1 << raminit->bits.start;
121 regmap_write(raminit->syscon, raminit->reg, ctrl); 124 /* DONE must be written with 1 to clear it */
122 125 ctrl |= 1 << raminit->bits.done;
126 regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
127 /* prevent further clearing of DONE bit */
128 ctrl &= ~(1 << raminit->bits.done);
123 /* clear START bit if start pulse is needed */ 129 /* clear START bit if start pulse is needed */
124 if (raminit->needs_pulse) { 130 if (raminit->needs_pulse) {
125 ctrl &= ~(1 << raminit->bits.start); 131 ctrl &= ~(1 << raminit->bits.start);
126 regmap_write(raminit->syscon, raminit->reg, ctrl); 132 regmap_update_bits(raminit->syscon, raminit->reg,
133 mask, ctrl);
127 } 134 }
128 135
129 ctrl |= 1 << raminit->bits.done; 136 ctrl |= 1 << raminit->bits.done;
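The comment in the hunk above notes that the DONE bit cannot be cleared through regmap_update_bits() when the register already reads START:0 DONE:1, because a read-modify-write helper that skips unchanged values never issues the write-1-to-clear. A minimal standalone sketch of that effect, using a toy register model rather than the kernel regmap API:

/* Toy model: DONE is "write 1 to clear" (W1C); an update helper that skips
 * writes when the computed value equals the current value never reaches the
 * hardware, so DONE stays set.
 */
#include <stdio.h>
#include <stdint.h>

#define START (1u << 0)
#define DONE  (1u << 3)                 /* W1C bit */

static uint32_t hw_reg = DONE;          /* initial condition: START:0 DONE:1 */

static void hw_write(uint32_t val)
{
        /* model W1C: writing 1 to DONE clears it, other bits are stored */
        uint32_t w1c = val & DONE;

        hw_reg = (val & ~DONE) | (hw_reg & DONE & ~w1c);
}

static void update_bits(uint32_t mask, uint32_t val)
{
        uint32_t new = (hw_reg & ~mask) | (val & mask);

        if (new == hw_reg)              /* looks like a no-op ... */
                return;
        hw_write(new);                  /* ... so the W1C write never happens */
}

int main(void)
{
        update_bits(DONE, DONE);        /* intended "write 1 to clear DONE" */
        printf("after update_bits: DONE=%d\n", !!(hw_reg & DONE));     /* 1 */

        hw_write(hw_reg | DONE);        /* a plain write does reach the hw */
        printf("after plain write: DONE=%d\n", !!(hw_reg & DONE));     /* 0 */
        return 0;
}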
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3ec8f6f25e5f..847c1f813261 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -807,10 +807,14 @@ static int can_changelink(struct net_device *dev,
807 if (dev->flags & IFF_UP) 807 if (dev->flags & IFF_UP)
808 return -EBUSY; 808 return -EBUSY;
809 cm = nla_data(data[IFLA_CAN_CTRLMODE]); 809 cm = nla_data(data[IFLA_CAN_CTRLMODE]);
810 if (cm->flags & ~priv->ctrlmode_supported) 810
811 /* check whether changed bits are allowed to be modified */
812 if (cm->mask & ~priv->ctrlmode_supported)
811 return -EOPNOTSUPP; 813 return -EOPNOTSUPP;
814
815 /* clear bits to be modified and copy the flag values */
812 priv->ctrlmode &= ~cm->mask; 816 priv->ctrlmode &= ~cm->mask;
813 priv->ctrlmode |= cm->flags; 817 priv->ctrlmode |= (cm->flags & cm->mask);
814 818
815 /* CAN_CTRLMODE_FD can only be set when driver supports FD */ 819 /* CAN_CTRLMODE_FD can only be set when driver supports FD */
816 if (priv->ctrlmode & CAN_CTRLMODE_FD) 820 if (priv->ctrlmode & CAN_CTRLMODE_FD)
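The fix above validates cm->mask (the bits the request wants to change) against the supported set, and then copies only the masked flag bits into ctrlmode. A minimal standalone sketch of that mask/flags update, using illustrative bit values rather than the real CAN_CTRLMODE_* constants:

/* Sketch of the mask/flags read-modify-write pattern above; bit values are
 * made up for illustration, not the kernel CAN_CTRLMODE_* definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define MODE_LOOPBACK   (1u << 0)
#define MODE_LISTENONLY (1u << 1)
#define MODE_FD         (1u << 4)       /* assume the driver does not support FD */

int main(void)
{
        uint32_t supported = MODE_LOOPBACK | MODE_LISTENONLY;
        uint32_t ctrlmode  = MODE_LISTENONLY;

        /* user request: touch LOOPBACK and LISTENONLY, set only LOOPBACK */
        uint32_t mask  = MODE_LOOPBACK | MODE_LISTENONLY;
        uint32_t flags = MODE_LOOPBACK | MODE_FD;       /* stray FD bit in flags */

        /* 1) only bits named in the mask may be modified */
        if (mask & ~supported) {
                printf("rejected: mask touches unsupported bits\n");
                return 1;
        }

        /* 2) clear the bits to be modified, then copy only masked flag bits,
         *    so the stray FD bit in flags cannot sneak in
         */
        ctrlmode &= ~mask;
        ctrlmode |= (flags & mask);

        printf("ctrlmode = 0x%x\n", (unsigned int)ctrlmode);    /* 0x1: LOOPBACK only */
        return 0;
}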
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d7bc462aafdc..244529881be9 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -955,6 +955,11 @@ static struct net_device *alloc_m_can_dev(void)
955 priv->can.data_bittiming_const = &m_can_data_bittiming_const; 955 priv->can.data_bittiming_const = &m_can_data_bittiming_const;
956 priv->can.do_set_mode = m_can_set_mode; 956 priv->can.do_set_mode = m_can_set_mode;
957 priv->can.do_get_berr_counter = m_can_get_berr_counter; 957 priv->can.do_get_berr_counter = m_can_get_berr_counter;
958
959 /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
960 priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
961
962 /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
958 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 963 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
959 CAN_CTRLMODE_LISTENONLY | 964 CAN_CTRLMODE_LISTENONLY |
960 CAN_CTRLMODE_BERR_REPORTING | 965 CAN_CTRLMODE_BERR_REPORTING |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 541fb7a05625..c32cd61073bc 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -520,10 +520,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
520 skb = alloc_can_err_skb(priv->netdev, &cf); 520 skb = alloc_can_err_skb(priv->netdev, &cf);
521 if (skb) { 521 if (skb) {
522 cf->can_id |= CAN_ERR_RESTARTED; 522 cf->can_id |= CAN_ERR_RESTARTED;
523 netif_rx(skb);
524 523
525 stats->rx_packets++; 524 stats->rx_packets++;
526 stats->rx_bytes += cf->can_dlc; 525 stats->rx_bytes += cf->can_dlc;
526 netif_rx(skb);
527 } else { 527 } else {
528 netdev_err(priv->netdev, 528 netdev_err(priv->netdev,
529 "No memory left for err_skb\n"); 529 "No memory left for err_skb\n");
@@ -770,10 +770,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
770 770
771 priv->can.state = new_state; 771 priv->can.state = new_state;
772 772
773 netif_rx(skb);
774
775 stats->rx_packets++; 773 stats->rx_packets++;
776 stats->rx_bytes += cf->can_dlc; 774 stats->rx_bytes += cf->can_dlc;
775 netif_rx(skb);
777} 776}
778 777
779static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, 778static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
@@ -805,10 +804,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
805 stats->rx_over_errors++; 804 stats->rx_over_errors++;
806 stats->rx_errors++; 805 stats->rx_errors++;
807 806
808 netif_rx(skb);
809
810 stats->rx_packets++; 807 stats->rx_packets++;
811 stats->rx_bytes += cf->can_dlc; 808 stats->rx_bytes += cf->can_dlc;
809 netif_rx(skb);
812 } 810 }
813} 811}
814 812
@@ -887,10 +885,9 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
887 cf->can_dlc); 885 cf->can_dlc);
888 } 886 }
889 887
890 netif_rx(skb);
891
892 stats->rx_packets++; 888 stats->rx_packets++;
893 stats->rx_bytes += cf->can_dlc; 889 stats->rx_bytes += cf->can_dlc;
890 netif_rx(skb);
894} 891}
895 892
896static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, 893static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
@@ -1246,6 +1243,9 @@ static int kvaser_usb_close(struct net_device *netdev)
1246 if (err) 1243 if (err)
1247 netdev_warn(netdev, "Cannot stop device, error %d\n", err); 1244 netdev_warn(netdev, "Cannot stop device, error %d\n", err);
1248 1245
1246 /* reset tx contexts */
1247 kvaser_usb_unlink_tx_urbs(priv);
1248
1249 priv->can.state = CAN_STATE_STOPPED; 1249 priv->can.state = CAN_STATE_STOPPED;
1250 close_candev(priv->netdev); 1250 close_candev(priv->netdev);
1251 1251
@@ -1294,12 +1294,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1294 if (!urb) { 1294 if (!urb) {
1295 netdev_err(netdev, "No memory left for URBs\n"); 1295 netdev_err(netdev, "No memory left for URBs\n");
1296 stats->tx_dropped++; 1296 stats->tx_dropped++;
1297 goto nourbmem; 1297 dev_kfree_skb(skb);
1298 return NETDEV_TX_OK;
1298 } 1299 }
1299 1300
1300 buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); 1301 buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
1301 if (!buf) { 1302 if (!buf) {
1302 stats->tx_dropped++; 1303 stats->tx_dropped++;
1304 dev_kfree_skb(skb);
1303 goto nobufmem; 1305 goto nobufmem;
1304 } 1306 }
1305 1307
@@ -1334,6 +1336,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1334 } 1336 }
1335 } 1337 }
1336 1338
1339 /* This should never happen; it implies a flow control bug */
1337 if (!context) { 1340 if (!context) {
1338 netdev_warn(netdev, "cannot find free context\n"); 1341 netdev_warn(netdev, "cannot find free context\n");
1339 ret = NETDEV_TX_BUSY; 1342 ret = NETDEV_TX_BUSY;
@@ -1364,9 +1367,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1364 if (unlikely(err)) { 1367 if (unlikely(err)) {
1365 can_free_echo_skb(netdev, context->echo_index); 1368 can_free_echo_skb(netdev, context->echo_index);
1366 1369
1367 skb = NULL; /* set to NULL to avoid double free in
1368 * dev_kfree_skb(skb) */
1369
1370 atomic_dec(&priv->active_tx_urbs); 1370 atomic_dec(&priv->active_tx_urbs);
1371 usb_unanchor_urb(urb); 1371 usb_unanchor_urb(urb);
1372 1372
@@ -1388,8 +1388,6 @@ releasebuf:
1388 kfree(buf); 1388 kfree(buf);
1389nobufmem: 1389nobufmem:
1390 usb_free_urb(urb); 1390 usb_free_urb(urb);
1391nourbmem:
1392 dev_kfree_skb(skb);
1393 return ret; 1391 return ret;
1394} 1392}
1395 1393
@@ -1502,6 +1500,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1502 struct kvaser_usb_net_priv *priv; 1500 struct kvaser_usb_net_priv *priv;
1503 int i, err; 1501 int i, err;
1504 1502
1503 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
1504 if (err)
1505 return err;
1506
1505 netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); 1507 netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
1506 if (!netdev) { 1508 if (!netdev) {
1507 dev_err(&intf->dev, "Cannot alloc candev\n"); 1509 dev_err(&intf->dev, "Cannot alloc candev\n");
@@ -1606,9 +1608,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
1606 1608
1607 usb_set_intfdata(intf, dev); 1609 usb_set_intfdata(intf, dev);
1608 1610
1609 for (i = 0; i < MAX_NET_DEVICES; i++)
1610 kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
1611
1612 err = kvaser_usb_get_software_info(dev); 1611 err = kvaser_usb_get_software_info(dev);
1613 if (err) { 1612 if (err) {
1614 dev_err(&intf->dev, 1613 dev_err(&intf->dev,
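The netif_rx() calls in the hunks above are moved after the statistics updates because handing the skb to the stack transfers ownership, so cf->can_dlc must be read first. A minimal standalone sketch of that ordering rule, using plain malloc/free instead of the kernel skb API:

/* Once a buffer has been handed off, its contents must not be read any more,
 * so everything needed for statistics is taken before the hand-off.
 */
#include <stdio.h>
#include <stdlib.h>

struct frame {
        unsigned char len;
        unsigned char data[8];
};

static unsigned long rx_packets, rx_bytes;

/* hand the frame to "the stack": ownership transfers and it may be freed */
static void hand_off(struct frame *f)
{
        free(f);
}

int main(void)
{
        struct frame *f = calloc(1, sizeof(*f));

        if (!f)
                return 1;
        f->len = 8;

        /* read everything we still need *before* giving the frame away */
        rx_packets++;
        rx_bytes += f->len;
        hand_off(f);            /* after this point f must not be touched */

        printf("rx_packets=%lu rx_bytes=%lu\n", rx_packets, rx_bytes);
        return 0;
}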
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 89c8d9fc97de..57e97910c728 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -246,13 +246,13 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
246 246
247 if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { 247 if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
248 dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); 248 dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
249 return -ENODEV; 249 goto err_out;
250 } 250 }
251 251
252 if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { 252 if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
253 dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", 253 dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
254 NE_IO_EXTENT, ioaddr); 254 NE_IO_EXTENT, ioaddr);
255 return -EBUSY; 255 goto err_out;
256 } 256 }
257 257
258 reg0 = inb(ioaddr); 258 reg0 = inb(ioaddr);
@@ -392,6 +392,8 @@ err_out_free_netdev:
392 free_netdev (dev); 392 free_netdev (dev);
393err_out_free_res: 393err_out_free_res:
394 release_region (ioaddr, NE_IO_EXTENT); 394 release_region (ioaddr, NE_IO_EXTENT);
395err_out:
396 pci_disable_device(pdev);
395 return -ENODEV; 397 return -ENODEV;
396} 398}
397 399
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index df76050d0a9d..eadcb053807e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -156,18 +156,6 @@ source "drivers/net/ethernet/realtek/Kconfig"
156source "drivers/net/ethernet/renesas/Kconfig" 156source "drivers/net/ethernet/renesas/Kconfig"
157source "drivers/net/ethernet/rdc/Kconfig" 157source "drivers/net/ethernet/rdc/Kconfig"
158source "drivers/net/ethernet/rocker/Kconfig" 158source "drivers/net/ethernet/rocker/Kconfig"
159
160config S6GMAC
161 tristate "S6105 GMAC ethernet support"
162 depends on XTENSA_VARIANT_S6000
163 select PHYLIB
164 ---help---
165 This driver supports the on chip ethernet device on the
166 S6105 xtensa processor.
167
168 To compile this driver as a module, choose M here. The module
169 will be called s6gmac.
170
171source "drivers/net/ethernet/samsung/Kconfig" 159source "drivers/net/ethernet/samsung/Kconfig"
172source "drivers/net/ethernet/seeq/Kconfig" 160source "drivers/net/ethernet/seeq/Kconfig"
173source "drivers/net/ethernet/silan/Kconfig" 161source "drivers/net/ethernet/silan/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index bf56f8b36e90..1367afcd0a8b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -66,7 +66,6 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
66obj-$(CONFIG_SH_ETH) += renesas/ 66obj-$(CONFIG_SH_ETH) += renesas/
67obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ 67obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
68obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ 68obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
69obj-$(CONFIG_S6GMAC) += s6gmac.o
70obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ 69obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
71obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ 70obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
72obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ 71obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 1fcd5568a352..f3470d96837a 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
850 } 850 }
851 851
852 db->clk = devm_clk_get(&pdev->dev, NULL); 852 db->clk = devm_clk_get(&pdev->dev, NULL);
853 if (IS_ERR(db->clk)) 853 if (IS_ERR(db->clk)) {
854 ret = PTR_ERR(db->clk);
854 goto out; 855 goto out;
856 }
855 857
856 clk_prepare_enable(db->clk); 858 clk_prepare_enable(db->clk);
857 859
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 3498760dc22a..760c72c6e2ac 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1170,10 +1170,6 @@ tx_request_irq_error:
1170init_error: 1170init_error:
1171 free_skbufs(dev); 1171 free_skbufs(dev);
1172alloc_skbuf_error: 1172alloc_skbuf_error:
1173 if (priv->phydev) {
1174 phy_disconnect(priv->phydev);
1175 priv->phydev = NULL;
1176 }
1177phy_error: 1173phy_error:
1178 return ret; 1174 return ret;
1179} 1175}
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
1186 int ret; 1182 int ret;
1187 unsigned long int flags; 1183 unsigned long int flags;
1188 1184
1189 /* Stop and disconnect the PHY */ 1185 /* Stop the PHY */
1190 if (priv->phydev) { 1186 if (priv->phydev)
1191 phy_stop(priv->phydev); 1187 phy_stop(priv->phydev);
1192 phy_disconnect(priv->phydev);
1193 priv->phydev = NULL;
1194 }
1195 1188
1196 netif_stop_queue(dev); 1189 netif_stop_queue(dev);
1197 napi_disable(&priv->napi); 1190 napi_disable(&priv->napi);
@@ -1525,6 +1518,10 @@ err_free_netdev:
1525static int altera_tse_remove(struct platform_device *pdev) 1518static int altera_tse_remove(struct platform_device *pdev)
1526{ 1519{
1527 struct net_device *ndev = platform_get_drvdata(pdev); 1520 struct net_device *ndev = platform_get_drvdata(pdev);
1521 struct altera_tse_private *priv = netdev_priv(ndev);
1522
1523 if (priv->phydev)
1524 phy_disconnect(priv->phydev);
1528 1525
1529 platform_set_drvdata(pdev, NULL); 1526 platform_set_drvdata(pdev, NULL);
1530 altera_tse_mdio_destroy(ndev); 1527 altera_tse_mdio_destroy(ndev);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e398eda07298..c8af3ce3ea38 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
184 schedule_work(&alx->reset_wk); 184 schedule_work(&alx->reset_wk);
185} 185}
186 186
187static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) 187static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
188{ 188{
189 struct alx_rx_queue *rxq = &alx->rxq; 189 struct alx_rx_queue *rxq = &alx->rxq;
190 struct alx_rrd *rrd; 190 struct alx_rrd *rrd;
191 struct alx_buffer *rxb; 191 struct alx_buffer *rxb;
192 struct sk_buff *skb; 192 struct sk_buff *skb;
193 u16 length, rfd_cleaned = 0; 193 u16 length, rfd_cleaned = 0;
194 int work = 0;
194 195
195 while (budget > 0) { 196 while (work < budget) {
196 rrd = &rxq->rrd[rxq->rrd_read_idx]; 197 rrd = &rxq->rrd[rxq->rrd_read_idx];
197 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) 198 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
198 break; 199 break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
203 ALX_GET_FIELD(le32_to_cpu(rrd->word0), 204 ALX_GET_FIELD(le32_to_cpu(rrd->word0),
204 RRD_NOR) != 1) { 205 RRD_NOR) != 1) {
205 alx_schedule_reset(alx); 206 alx_schedule_reset(alx);
206 return 0; 207 return work;
207 } 208 }
208 209
209 rxb = &rxq->bufs[rxq->read_idx]; 210 rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
243 } 244 }
244 245
245 napi_gro_receive(&alx->napi, skb); 246 napi_gro_receive(&alx->napi, skb);
246 budget--; 247 work++;
247 248
248next_pkt: 249next_pkt:
249 if (++rxq->read_idx == alx->rx_ringsz) 250 if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
258 if (rfd_cleaned) 259 if (rfd_cleaned)
259 alx_refill_rx_ring(alx, GFP_ATOMIC); 260 alx_refill_rx_ring(alx, GFP_ATOMIC);
260 261
261 return budget > 0; 262 return work;
262} 263}
263 264
264static int alx_poll(struct napi_struct *napi, int budget) 265static int alx_poll(struct napi_struct *napi, int budget)
265{ 266{
266 struct alx_priv *alx = container_of(napi, struct alx_priv, napi); 267 struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
267 struct alx_hw *hw = &alx->hw; 268 struct alx_hw *hw = &alx->hw;
268 bool complete = true;
269 unsigned long flags; 269 unsigned long flags;
270 bool tx_complete;
271 int work;
270 272
271 complete = alx_clean_tx_irq(alx) && 273 tx_complete = alx_clean_tx_irq(alx);
272 alx_clean_rx_irq(alx, budget); 274 work = alx_clean_rx_irq(alx, budget);
273 275
274 if (!complete) 276 if (!tx_complete || work == budget)
275 return 1; 277 return budget;
276 278
277 napi_complete(&alx->napi); 279 napi_complete(&alx->napi);
278 280
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
284 286
285 alx_post_write(hw); 287 alx_post_write(hw);
286 288
287 return 0; 289 return work;
288} 290}
289 291
290static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) 292static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
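The alx changes above convert the poll routine to the usual NAPI accounting: report how many packets of the budget were consumed, and complete (re-enabling interrupts) only when the budget was not exhausted and TX cleanup finished. A minimal standalone sketch of that contract, not using the kernel NAPI API:

/* Toy polling loop: while the full budget is used, the core keeps polling
 * with interrupts off; once less than the budget is consumed, polling stops
 * and interrupts are re-enabled.
 */
#include <stdio.h>
#include <stdbool.h>

static int pending_rx = 70;             /* pretend packets waiting in the ring */

static bool clean_tx(void)
{
        return true;                    /* pretend TX ring is fully cleaned */
}

static int clean_rx(int budget)
{
        int work = 0;

        while (work < budget && pending_rx > 0) {
                pending_rx--;
                work++;
        }
        return work;
}

static int poll(int budget)
{
        bool tx_done = clean_tx();
        int work = clean_rx(budget);

        if (!tx_done || work == budget)
                return budget;          /* keep polling, IRQs stay off */

        printf("napi_complete(): re-enable interrupts\n");
        return work;
}

int main(void)
{
        int budget = 64;

        while (poll(budget) == budget)  /* the core re-polls while budget is used */
                ;
        printf("remaining packets: %d\n", pending_rx);
        return 0;
}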
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 05c6af6c418f..3007d95fbb9f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
1167 bgmac->int_status = 0; 1167 bgmac->int_status = 0;
1168 } 1168 }
1169 1169
1170 if (handled < weight) 1170 if (handled < weight) {
1171 napi_complete(napi); 1171 napi_complete(napi);
1172 1172 bgmac_chip_intrs_on(bgmac);
1173 bgmac_chip_intrs_on(bgmac); 1173 }
1174 1174
1175 return handled; 1175 return handled;
1176} 1176}
@@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core)
1515 if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) 1515 if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
1516 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); 1516 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
1517 1517
1518 netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
1519
1518 err = bgmac_mii_register(bgmac); 1520 err = bgmac_mii_register(bgmac);
1519 if (err) { 1521 if (err) {
1520 bgmac_err(bgmac, "Cannot register MDIO\n"); 1522 bgmac_err(bgmac, "Cannot register MDIO\n");
@@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core)
1529 1531
1530 netif_carrier_off(net_dev); 1532 netif_carrier_off(net_dev);
1531 1533
1532 netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
1533
1534 return 0; 1534 return 0;
1535 1535
1536err_mii_unregister: 1536err_mii_unregister:
@@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core)
1549{ 1549{
1550 struct bgmac *bgmac = bcma_get_drvdata(core); 1550 struct bgmac *bgmac = bcma_get_drvdata(core);
1551 1551
1552 netif_napi_del(&bgmac->napi);
1553 unregister_netdev(bgmac->net_dev); 1552 unregister_netdev(bgmac->net_dev);
1554 bgmac_mii_unregister(bgmac); 1553 bgmac_mii_unregister(bgmac);
1554 netif_napi_del(&bgmac->napi);
1555 bgmac_dma_free(bgmac); 1555 bgmac_dma_free(bgmac);
1556 bcma_set_drvdata(core, NULL); 1556 bcma_set_drvdata(core, NULL);
1557 free_netdev(bgmac->net_dev); 1557 free_netdev(bgmac->net_dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 9f5e38769a29..72eef9fc883e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12553,9 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev,
12553 return 0; 12553 return 0;
12554} 12554}
12555 12555
12556static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev) 12556static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12557 struct net_device *dev,
12558 netdev_features_t features)
12557{ 12559{
12558 return vxlan_gso_check(skb); 12560 return vxlan_features_check(skb, features);
12559} 12561}
12560 12562
12561static const struct net_device_ops bnx2x_netdev_ops = { 12563static const struct net_device_ops bnx2x_netdev_ops = {
@@ -12589,7 +12591,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
12589#endif 12591#endif
12590 .ndo_get_phys_port_id = bnx2x_get_phys_port_id, 12592 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
12591 .ndo_set_vf_link_state = bnx2x_set_vf_link_state, 12593 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
12592 .ndo_gso_check = bnx2x_gso_check, 12594 .ndo_features_check = bnx2x_features_check,
12593}; 12595};
12594 12596
12595static int bnx2x_set_coherency_mask(struct bnx2x *bp) 12597static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bb48a610b72a..96bf01ba32dd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
7413} 7413}
7414 7414
7415static void tg3_irq_quiesce(struct tg3 *tp) 7415static void tg3_irq_quiesce(struct tg3 *tp)
7416 __releases(tp->lock)
7417 __acquires(tp->lock)
7416{ 7418{
7417 int i; 7419 int i;
7418 7420
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
7421 tp->irq_sync = 1; 7423 tp->irq_sync = 1;
7422 smp_mb(); 7424 smp_mb();
7423 7425
7426 spin_unlock_bh(&tp->lock);
7427
7424 for (i = 0; i < tp->irq_cnt; i++) 7428 for (i = 0; i < tp->irq_cnt; i++)
7425 synchronize_irq(tp->napi[i].irq_vec); 7429 synchronize_irq(tp->napi[i].irq_vec);
7430
7431 spin_lock_bh(&tp->lock);
7426} 7432}
7427 7433
7428/* Fully shutdown all tg3 driver activity elsewhere in the system. 7434/* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
9018 9024
9019/* tp->lock is held. */ 9025/* tp->lock is held. */
9020static int tg3_chip_reset(struct tg3 *tp) 9026static int tg3_chip_reset(struct tg3 *tp)
9027 __releases(tp->lock)
9028 __acquires(tp->lock)
9021{ 9029{
9022 u32 val; 9030 u32 val;
9023 void (*write_op)(struct tg3 *, u32, u32); 9031 void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
9073 } 9081 }
9074 smp_mb(); 9082 smp_mb();
9075 9083
9084 tg3_full_unlock(tp);
9085
9076 for (i = 0; i < tp->irq_cnt; i++) 9086 for (i = 0; i < tp->irq_cnt; i++)
9077 synchronize_irq(tp->napi[i].irq_vec); 9087 synchronize_irq(tp->napi[i].irq_vec);
9078 9088
9089 tg3_full_lock(tp, 0);
9090
9079 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9091 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9080 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; 9092 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9081 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 9093 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
10903{ 10915{
10904 struct tg3 *tp = (struct tg3 *) __opaque; 10916 struct tg3 *tp = (struct tg3 *) __opaque;
10905 10917
10906 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10907 goto restart_timer;
10908
10909 spin_lock(&tp->lock); 10918 spin_lock(&tp->lock);
10910 10919
10920 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10921 spin_unlock(&tp->lock);
10922 goto restart_timer;
10923 }
10924
10911 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10925 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10912 tg3_flag(tp, 57765_CLASS)) 10926 tg3_flag(tp, 57765_CLASS))
10913 tg3_chk_missed_msi(tp); 10927 tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
11101 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11115 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11102 int err; 11116 int err;
11103 11117
11118 rtnl_lock();
11104 tg3_full_lock(tp, 0); 11119 tg3_full_lock(tp, 0);
11105 11120
11106 if (!netif_running(tp->dev)) { 11121 if (!netif_running(tp->dev)) {
11107 tg3_flag_clear(tp, RESET_TASK_PENDING); 11122 tg3_flag_clear(tp, RESET_TASK_PENDING);
11108 tg3_full_unlock(tp); 11123 tg3_full_unlock(tp);
11124 rtnl_unlock();
11109 return; 11125 return;
11110 } 11126 }
11111 11127
@@ -11138,6 +11154,7 @@ out:
11138 tg3_phy_start(tp); 11154 tg3_phy_start(tp);
11139 11155
11140 tg3_flag_clear(tp, RESET_TASK_PENDING); 11156 tg3_flag_clear(tp, RESET_TASK_PENDING);
11157 rtnl_unlock();
11141} 11158}
11142 11159
11143static int tg3_request_irq(struct tg3 *tp, int irq_num) 11160static int tg3_request_irq(struct tg3 *tp, int irq_num)
@@ -17800,23 +17817,6 @@ static int tg3_init_one(struct pci_dev *pdev,
17800 goto err_out_apeunmap; 17817 goto err_out_apeunmap;
17801 } 17818 }
17802 17819
17803 /*
17804 * Reset chip in case UNDI or EFI driver did not shutdown
17805 * DMA self test will enable WDMAC and we'll see (spurious)
17806 * pending DMA on the PCI bus at that point.
17807 */
17808 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17809 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17810 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17811 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17812 }
17813
17814 err = tg3_test_dma(tp);
17815 if (err) {
17816 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17817 goto err_out_apeunmap;
17818 }
17819
17820 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 17820 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17821 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 17821 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17822 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 17822 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
@@ -17861,6 +17861,23 @@ static int tg3_init_one(struct pci_dev *pdev,
17861 sndmbx += 0xc; 17861 sndmbx += 0xc;
17862 } 17862 }
17863 17863
17864 /*
17865 * Reset chip in case UNDI or EFI driver did not shutdown
17866 * DMA self test will enable WDMAC and we'll see (spurious)
17867 * pending DMA on the PCI bus at that point.
17868 */
17869 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17870 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17871 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17872 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17873 }
17874
17875 err = tg3_test_dma(tp);
17876 if (err) {
17877 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17878 goto err_out_apeunmap;
17879 }
17880
17864 tg3_init_coal(tp); 17881 tg3_init_coal(tp);
17865 17882
17866 pci_set_drvdata(pdev, dev); 17883 pci_set_drvdata(pdev, dev);
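The tg3 hunks above drop tp->lock around synchronize_irq(), which may sleep, and document the lock hand-off with sparse annotations. A minimal standalone sketch of how such annotations are declared and how they compile away outside a sparse run; the lock below is a plain variable standing in for the real spinlock:

/* __releases()/__acquires() only mean something to sparse (__CHECKER__);
 * for a normal compile they expand to nothing, as in the kernel headers.
 */
#include <stdio.h>

#ifdef __CHECKER__
#define __releases(x)   __attribute__((context(x, 1, 0)))
#define __acquires(x)   __attribute__((context(x, 0, 1)))
#else
#define __releases(x)
#define __acquires(x)
#endif

static int lock;                        /* stand-in for tp->lock */

static void lock_take(void)    { lock = 1; }
static void lock_release(void) { lock = 0; }

/* caller holds the lock; it is dropped around a wait that may sleep
 * (synchronize_irq() in the hunks above) and re-taken before returning
 */
static void quiesce(void)
        __releases(lock)
        __acquires(lock)
{
        lock_release();
        printf("waiting for in-flight interrupt handlers...\n");
        lock_take();
}

int main(void)
{
        lock_take();
        quiesce();
        lock_release();
        printf("lock held at exit: %d\n", lock);
        return 0;
}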
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7d6aa8c87df8..619083a860a4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -172,7 +172,7 @@ bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
172 172
173 /* Retrieve flash partition info */ 173 /* Retrieve flash partition info */
174 fcomp.comp_status = 0; 174 fcomp.comp_status = 0;
175 init_completion(&fcomp.comp); 175 reinit_completion(&fcomp.comp);
176 spin_lock_irqsave(&bnad->bna_lock, flags); 176 spin_lock_irqsave(&bnad->bna_lock, flags);
177 ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, 177 ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
178 bnad_cb_completion, &fcomp); 178 bnad_cb_completion, &fcomp);
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 55eb7f2af2b4..7ef55f5fa664 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
340 res = PTR_ERR(lp->pclk); 340 res = PTR_ERR(lp->pclk);
341 goto err_free_dev; 341 goto err_free_dev;
342 } 342 }
343 clk_enable(lp->pclk); 343 clk_prepare_enable(lp->pclk);
344 344
345 lp->hclk = ERR_PTR(-ENOENT); 345 lp->hclk = ERR_PTR(-ENOENT);
346 lp->tx_clk = ERR_PTR(-ENOENT); 346 lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
406err_out_unregister_netdev: 406err_out_unregister_netdev:
407 unregister_netdev(dev); 407 unregister_netdev(dev);
408err_disable_clock: 408err_disable_clock:
409 clk_disable(lp->pclk); 409 clk_disable_unprepare(lp->pclk);
410err_free_dev: 410err_free_dev:
411 free_netdev(dev); 411 free_netdev(dev);
412 return res; 412 return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
424 kfree(lp->mii_bus->irq); 424 kfree(lp->mii_bus->irq);
425 mdiobus_free(lp->mii_bus); 425 mdiobus_free(lp->mii_bus);
426 unregister_netdev(dev); 426 unregister_netdev(dev);
427 clk_disable(lp->pclk); 427 clk_disable_unprepare(lp->pclk);
428 free_netdev(dev); 428 free_netdev(dev);
429 429
430 return 0; 430 return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
440 netif_stop_queue(net_dev); 440 netif_stop_queue(net_dev);
441 netif_device_detach(net_dev); 441 netif_device_detach(net_dev);
442 442
443 clk_disable(lp->pclk); 443 clk_disable_unprepare(lp->pclk);
444 } 444 }
445 return 0; 445 return 0;
446} 446}
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
451 struct macb *lp = netdev_priv(net_dev); 451 struct macb *lp = netdev_priv(net_dev);
452 452
453 if (netif_running(net_dev)) { 453 if (netif_running(net_dev)) {
454 clk_enable(lp->pclk); 454 clk_prepare_enable(lp->pclk);
455 455
456 netif_device_attach(net_dev); 456 netif_device_attach(net_dev);
457 netif_start_queue(net_dev); 457 netif_start_queue(net_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index d00a751f0588..6049f70e110c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -96,6 +96,9 @@ struct port_info {
96 s16 xact_addr_filt; /* index of our MAC address filter */ 96 s16 xact_addr_filt; /* index of our MAC address filter */
97 u16 rss_size; /* size of VI's RSS table slice */ 97 u16 rss_size; /* size of VI's RSS table slice */
98 u8 pidx; /* index into adapter port[] */ 98 u8 pidx; /* index into adapter port[] */
99 s8 mdio_addr;
100 u8 port_type; /* firmware port type */
101 u8 mod_type; /* firmware module type */
99 u8 port_id; /* physical port ID */ 102 u8 port_id; /* physical port ID */
100 u8 nqsets; /* # of "Queue Sets" */ 103 u8 nqsets; /* # of "Queue Sets" */
101 u8 first_qset; /* index of first "Queue Set" */ 104 u8 first_qset; /* index of first "Queue Set" */
@@ -522,6 +525,7 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
522 * is "contracted" to provide for the common code. 525 * is "contracted" to provide for the common code.
523 */ 526 */
524void t4vf_os_link_changed(struct adapter *, int, int); 527void t4vf_os_link_changed(struct adapter *, int, int);
528void t4vf_os_portmod_changed(struct adapter *, int);
525 529
526/* 530/*
527 * SGE function prototype declarations. 531 * SGE function prototype declarations.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index aa74ec34a467..a936ee8958c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -44,6 +44,7 @@
44#include <linux/etherdevice.h> 44#include <linux/etherdevice.h>
45#include <linux/debugfs.h> 45#include <linux/debugfs.h>
46#include <linux/ethtool.h> 46#include <linux/ethtool.h>
47#include <linux/mdio.h>
47 48
48#include "t4vf_common.h" 49#include "t4vf_common.h"
49#include "t4vf_defs.h" 50#include "t4vf_defs.h"
@@ -210,6 +211,38 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
210} 211}
211 212
212/* 213/*
214 * The port module type has changed on the indicated "port" (Virtual
215 * Interface).
216 */
217void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
218{
219 static const char * const mod_str[] = {
220 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
221 };
222 const struct net_device *dev = adapter->port[pidx];
223 const struct port_info *pi = netdev_priv(dev);
224
225 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
226 dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
227 dev->name);
228 else if (pi->mod_type < ARRAY_SIZE(mod_str))
229 dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
230 dev->name, mod_str[pi->mod_type]);
231 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
232 dev_info(adapter->pdev_dev, "%s: unsupported optical port "
233 "module inserted\n", dev->name);
234 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
235 dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
236 "forcing TWINAX\n", dev->name);
237 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
238 dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
239 dev->name);
240 else
241 dev_info(adapter->pdev_dev, "%s: unknown module type %d "
242 "inserted\n", dev->name, pi->mod_type);
243}
244
245/*
213 * Net device operations. 246 * Net device operations.
214 * ====================== 247 * ======================
215 */ 248 */
@@ -1193,24 +1226,103 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
1193 * state of the port to which we're linked. 1226 * state of the port to which we're linked.
1194 */ 1227 */
1195 1228
1196/* 1229static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
1197 * Return current port link settings. 1230 unsigned int caps)
1198 */ 1231{
1199static int cxgb4vf_get_settings(struct net_device *dev, 1232 unsigned int v = 0;
1200 struct ethtool_cmd *cmd) 1233
1201{ 1234 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1202 const struct port_info *pi = netdev_priv(dev); 1235 type == FW_PORT_TYPE_BT_XAUI) {
1236 v |= SUPPORTED_TP;
1237 if (caps & FW_PORT_CAP_SPEED_100M)
1238 v |= SUPPORTED_100baseT_Full;
1239 if (caps & FW_PORT_CAP_SPEED_1G)
1240 v |= SUPPORTED_1000baseT_Full;
1241 if (caps & FW_PORT_CAP_SPEED_10G)
1242 v |= SUPPORTED_10000baseT_Full;
1243 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1244 v |= SUPPORTED_Backplane;
1245 if (caps & FW_PORT_CAP_SPEED_1G)
1246 v |= SUPPORTED_1000baseKX_Full;
1247 if (caps & FW_PORT_CAP_SPEED_10G)
1248 v |= SUPPORTED_10000baseKX4_Full;
1249 } else if (type == FW_PORT_TYPE_KR)
1250 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1251 else if (type == FW_PORT_TYPE_BP_AP)
1252 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1253 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1254 else if (type == FW_PORT_TYPE_BP4_AP)
1255 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1256 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1257 SUPPORTED_10000baseKX4_Full;
1258 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1259 type == FW_PORT_TYPE_FIBER_XAUI ||
1260 type == FW_PORT_TYPE_SFP ||
1261 type == FW_PORT_TYPE_QSFP_10G ||
1262 type == FW_PORT_TYPE_QSA) {
1263 v |= SUPPORTED_FIBRE;
1264 if (caps & FW_PORT_CAP_SPEED_1G)
1265 v |= SUPPORTED_1000baseT_Full;
1266 if (caps & FW_PORT_CAP_SPEED_10G)
1267 v |= SUPPORTED_10000baseT_Full;
1268 } else if (type == FW_PORT_TYPE_BP40_BA ||
1269 type == FW_PORT_TYPE_QSFP) {
1270 v |= SUPPORTED_40000baseSR4_Full;
1271 v |= SUPPORTED_FIBRE;
1272 }
1273
1274 if (caps & FW_PORT_CAP_ANEG)
1275 v |= SUPPORTED_Autoneg;
1276 return v;
1277}
1278
1279static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1280{
1281 const struct port_info *p = netdev_priv(dev);
1282
1283 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1284 p->port_type == FW_PORT_TYPE_BT_XFI ||
1285 p->port_type == FW_PORT_TYPE_BT_XAUI)
1286 cmd->port = PORT_TP;
1287 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1288 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1289 cmd->port = PORT_FIBRE;
1290 else if (p->port_type == FW_PORT_TYPE_SFP ||
1291 p->port_type == FW_PORT_TYPE_QSFP_10G ||
1292 p->port_type == FW_PORT_TYPE_QSA ||
1293 p->port_type == FW_PORT_TYPE_QSFP) {
1294 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
1295 p->mod_type == FW_PORT_MOD_TYPE_SR ||
1296 p->mod_type == FW_PORT_MOD_TYPE_ER ||
1297 p->mod_type == FW_PORT_MOD_TYPE_LRM)
1298 cmd->port = PORT_FIBRE;
1299 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1300 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1301 cmd->port = PORT_DA;
1302 else
1303 cmd->port = PORT_OTHER;
1304 } else
1305 cmd->port = PORT_OTHER;
1203 1306
1204 cmd->supported = pi->link_cfg.supported; 1307 if (p->mdio_addr >= 0) {
1205 cmd->advertising = pi->link_cfg.advertising; 1308 cmd->phy_address = p->mdio_addr;
1309 cmd->transceiver = XCVR_EXTERNAL;
1310 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1311 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1312 } else {
1313 cmd->phy_address = 0; /* not really, but no better option */
1314 cmd->transceiver = XCVR_INTERNAL;
1315 cmd->mdio_support = 0;
1316 }
1317
1318 cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
1319 p->link_cfg.supported);
1320 cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
1321 p->link_cfg.advertising);
1206 ethtool_cmd_speed_set(cmd, 1322 ethtool_cmd_speed_set(cmd,
1207 netif_carrier_ok(dev) ? pi->link_cfg.speed : -1); 1323 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1208 cmd->duplex = DUPLEX_FULL; 1324 cmd->duplex = DUPLEX_FULL;
1209 1325 cmd->autoneg = p->link_cfg.autoneg;
1210 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1211 cmd->phy_address = pi->port_id;
1212 cmd->transceiver = XCVR_EXTERNAL;
1213 cmd->autoneg = pi->link_cfg.autoneg;
1214 cmd->maxtxpkt = 0; 1326 cmd->maxtxpkt = 0;
1215 cmd->maxrxpkt = 0; 1327 cmd->maxrxpkt = 0;
1216 return 0; 1328 return 0;
@@ -2318,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter)
2318 */ 2430 */
2319 n10g = 0; 2431 n10g = 0;
2320 for_each_port(adapter, pidx) 2432 for_each_port(adapter, pidx)
2321 n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); 2433 n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2322 2434
2323 /* 2435 /*
2324 * We default to 1 queue per non-10G port and up to # of cores queues 2436 * We default to 1 queue per non-10G port and up to # of cores queues
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8d3237f5e364..b9debb4f29a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -230,7 +230,7 @@ struct adapter_params {
230 230
231static inline bool is_10g_port(const struct link_config *lc) 231static inline bool is_10g_port(const struct link_config *lc)
232{ 232{
233 return (lc->supported & SUPPORTED_10000baseT_Full) != 0; 233 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
234} 234}
235 235
236static inline bool is_x_10g_port(const struct link_config *lc) 236static inline bool is_x_10g_port(const struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 02e8833b7797..60426cf890a7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -245,6 +245,10 @@ static int hash_mac_addr(const u8 *addr)
245 return a & 0x3f; 245 return a & 0x3f;
246} 246}
247 247
248#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
249 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
250 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
251
248/** 252/**
249 * init_link_config - initialize a link's SW state 253 * init_link_config - initialize a link's SW state
250 * @lc: structure holding the link state 254 * @lc: structure holding the link state
@@ -259,8 +263,8 @@ static void init_link_config(struct link_config *lc, unsigned int caps)
259 lc->requested_speed = 0; 263 lc->requested_speed = 0;
260 lc->speed = 0; 264 lc->speed = 0;
261 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 265 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
262 if (lc->supported & SUPPORTED_Autoneg) { 266 if (lc->supported & FW_PORT_CAP_ANEG) {
263 lc->advertising = lc->supported; 267 lc->advertising = lc->supported & ADVERT_MASK;
264 lc->autoneg = AUTONEG_ENABLE; 268 lc->autoneg = AUTONEG_ENABLE;
265 lc->requested_fc |= PAUSE_AUTONEG; 269 lc->requested_fc |= PAUSE_AUTONEG;
266 } else { 270 } else {
@@ -280,7 +284,6 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
280 struct fw_vi_cmd vi_cmd, vi_rpl; 284 struct fw_vi_cmd vi_cmd, vi_rpl;
281 struct fw_port_cmd port_cmd, port_rpl; 285 struct fw_port_cmd port_cmd, port_rpl;
282 int v; 286 int v;
283 u32 word;
284 287
285 /* 288 /*
286 * Execute a VI Read command to get our Virtual Interface information 289 * Execute a VI Read command to get our Virtual Interface information
@@ -319,19 +322,13 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
319 if (v) 322 if (v)
320 return v; 323 return v;
321 324
322 v = 0; 325 v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
323 word = be16_to_cpu(port_rpl.u.info.pcap); 326 pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
324 if (word & FW_PORT_CAP_SPEED_100M) 327 FW_PORT_CMD_MDIOADDR_G(v) : -1;
325 v |= SUPPORTED_100baseT_Full; 328 pi->port_type = FW_PORT_CMD_PTYPE_G(v);
326 if (word & FW_PORT_CAP_SPEED_1G) 329 pi->mod_type = FW_PORT_MOD_TYPE_NA;
327 v |= SUPPORTED_1000baseT_Full; 330
328 if (word & FW_PORT_CAP_SPEED_10G) 331 init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
329 v |= SUPPORTED_10000baseT_Full;
330 if (word & FW_PORT_CAP_SPEED_40G)
331 v |= SUPPORTED_40000baseSR4_Full;
332 if (word & FW_PORT_CAP_ANEG)
333 v |= SUPPORTED_Autoneg;
334 init_link_config(&pi->link_cfg, v);
335 332
336 return 0; 333 return 0;
337} 334}
@@ -1491,7 +1488,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1491 */ 1488 */
1492 const struct fw_port_cmd *port_cmd = 1489 const struct fw_port_cmd *port_cmd =
1493 (const struct fw_port_cmd *)rpl; 1490 (const struct fw_port_cmd *)rpl;
1494 u32 word; 1491 u32 stat, mod;
1495 int action, port_id, link_ok, speed, fc, pidx; 1492 int action, port_id, link_ok, speed, fc, pidx;
1496 1493
1497 /* 1494 /*
@@ -1509,21 +1506,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1509 port_id = FW_PORT_CMD_PORTID_G( 1506 port_id = FW_PORT_CMD_PORTID_G(
1510 be32_to_cpu(port_cmd->op_to_portid)); 1507 be32_to_cpu(port_cmd->op_to_portid));
1511 1508
1512 word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); 1509 stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
1513 link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0; 1510 link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
1514 speed = 0; 1511 speed = 0;
1515 fc = 0; 1512 fc = 0;
1516 if (word & FW_PORT_CMD_RXPAUSE_F) 1513 if (stat & FW_PORT_CMD_RXPAUSE_F)
1517 fc |= PAUSE_RX; 1514 fc |= PAUSE_RX;
1518 if (word & FW_PORT_CMD_TXPAUSE_F) 1515 if (stat & FW_PORT_CMD_TXPAUSE_F)
1519 fc |= PAUSE_TX; 1516 fc |= PAUSE_TX;
1520 if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) 1517 if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
1521 speed = 100; 1518 speed = 100;
1522 else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) 1519 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
1523 speed = 1000; 1520 speed = 1000;
1524 else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) 1521 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
1525 speed = 10000; 1522 speed = 10000;
1526 else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) 1523 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
1527 speed = 40000; 1524 speed = 40000;
1528 1525
1529 /* 1526 /*
@@ -1540,12 +1537,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1540 continue; 1537 continue;
1541 1538
1542 lc = &pi->link_cfg; 1539 lc = &pi->link_cfg;
1540
1541 mod = FW_PORT_CMD_MODTYPE_G(stat);
1542 if (mod != pi->mod_type) {
1543 pi->mod_type = mod;
1544 t4vf_os_portmod_changed(adapter, pidx);
1545 }
1546
1543 if (link_ok != lc->link_ok || speed != lc->speed || 1547 if (link_ok != lc->link_ok || speed != lc->speed ||
1544 fc != lc->fc) { 1548 fc != lc->fc) {
1545 /* something changed */ 1549 /* something changed */
1546 lc->link_ok = link_ok; 1550 lc->link_ok = link_ok;
1547 lc->speed = speed; 1551 lc->speed = speed;
1548 lc->fc = fc; 1552 lc->fc = fc;
1553 lc->supported =
1554 be16_to_cpu(port_cmd->u.info.pcap);
1549 t4vf_os_link_changed(adapter, pidx, link_ok); 1555 t4vf_os_link_changed(adapter, pidx, link_ok);
1550 } 1556 }
1551 } 1557 }
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 868d0f605d60..b29e027c476e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1060,10 +1060,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1060 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); 1060 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1061 } 1061 }
1062 1062
1063 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { 1063 /* Hardware does not provide whole packet checksum. It only
1064 skb->csum = htons(checksum); 1064 * provides pseudo checksum. Since hw validates the packet
1065 skb->ip_summed = CHECKSUM_COMPLETE; 1065 * checksum but does not provide the checksum value, use
1066 } 1066 * CHECKSUM_UNNECESSARY.
1067 */
1068 if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
1069 ipv4_csum_ok)
1070 skb->ip_summed = CHECKSUM_UNNECESSARY;
1067 1071
1068 if (vlan_stripped) 1072 if (vlan_stripped)
1069 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); 1073 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
@@ -1612,7 +1616,7 @@ static int enic_open(struct net_device *netdev)
1612 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1616 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1613 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1617 netdev_err(netdev, "Unable to alloc receive buffers\n");
1614 err = -ENOMEM; 1618 err = -ENOMEM;
1615 goto err_out_notify_unset; 1619 goto err_out_free_rq;
1616 } 1620 }
1617 } 1621 }
1618 1622
@@ -1645,7 +1649,9 @@ static int enic_open(struct net_device *netdev)
1645 1649
1646 return 0; 1650 return 0;
1647 1651
1648err_out_notify_unset: 1652err_out_free_rq:
1653 for (i = 0; i < enic->rq_count; i++)
1654 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
1649 enic_dev_notify_unset(enic); 1655 enic_dev_notify_unset(enic);
1650err_out_free_intr: 1656err_out_free_intr:
1651 enic_free_intr(enic); 1657 enic_free_intr(enic);
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index a379c3e4b57f..13d00a38a5bd 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
398 * break out of while loop if there are no more 398 * break out of while loop if there are no more
399 * packets waiting 399 * packets waiting
400 */ 400 */
401 if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { 401 if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
402 napi_complete(napi); 402 break;
403 int_enable = dnet_readl(bp, INTR_ENB);
404 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
405 dnet_writel(bp, int_enable, INTR_ENB);
406 return 0;
407 }
408 403
409 cmd_word = dnet_readl(bp, RX_LEN_FIFO); 404 cmd_word = dnet_readl(bp, RX_LEN_FIFO);
410 pkt_len = cmd_word & 0xFFFF; 405 pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
433 "size %u.\n", dev->name, pkt_len); 428 "size %u.\n", dev->name, pkt_len);
434 } 429 }
435 430
436 budget -= npackets;
437
438 if (npackets < budget) { 431 if (npackets < budget) {
439 /* We processed all packets available. Tell NAPI it can 432 /* We processed all packets available. Tell NAPI it can
440 * stop polling then re-enable rx interrupts */ 433 * stop polling then re-enable rx interrupts.
434 */
441 napi_complete(napi); 435 napi_complete(napi);
442 int_enable = dnet_readl(bp, INTR_ENB); 436 int_enable = dnet_readl(bp, INTR_ENB);
443 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; 437 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
444 dnet_writel(bp, int_enable, INTR_ENB); 438 dnet_writel(bp, int_enable, INTR_ENB);
445 return 0;
446 } 439 }
447 440
448 /* There are still packets waiting */ 441 return npackets;
449 return 1;
450} 442}
451 443
452static irqreturn_t dnet_interrupt(int irq, void *dev_id) 444static irqreturn_t dnet_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 196073110e32..d48806b5cd88 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4383,8 +4383,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4383 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload 4383 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4384 * is expected to work across all types of IP tunnels once exported. Skyhawk 4384 * is expected to work across all types of IP tunnels once exported. Skyhawk
4385 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN 4385 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4386 * offloads in hw_enc_features only when a VxLAN port is added. Note this only 4386 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4387 * ensures that other tunnels work fine while VxLAN offloads are not enabled. 4387 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4388 * those other tunnels are unexported on the fly through ndo_features_check().
4388 * 4389 *
4389 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack 4390 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4390 * adds more than one port, disable offloads and don't re-enable them again 4391 * adds more than one port, disable offloads and don't re-enable them again
@@ -4459,9 +4460,45 @@ done:
4459 adapter->vxlan_port_count--; 4460 adapter->vxlan_port_count--;
4460} 4461}
4461 4462
4462static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) 4463static netdev_features_t be_features_check(struct sk_buff *skb,
4464 struct net_device *dev,
4465 netdev_features_t features)
4463{ 4466{
4464 return vxlan_gso_check(skb); 4467 struct be_adapter *adapter = netdev_priv(dev);
4468 u8 l4_hdr = 0;
4469
4470 /* The code below restricts offload features for some tunneled packets.
4471 * Offload features for normal (non tunnel) packets are unchanged.
4472 */
4473 if (!skb->encapsulation ||
4474 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4475 return features;
4476
4477 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4478 * should disable tunnel offload features if it's not a VxLAN packet,
4479 * as tunnel offloads have been enabled only for VxLAN. This is done to
4480 * allow other tunneled traffic like GRE to work fine while VxLAN
4481 * offloads are configured in Skyhawk-R.
4482 */
4483 switch (vlan_get_protocol(skb)) {
4484 case htons(ETH_P_IP):
4485 l4_hdr = ip_hdr(skb)->protocol;
4486 break;
4487 case htons(ETH_P_IPV6):
4488 l4_hdr = ipv6_hdr(skb)->nexthdr;
4489 break;
4490 default:
4491 return features;
4492 }
4493
4494 if (l4_hdr != IPPROTO_UDP ||
4495 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4496 skb->inner_protocol != htons(ETH_P_TEB) ||
4497 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4498 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4499 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4500
4501 return features;
4465} 4502}
4466#endif 4503#endif
4467 4504
@@ -4492,7 +4529,7 @@ static const struct net_device_ops be_netdev_ops = {
4492#ifdef CONFIG_BE2NET_VXLAN 4529#ifdef CONFIG_BE2NET_VXLAN
4493 .ndo_add_vxlan_port = be_add_vxlan_port, 4530 .ndo_add_vxlan_port = be_add_vxlan_port,
4494 .ndo_del_vxlan_port = be_del_vxlan_port, 4531 .ndo_del_vxlan_port = be_del_vxlan_port,
4495 .ndo_gso_check = be_gso_check, 4532 .ndo_features_check = be_features_check,
4496#endif 4533#endif
4497}; 4534};
4498 4535
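be_features_check() above keeps tunnel offloads only when the frame really looks like VxLAN: the outer L4 protocol is UDP, the inner protocol is an Ethernet frame (ETH_P_TEB), and the inner MAC header sits exactly one UDP header plus one VXLAN header past the outer transport header. A minimal standalone sketch of that length check, with simplified stand-in structs rather than the kernel definitions:

/* Header-length arithmetic behind the VxLAN check: UDP header (8 bytes) plus
 * VXLAN header (8 bytes) must separate the outer transport header from the
 * inner Ethernet header.
 */
#include <stdio.h>
#include <stdint.h>

struct udphdr {
        uint16_t source, dest, len, check;      /* 8 bytes */
};

struct vxlanhdr {
        uint32_t vx_flags;
        uint32_t vx_vni;                        /* 8 bytes */
};

int main(void)
{
        /* offsets into a hypothetical VxLAN-encapsulated frame:
         * outer Ethernet (14) + outer IPv4 (20) + UDP (8) + VXLAN (8)
         */
        unsigned int transport_header = 14 + 20;        /* start of outer UDP */
        unsigned int inner_mac_header = 50;             /* start of inner Ethernet */

        printf("gap = %u, expected = %zu\n",
               inner_mac_header - transport_header,
               sizeof(struct udphdr) + sizeof(struct vxlanhdr));

        if (inner_mac_header - transport_header ==
            sizeof(struct udphdr) + sizeof(struct vxlanhdr))
                printf("looks like VxLAN: keep tunnel offloads\n");
        else
                printf("not VxLAN: drop checksum and GSO offloads\n");
        return 0;
}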
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 469691ad4a1e..40132929daf7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -424,6 +424,8 @@ struct bufdesc_ex {
424 * (40ns * 6). 424 * (40ns * 6).
425 */ 425 */
426#define FEC_QUIRK_BUG_CAPTURE (1 << 10) 426#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
427/* Controller has only one MDIO bus */
428#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
427 429
428struct fec_enet_priv_tx_q { 430struct fec_enet_priv_tx_q {
429 int index; 431 int index;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5ebdf8dc8a31..bba87775419d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
91 .driver_data = 0, 91 .driver_data = 0,
92 }, { 92 }, {
93 .name = "imx28-fec", 93 .name = "imx28-fec",
94 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 94 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
95 FEC_QUIRK_SINGLE_MDIO,
95 }, { 96 }, {
96 .name = "imx6q-fec", 97 .name = "imx6q-fec",
97 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 98 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1937 int err = -ENXIO, i; 1938 int err = -ENXIO, i;
1938 1939
1939 /* 1940 /*
1940 * The dual fec interfaces are not equivalent with enet-mac. 1941 * The i.MX28 dual fec interfaces are not equal.
1941 * Here are the differences: 1942 * Here are the differences:
1942 * 1943 *
1943 * - fec0 supports MII & RMII modes while fec1 only supports RMII 1944 * - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1952 * mdio interface in board design, and need to be configured by 1953 * mdio interface in board design, and need to be configured by
1953 * fec0 mii_bus. 1954 * fec0 mii_bus.
1954 */ 1955 */
1955 if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { 1956 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1956 /* fec1 uses fec0 mii_bus */ 1957 /* fec1 uses fec0 mii_bus */
1957 if (mii_cnt && fec0_mii_bus) { 1958 if (mii_cnt && fec0_mii_bus) {
1958 fep->mii_bus = fec0_mii_bus; 1959 fep->mii_bus = fec0_mii_bus;
@@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
2015 mii_cnt++; 2016 mii_cnt++;
2016 2017
2017 /* save fec0 mii_bus */ 2018 /* save fec0 mii_bus */
2018 if (fep->quirks & FEC_QUIRK_ENET_MAC) 2019 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2019 fec0_mii_bus = fep->mii_bus; 2020 fec0_mii_bus = fep->mii_bus;
2020 2021
2021 return 0; 2022 return 0;
@@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev)
3129 pdev->id_entry = of_id->data; 3130 pdev->id_entry = of_id->data;
3130 fep->quirks = pdev->id_entry->driver_data; 3131 fep->quirks = pdev->id_entry->driver_data;
3131 3132
3133 fep->netdev = ndev;
3132 fep->num_rx_queues = num_rx_qs; 3134 fep->num_rx_queues = num_rx_qs;
3133 fep->num_tx_queues = num_tx_qs; 3135 fep->num_tx_queues = num_tx_qs;
3134 3136
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 5b8300a32bf5..4d61ef50b465 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -281,6 +281,17 @@ config I40E_DCB
281 281
282 If unsure, say N. 282 If unsure, say N.
283 283
284config I40E_FCOE
285 bool "Fibre Channel over Ethernet (FCoE)"
286 default n
287 depends on I40E && DCB && FCOE
288 ---help---
289 Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
290 in the driver. This will create new netdev for exclusive FCoE
291 use with XL710 FCoE offloads enabled.
292
293 If unsure, say N.
294
284config I40EVF 295config I40EVF
285 tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" 296 tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
286 depends on PCI_MSI 297 depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 781065eb5431..e9c3a87e5b11 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
1543 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); 1543 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
1544 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && 1544 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1545 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && 1545 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
1546 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { 1546 (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
1547 /* enable/disable MDI/MDI-X auto-switching. */ 1547 /* enable/disable MDI/MDI-X auto-switching. */
1548 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 1548 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1549 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); 1549 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 4b94ddb29c24..c40581999121 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
44 i40e_virtchnl_pf.o 44 i40e_virtchnl_pf.o
45 45
46i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o 46i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
47i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o 47i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 433a55886ad2..cb0de455683e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
829 if (desc_n >= ring->count || desc_n < 0) { 829 if (desc_n >= ring->count || desc_n < 0) {
830 dev_info(&pf->pdev->dev, 830 dev_info(&pf->pdev->dev,
831 "descriptor %d not found\n", desc_n); 831 "descriptor %d not found\n", desc_n);
832 return; 832 goto out;
833 } 833 }
834 if (!is_rx_ring) { 834 if (!is_rx_ring) {
835 txd = I40E_TX_DESC(ring, desc_n); 835 txd = I40E_TX_DESC(ring, desc_n);
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
855 } else { 855 } else {
856 dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); 856 dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
857 } 857 }
858
859out:
858 kfree(ring); 860 kfree(ring);
859} 861}
860 862
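The i40e_dbg_dump_desc() hunk above converts an early return on an out-of-range descriptor index into a goto, so the temporarily allocated ring copy is always freed. A small standalone sketch of that single-exit cleanup pattern; dump_desc() and its arguments are illustrative, not the driver's interface.

/* Illustrative sketch of the goto-based cleanup the hunk above adopts. */
#include <stdio.h>
#include <stdlib.h>

static void dump_desc(int desc_n, int count)
{
	int *ring = calloc(count, sizeof(*ring));

	if (!ring)
		return;

	if (desc_n >= count || desc_n < 0) {
		fprintf(stderr, "descriptor %d not found\n", desc_n);
		goto out;               /* an early return here would leak */
	}

	printf("descriptor %d = %d\n", desc_n, ring[desc_n]);

out:
	free(ring);                     /* single cleanup point */
}

int main(void)
{
	dump_desc(3, 8);    /* valid index */
	dump_desc(9, 8);    /* out of range, buffer still freed */
	return 0;
}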
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 045b5c4b98b3..ad802dd0f67a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -78,7 +78,7 @@ do { \
78} while (0) 78} while (0)
79 79
80typedef enum i40e_status_code i40e_status; 80typedef enum i40e_status_code i40e_status;
81#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 81#ifdef CONFIG_I40E_FCOE
82#define I40E_FCOE 82#define I40E_FCOE
83#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ 83#endif
84#endif /* _I40E_OSDEP_H_ */ 84#endif /* _I40E_OSDEP_H_ */
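Together, the Kconfig, Makefile and i40e_osdep.h hunks above move the FCoE build decision onto a dedicated CONFIG_I40E_FCOE symbol instead of inferring it from CONFIG_FCOE/CONFIG_FCOE_MODULE, so i40e_fcoe.o and the I40E_FCOE feature macro exist only when that option is enabled. A trivial sketch of such a preprocessor gate; CONFIG_DEMO_FCOE and DEMO_FCOE are made-up names standing in for the real symbols.

/* Sketch only: CONFIG_DEMO_FCOE stands in for a Kconfig symbol that the
 * build system would normally inject with -DCONFIG_DEMO_FCOE=1.
 */
#include <stdio.h>

#ifdef CONFIG_DEMO_FCOE
#define DEMO_FCOE                  /* feature macro, like I40E_FCOE */
#endif

int main(void)
{
#ifdef DEMO_FCOE
	puts("FCoE paths compiled in");
#else
	puts("FCoE paths compiled out");
#endif
	return 0;
}

Building with -DCONFIG_DEMO_FCOE flips the output, mirroring how i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o adds the object only when the symbol is =y.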
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 04b441460bbd..cecb340898fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
658 return le32_to_cpu(*(volatile __le32 *)head); 658 return le32_to_cpu(*(volatile __le32 *)head);
659} 659}
660 660
661#define WB_STRIDE 0x3
662
661/** 663/**
662 * i40e_clean_tx_irq - Reclaim resources after transmit completes 664 * i40e_clean_tx_irq - Reclaim resources after transmit completes
663 * @tx_ring: tx ring to clean 665 * @tx_ring: tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
759 tx_ring->q_vector->tx.total_bytes += total_bytes; 761 tx_ring->q_vector->tx.total_bytes += total_bytes;
760 tx_ring->q_vector->tx.total_packets += total_packets; 762 tx_ring->q_vector->tx.total_packets += total_packets;
761 763
764 /* check to see if there are any non-cache aligned descriptors
765 * waiting to be written back, and kick the hardware to force
766 * them to be written back in case of napi polling
767 */
768 if (budget &&
769 !((i & WB_STRIDE) == WB_STRIDE) &&
770 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
771 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
772 tx_ring->arm_wb = true;
773 else
774 tx_ring->arm_wb = false;
775
762 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { 776 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
763 /* schedule immediate reset if we believe we hung */ 777 /* schedule immediate reset if we believe we hung */
764 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" 778 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
777 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 791 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
778 792
779 dev_info(tx_ring->dev, 793 dev_info(tx_ring->dev,
780 "tx hang detected on queue %d, resetting adapter\n", 794 "tx hang detected on queue %d, reset requested\n",
781 tx_ring->queue_index); 795 tx_ring->queue_index);
782 796
783 tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); 797 /* do not fire the reset immediately, wait for the stack to
798 * decide we are truly stuck, also prevents every queue from
799 * simultaneously requesting a reset
800 */
784 801
785 /* the adapter is about to reset, no point in enabling stuff */ 802 /* the adapter is about to reset, no point in enabling polling */
786 return true; 803 budget = 1;
787 } 804 }
788 805
789 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, 806 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
806 } 823 }
807 } 824 }
808 825
809 return budget > 0; 826 return !!budget;
827}
828
829/**
830 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
831 * @vsi: the VSI we care about
832 * @q_vector: the vector on which to force writeback
833 *
834 **/
835static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
836{
837 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
838 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
839 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
840 /* allow 00 to be written to the index */;
841
842 wr32(&vsi->back->hw,
843 I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
844 val);
810} 845}
811 846
812/** 847/**
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1290 * so the total length of IPv4 header is IHL*4 bytes 1325 * so the total length of IPv4 header is IHL*4 bytes
 1291 * The UDP_0 bit *may* be set if the *inner* header is UDP 1326 * The UDP_0 bit *may* be set if the *inner* header is UDP
1292 */ 1327 */
1293 if (ipv4_tunnel && 1328 if (ipv4_tunnel) {
1294 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
1295 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
1296 skb->transport_header = skb->mac_header + 1329 skb->transport_header = skb->mac_header +
1297 sizeof(struct ethhdr) + 1330 sizeof(struct ethhdr) +
1298 (ip_hdr(skb)->ihl * 4); 1331 (ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1302 skb->protocol == htons(ETH_P_8021AD)) 1335 skb->protocol == htons(ETH_P_8021AD))
1303 ? VLAN_HLEN : 0; 1336 ? VLAN_HLEN : 0;
1304 1337
1305 rx_udp_csum = udp_csum(skb); 1338 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1306 iph = ip_hdr(skb); 1339 (udp_hdr(skb)->check != 0)) {
1307 csum = csum_tcpudp_magic( 1340 rx_udp_csum = udp_csum(skb);
1308 iph->saddr, iph->daddr, 1341 iph = ip_hdr(skb);
1309 (skb->len - skb_transport_offset(skb)), 1342 csum = csum_tcpudp_magic(
1310 IPPROTO_UDP, rx_udp_csum); 1343 iph->saddr, iph->daddr,
1344 (skb->len - skb_transport_offset(skb)),
1345 IPPROTO_UDP, rx_udp_csum);
1311 1346
1312 if (udp_hdr(skb)->check != csum) 1347 if (udp_hdr(skb)->check != csum)
1313 goto checksum_fail; 1348 goto checksum_fail;
1349
1350 } /* else its GRE and so no outer UDP header */
1314 } 1351 }
1315 1352
1316 skb->ip_summed = CHECKSUM_UNNECESSARY; 1353 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1581 struct i40e_vsi *vsi = q_vector->vsi; 1618 struct i40e_vsi *vsi = q_vector->vsi;
1582 struct i40e_ring *ring; 1619 struct i40e_ring *ring;
1583 bool clean_complete = true; 1620 bool clean_complete = true;
1621 bool arm_wb = false;
1584 int budget_per_ring; 1622 int budget_per_ring;
1585 1623
1586 if (test_bit(__I40E_DOWN, &vsi->state)) { 1624 if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1591 /* Since the actual Tx work is minimal, we can give the Tx a larger 1629 /* Since the actual Tx work is minimal, we can give the Tx a larger
1592 * budget and be more aggressive about cleaning up the Tx descriptors. 1630 * budget and be more aggressive about cleaning up the Tx descriptors.
1593 */ 1631 */
1594 i40e_for_each_ring(ring, q_vector->tx) 1632 i40e_for_each_ring(ring, q_vector->tx) {
1595 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); 1633 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1634 arm_wb |= ring->arm_wb;
1635 }
1596 1636
1597 /* We attempt to distribute budget to each Rx queue fairly, but don't 1637 /* We attempt to distribute budget to each Rx queue fairly, but don't
1598 * allow the budget to go below 1 because that would exit polling early. 1638 * allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1603 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); 1643 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
1604 1644
1605 /* If work not completed, return budget and polling will return */ 1645 /* If work not completed, return budget and polling will return */
1606 if (!clean_complete) 1646 if (!clean_complete) {
1647 if (arm_wb)
1648 i40e_force_wb(vsi, q_vector);
1607 return budget; 1649 return budget;
1650 }
1608 1651
1609 /* Work is done so exit the polling mode and re-enable the interrupt */ 1652 /* Work is done so exit the polling mode and re-enable the interrupt */
1610 napi_complete(napi); 1653 napi_complete(napi);
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1840 if (err < 0) 1883 if (err < 0)
1841 return err; 1884 return err;
1842 1885
1843 if (protocol == htons(ETH_P_IP)) { 1886 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1844 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1887 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
1888
1889 if (iph->version == 4) {
1845 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1890 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1846 iph->tot_len = 0; 1891 iph->tot_len = 0;
1847 iph->check = 0; 1892 iph->check = 0;
1848 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1893 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1849 0, IPPROTO_TCP, 0); 1894 0, IPPROTO_TCP, 0);
1850 } else if (skb_is_gso_v6(skb)) { 1895 } else if (ipv6h->version == 6) {
1851
1852 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1853 : ipv6_hdr(skb);
1854 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1896 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1855 ipv6h->payload_len = 0; 1897 ipv6h->payload_len = 0;
1856 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 1898 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1946 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1988 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1947 } 1989 }
1948 } else if (tx_flags & I40E_TX_FLAGS_IPV6) { 1990 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1949 if (tx_flags & I40E_TX_FLAGS_TSO) { 1991 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1950 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1992 if (tx_flags & I40E_TX_FLAGS_TSO)
1951 ip_hdr(skb)->check = 0; 1993 ip_hdr(skb)->check = 0;
1952 } else {
1953 *cd_tunneling |=
1954 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1955 }
1956 } 1994 }
1957 1995
1958 /* Now set the ctx descriptor fields */ 1996 /* Now set the ctx descriptor fields */
@@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1962 ((skb_inner_network_offset(skb) - 2000 ((skb_inner_network_offset(skb) -
1963 skb_transport_offset(skb)) >> 1) << 2001 skb_transport_offset(skb)) >> 1) <<
1964 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 2002 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1965 2003 if (this_ip_hdr->version == 6) {
2004 tx_flags &= ~I40E_TX_FLAGS_IPV4;
2005 tx_flags |= I40E_TX_FLAGS_IPV6;
2006 }
1966 } else { 2007 } else {
1967 network_hdr_len = skb_network_header_len(skb); 2008 network_hdr_len = skb_network_header_len(skb);
1968 this_ip_hdr = ip_hdr(skb); 2009 this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2198 /* Place RS bit on last descriptor of any packet that spans across the 2239 /* Place RS bit on last descriptor of any packet that spans across the
2199 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. 2240 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
2200 */ 2241 */
2201#define WB_STRIDE 0x3
2202 if (((i & WB_STRIDE) != WB_STRIDE) && 2242 if (((i & WB_STRIDE) != WB_STRIDE) &&
2203 (first <= &tx_ring->tx_bi[i]) && 2243 (first <= &tx_ring->tx_bi[i]) &&
2204 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { 2244 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
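In the i40e_txrx.c hunks above, WB_STRIDE moves to file scope and the Tx clean path arms a forced descriptor write-back whenever polling stops on a descriptor that is not on a WB_STRIDE boundary (the RS bit is only placed on every fourth descriptor), with i40e_force_wb() fired from the napi poll loop when any Tx ring requested it. A small sketch of the arming test, with the ring state collapsed into two booleans; ring_down and ring_idle are stand-ins for the __I40E_DOWN test and the I40E_DESC_UNUSED(tx_ring) == tx_ring->count comparison.

/* Sketch of the write-back arming condition added above. */
#include <stdbool.h>
#include <stdio.h>

#define WB_STRIDE 0x3

static bool arm_writeback(unsigned int i, int budget,
			  bool ring_down, bool ring_idle)
{
	return budget &&
	       ((i & WB_STRIDE) != WB_STRIDE) &&   /* stopped off-stride */
	       !ring_down && !ring_idle;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		printf("stopped at descriptor %u -> arm_wb=%d\n",
		       i, arm_writeback(i, 1, false, false));
	return 0;
}

With this, stopping on descriptors 3 or 7 (an RS boundary) leaves arm_wb clear, while any other stopping point schedules a forced write-back on the next poll.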
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index e60d3accb2e2..18b00231d2f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -241,6 +241,7 @@ struct i40e_ring {
241 unsigned long last_rx_timestamp; 241 unsigned long last_rx_timestamp;
242 242
243 bool ring_active; /* is ring online or not */ 243 bool ring_active; /* is ring online or not */
244 bool arm_wb; /* do something to arm write back */
244 245
245 /* stats structs */ 246 /* stats structs */
246 struct i40e_queue_stats stats; 247 struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 051ea94bdcd3..0f69ef81751a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1125 u32 swmask = mask; 1125 u32 swmask = mask;
1126 u32 fwmask = mask << 16; 1126 u32 fwmask = mask << 16;
1127 s32 ret_val = 0; 1127 s32 ret_val = 0;
1128 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ 1128 s32 i = 0, timeout = 200;
1129 1129
1130 while (i < timeout) { 1130 while (i < timeout) {
1131 if (igb_get_hw_semaphore(hw)) { 1131 if (igb_get_hw_semaphore(hw)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 190cbd931f6b..ac6a8f1eea6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
475{ 475{
476 int err; 476 int err;
477 477
478 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 478 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
479 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
479 return 0; /* do nothing */ 480 return 0; /* do nothing */
480 481
481 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, 482 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
@@ -2365,9 +2366,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
2365 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); 2366 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2366} 2367}
2367 2368
2368static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev) 2369static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2370 struct net_device *dev,
2371 netdev_features_t features)
2369{ 2372{
2370 return vxlan_gso_check(skb); 2373 return vxlan_features_check(skb, features);
2371} 2374}
2372#endif 2375#endif
2373 2376
@@ -2400,7 +2403,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
2400#ifdef CONFIG_MLX4_EN_VXLAN 2403#ifdef CONFIG_MLX4_EN_VXLAN
2401 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2404 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2402 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, 2405 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2403 .ndo_gso_check = mlx4_en_gso_check, 2406 .ndo_features_check = mlx4_en_features_check,
2404#endif 2407#endif
2405}; 2408};
2406 2409
@@ -2434,7 +2437,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2434#ifdef CONFIG_MLX4_EN_VXLAN 2437#ifdef CONFIG_MLX4_EN_VXLAN
2435 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2438 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2436 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, 2439 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2437 .ndo_gso_check = mlx4_en_gso_check, 2440 .ndo_features_check = mlx4_en_features_check,
2438#endif 2441#endif
2439}; 2442};
2440 2443
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a308d41e4de0..e3357bf523df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -962,7 +962,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
962 tx_desc->ctrl.owner_opcode = op_own; 962 tx_desc->ctrl.owner_opcode = op_own;
963 if (send_doorbell) { 963 if (send_doorbell) {
964 wmb(); 964 wmb();
965 iowrite32(ring->doorbell_qpn, 965 /* Since there is no iowrite*_native() that writes the
966 * value as is, without byteswapping - using the one
 967 * that doesn't do byteswapping in the relevant arch
968 * endianness.
969 */
970#if defined(__LITTLE_ENDIAN)
971 iowrite32(
972#else
973 iowrite32be(
974#endif
975 ring->doorbell_qpn,
966 ring->bf.uar->map + MLX4_SEND_DOORBELL); 976 ring->bf.uar->map + MLX4_SEND_DOORBELL);
967 } else { 977 } else {
968 ring->xmit_more++; 978 ring->xmit_more++;
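The doorbell hunk above writes a value that is already stored in big-endian byte order, so the MMIO accessor must not swap it again; since there is no iowrite32_native(), the patch picks iowrite32() on little-endian builds and iowrite32be() on big-endian ones, both of which leave the bytes untouched on that host. A userspace sketch of the idea, with mock accessors that copy the kernel semantics and a plain variable standing in for the register.

/* Sketch only: mock_iowrite32() stores little-endian, mock_iowrite32be()
 * stores big-endian, as their kernel namesakes do; the driver picks the
 * one that is a no-op swap on the current build.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
static void mock_iowrite32(uint32_t v, uint32_t *reg)   { *reg = v; }          /* no swap */
static void mock_iowrite32be(uint32_t v, uint32_t *reg) { *reg = bswap32(v); } /* swap to BE */
#else
static void mock_iowrite32(uint32_t v, uint32_t *reg)   { *reg = bswap32(v); } /* swap to LE */
static void mock_iowrite32be(uint32_t v, uint32_t *reg) { *reg = v; }          /* no swap */
#endif

int main(void)
{
	uint32_t doorbell_qpn = 0x00123400u;  /* pretend cpu_to_be32() already ran */
	uint32_t le_reg = 0, be_reg = 0;

	mock_iowrite32(doorbell_qpn, &le_reg);
	mock_iowrite32be(doorbell_qpn, &be_reg);
	printf("iowrite32  : 0x%08x\n", le_reg);
	printf("iowrite32be: 0x%08x\n", be_reg);
	/* the driver uses whichever accessor left the value unchanged */
	return 0;
}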
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 943cbd47d832..6e08352ec994 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1744,8 +1744,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
1744 struct mlx4_dev_cap *dev_cap) 1744 struct mlx4_dev_cap *dev_cap)
1745{ 1745{
1746 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 1746 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
1747 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS && 1747 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
1748 dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
1749 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 1748 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
1750 else 1749 else
1751 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 1750 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
@@ -1829,7 +1828,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1829 err = mlx4_dev_cap(dev, &dev_cap); 1828 err = mlx4_dev_cap(dev, &dev_cap);
1830 if (err) { 1829 if (err) {
1831 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 1830 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
1832 goto err_stop_fw; 1831 return err;
1833 } 1832 }
1834 1833
1835 choose_steering_mode(dev, &dev_cap); 1834 choose_steering_mode(dev, &dev_cap);
@@ -1860,7 +1859,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1860 &init_hca); 1859 &init_hca);
1861 if ((long long) icm_size < 0) { 1860 if ((long long) icm_size < 0) {
1862 err = icm_size; 1861 err = icm_size;
1863 goto err_stop_fw; 1862 return err;
1864 } 1863 }
1865 1864
1866 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 1865 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
@@ -1874,7 +1873,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1874 1873
1875 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 1874 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
1876 if (err) 1875 if (err)
1877 goto err_stop_fw; 1876 return err;
1878 1877
1879 err = mlx4_INIT_HCA(dev, &init_hca); 1878 err = mlx4_INIT_HCA(dev, &init_hca);
1880 if (err) { 1879 if (err) {
@@ -1886,7 +1885,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
1886 err = mlx4_query_func(dev, &dev_cap); 1885 err = mlx4_query_func(dev, &dev_cap);
1887 if (err < 0) { 1886 if (err < 0) {
1888 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 1887 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
1889 goto err_stop_fw; 1888 goto err_close;
1890 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 1889 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
1891 dev->caps.num_eqs = dev_cap.max_eqs; 1890 dev->caps.num_eqs = dev_cap.max_eqs;
1892 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 1891 dev->caps.reserved_eqs = dev_cap.reserved_eqs;
@@ -2006,11 +2005,6 @@ err_free_icm:
2006 if (!mlx4_is_slave(dev)) 2005 if (!mlx4_is_slave(dev))
2007 mlx4_free_icms(dev); 2006 mlx4_free_icms(dev);
2008 2007
2009err_stop_fw:
2010 if (!mlx4_is_slave(dev)) {
2011 mlx4_UNMAP_FA(dev);
2012 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
2013 }
2014 return err; 2008 return err;
2015} 2009}
2016 2010
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d6f549685c0f..7094a9c70fd5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
584void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) 584void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
585{ 585{
586 mlx4_mtt_cleanup(dev, &mr->mtt); 586 mlx4_mtt_cleanup(dev, &mr->mtt);
587 mr->mtt.order = -1;
587} 588}
588EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); 589EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
589 590
@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
593{ 594{
594 int err; 595 int err;
595 596
596 mpt_entry->start = cpu_to_be64(iova);
597 mpt_entry->length = cpu_to_be64(size);
598 mpt_entry->entity_size = cpu_to_be32(page_shift);
599
600 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); 597 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
601 if (err) 598 if (err)
602 return err; 599 return err;
603 600
601 mpt_entry->start = cpu_to_be64(mr->iova);
602 mpt_entry->length = cpu_to_be64(mr->size);
603 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
604
604 mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | 605 mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
605 MLX4_MPT_PD_FLAG_EN_INV); 606 MLX4_MPT_PD_FLAG_EN_INV);
606 mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | 607 mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index f1ebed6c63b1..2fa6ae026e4f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -2303,12 +2303,6 @@ static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
2303 2303
2304/* Spanning Tree */ 2304/* Spanning Tree */
2305 2305
2306static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
2307{
2308 port_cfg(hw, p,
2309 KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
2310}
2311
2312static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set) 2306static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
2313{ 2307{
2314 port_cfg(hw, p, 2308 port_cfg(hw, p,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index af099057f0e9..71af98bb72cb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4033 (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4033 (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4034 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 4034 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
4035 &mgp->cmd_bus, GFP_KERNEL); 4035 &mgp->cmd_bus, GFP_KERNEL);
4036 if (mgp->cmd == NULL) 4036 if (!mgp->cmd) {
4037 status = -ENOMEM;
4037 goto abort_with_enabled; 4038 goto abort_with_enabled;
4039 }
4038 4040
4039 mgp->board_span = pci_resource_len(pdev, 0); 4041 mgp->board_span = pci_resource_len(pdev, 0);
4040 mgp->iomem_base = pci_resource_start(pdev, 0); 4042 mgp->iomem_base = pci_resource_start(pdev, 0);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f5e4b820128b..db0c7a9aee60 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
6987 if (sp->s2io_entries[i].in_use == MSIX_FLG) { 6987 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6988 if (sp->s2io_entries[i].type == 6988 if (sp->s2io_entries[i].type ==
6989 MSIX_RING_TYPE) { 6989 MSIX_RING_TYPE) {
6990 sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 6990 snprintf(sp->desc[i],
6991 sizeof(sp->desc[i]),
6992 "%s:MSI-X-%d-RX",
6991 dev->name, i); 6993 dev->name, i);
6992 err = request_irq(sp->entries[i].vector, 6994 err = request_irq(sp->entries[i].vector,
6993 s2io_msix_ring_handle, 6995 s2io_msix_ring_handle,
@@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
6996 sp->s2io_entries[i].arg); 6998 sp->s2io_entries[i].arg);
6997 } else if (sp->s2io_entries[i].type == 6999 } else if (sp->s2io_entries[i].type ==
6998 MSIX_ALARM_TYPE) { 7000 MSIX_ALARM_TYPE) {
6999 sprintf(sp->desc[i], "%s:MSI-X-%d-TX", 7001 snprintf(sp->desc[i],
7002 sizeof(sp->desc[i]),
7003 "%s:MSI-X-%d-TX",
7000 dev->name, i); 7004 dev->name, i);
7001 err = request_irq(sp->entries[i].vector, 7005 err = request_irq(sp->entries[i].vector,
7002 s2io_msix_fifo_handle, 7006 s2io_msix_fifo_handle,
@@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8154 "%s: UDP Fragmentation Offload(UFO) enabled\n", 8158 "%s: UDP Fragmentation Offload(UFO) enabled\n",
8155 dev->name); 8159 dev->name);
8156 /* Initialize device name */ 8160 /* Initialize device name */
8157 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); 8161 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8162 sp->product_name);
8158 8163
8159 if (vlan_tag_strip) 8164 if (vlan_tag_strip)
8160 sp->vlan_strip_flag = 1; 8165 sp->vlan_strip_flag = 1;
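The s2io hunks above replace sprintf() with snprintf() so the MSI-X IRQ descriptions and the device name can never overrun their fixed-size buffers. A minimal sketch of the bounded formatting; the buffer size and interface name are made up for illustration.

/* Sketch of the sprintf -> snprintf change above: snprintf() truncates
 * at the buffer size instead of writing past the end.
 */
#include <stdio.h>

int main(void)
{
	char desc[16];
	const char *devname = "a-very-long-interface-name";
	int i = 3;

	/* bounded: at most sizeof(desc)-1 characters plus the NUL */
	snprintf(desc, sizeof(desc), "%s:MSI-X-%d-RX", devname, i);
	printf("irq name: \"%s\"\n", desc);
	return 0;
}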
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c2f09af5c25b..4847713211ca 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
146{ 146{
147 int i = 0; 147 int i = 0;
148 148
149 while (i < 10) { 149 do {
150 if (i)
151 ssleep(1);
152
153 if (ql_sem_lock(qdev, 150 if (ql_sem_lock(qdev,
154 QL_DRVR_SEM_MASK, 151 QL_DRVR_SEM_MASK,
155 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 152 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
158 "driver lock acquired\n"); 155 "driver lock acquired\n");
159 return 1; 156 return 1;
160 } 157 }
161 } 158 ssleep(1);
159 } while (++i < 10);
162 160
163 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); 161 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
164 return 0; 162 return 0;
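The qla3xxx hunk above reshapes the lock-acquisition loop from a while with a leading "sleep if not the first pass" test into a do-while: the lock is attempted first, the driver sleeps only after a failed attempt, and it gives up after ten tries. A standalone sketch of that retry pattern; try_lock() is a hypothetical stand-in for ql_sem_lock() and succeeds on the third attempt just to show the flow.

/* Sketch of the retry loop reshaped above. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int attempts_left = 3;   /* pretend the lock frees up on try 3 */

static bool try_lock(void)
{
	return --attempts_left == 0;
}

static int wait_for_drvr_lock(void)
{
	int i = 0;

	do {
		if (try_lock()) {
			printf("driver lock acquired after %d retries\n", i);
			return 1;
		}
		sleep(1);            /* sleep only after a failed attempt */
	} while (++i < 10);

	fprintf(stderr, "timed out waiting for driver lock\n");
	return 0;
}

int main(void)
{
	return wait_for_drvr_lock() ? 0 : 1;
}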
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1aa25b13ace1..2528c3fb6b90 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -505,9 +505,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
505 adapter->flags |= QLCNIC_DEL_VXLAN_PORT; 505 adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
506} 506}
507 507
508static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev) 508static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
509 struct net_device *dev,
510 netdev_features_t features)
509{ 511{
510 return vxlan_gso_check(skb); 512 return vxlan_features_check(skb, features);
511} 513}
512#endif 514#endif
513 515
@@ -532,7 +534,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
532#ifdef CONFIG_QLCNIC_VXLAN 534#ifdef CONFIG_QLCNIC_VXLAN
533 .ndo_add_vxlan_port = qlcnic_add_vxlan_port, 535 .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
534 .ndo_del_vxlan_port = qlcnic_del_vxlan_port, 536 .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
535 .ndo_gso_check = qlcnic_gso_check, 537 .ndo_features_check = qlcnic_features_check,
536#endif 538#endif
537#ifdef CONFIG_NET_POLL_CONTROLLER 539#ifdef CONFIG_NET_POLL_CONTROLLER
538 .ndo_poll_controller = qlcnic_poll_controller, 540 .ndo_poll_controller = qlcnic_poll_controller,
@@ -2603,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2603 } else { 2605 } else {
2604 dev_err(&pdev->dev, 2606 dev_err(&pdev->dev,
2605 "%s: failed. Please Reboot\n", __func__); 2607 "%s: failed. Please Reboot\n", __func__);
2608 err = -ENODEV;
2606 goto err_out_free_hw; 2609 goto err_out_free_hw;
2607 } 2610 }
2608 2611
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 6d0b9dfac313..78bb4ceb1cdd 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -787,10 +787,10 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
787 if (rc) 787 if (rc)
788 goto err_out; 788 goto err_out;
789 789
790 disable_dev_on_err = 1;
790 rc = pci_request_regions (pdev, DRV_NAME); 791 rc = pci_request_regions (pdev, DRV_NAME);
791 if (rc) 792 if (rc)
792 goto err_out; 793 goto err_out;
793 disable_dev_on_err = 1;
794 794
795 pci_set_master (pdev); 795 pci_set_master (pdev);
796 796
@@ -1110,6 +1110,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
1110 return 0; 1110 return 0;
1111 1111
1112err_out: 1112err_out:
1113 netif_napi_del(&tp->napi);
1113 __rtl8139_cleanup_dev (dev); 1114 __rtl8139_cleanup_dev (dev);
1114 pci_disable_device (pdev); 1115 pci_disable_device (pdev);
1115 return i; 1116 return i;
@@ -1124,6 +1125,7 @@ static void rtl8139_remove_one(struct pci_dev *pdev)
1124 assert (dev != NULL); 1125 assert (dev != NULL);
1125 1126
1126 cancel_delayed_work_sync(&tp->thread); 1127 cancel_delayed_work_sync(&tp->thread);
1128 netif_napi_del(&tp->napi);
1127 1129
1128 unregister_netdev (dev); 1130 unregister_netdev (dev);
1129 1131
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c29ba80ae02b..6576243222af 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
473 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 473 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
474 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 474 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
475 EESR_ECI, 475 EESR_ECI,
476 .fdr_value = 0x00000f0f,
476 477
477 .apr = 1, 478 .apr = 1,
478 .mpr = 1, 479 .mpr = 1,
@@ -495,6 +496,9 @@ static struct sh_eth_cpu_data r8a779x_data = {
495 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 496 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
496 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 497 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
497 EESR_ECI, 498 EESR_ECI,
499 .fdr_value = 0x00000f0f,
500
501 .trscer_err_mask = DESC_I_RINT8,
498 502
499 .apr = 1, 503 .apr = 1,
500 .mpr = 1, 504 .mpr = 1,
@@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
856 860
857 if (!cd->eesr_err_check) 861 if (!cd->eesr_err_check)
858 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; 862 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
863
864 if (!cd->trscer_err_mask)
865 cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
859} 866}
860 867
861static int sh_eth_check_reset(struct net_device *ndev) 868static int sh_eth_check_reset(struct net_device *ndev)
@@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1294 /* Frame recv control (enable multiple-packets per rx irq) */ 1301 /* Frame recv control (enable multiple-packets per rx irq) */
1295 sh_eth_write(ndev, RMCR_RNC, RMCR); 1302 sh_eth_write(ndev, RMCR_RNC, RMCR);
1296 1303
1297 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1304 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1298 1305
1299 if (mdp->cd->bculr) 1306 if (mdp->cd->bculr)
 1300 sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */ 1307 sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
@@ -1820,6 +1827,9 @@ static int sh_eth_get_settings(struct net_device *ndev,
1820 unsigned long flags; 1827 unsigned long flags;
1821 int ret; 1828 int ret;
1822 1829
1830 if (!mdp->phydev)
1831 return -ENODEV;
1832
1823 spin_lock_irqsave(&mdp->lock, flags); 1833 spin_lock_irqsave(&mdp->lock, flags);
1824 ret = phy_ethtool_gset(mdp->phydev, ecmd); 1834 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1825 spin_unlock_irqrestore(&mdp->lock, flags); 1835 spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1834,6 +1844,9 @@ static int sh_eth_set_settings(struct net_device *ndev,
1834 unsigned long flags; 1844 unsigned long flags;
1835 int ret; 1845 int ret;
1836 1846
1847 if (!mdp->phydev)
1848 return -ENODEV;
1849
1837 spin_lock_irqsave(&mdp->lock, flags); 1850 spin_lock_irqsave(&mdp->lock, flags);
1838 1851
1839 /* disable tx and rx */ 1852 /* disable tx and rx */
@@ -1868,6 +1881,9 @@ static int sh_eth_nway_reset(struct net_device *ndev)
1868 unsigned long flags; 1881 unsigned long flags;
1869 int ret; 1882 int ret;
1870 1883
1884 if (!mdp->phydev)
1885 return -ENODEV;
1886
1871 spin_lock_irqsave(&mdp->lock, flags); 1887 spin_lock_irqsave(&mdp->lock, flags);
1872 ret = phy_start_aneg(mdp->phydev); 1888 ret = phy_start_aneg(mdp->phydev);
1873 spin_unlock_irqrestore(&mdp->lock, flags); 1889 spin_unlock_irqrestore(&mdp->lock, flags);
@@ -2177,6 +2193,7 @@ static int sh_eth_close(struct net_device *ndev)
2177 if (mdp->phydev) { 2193 if (mdp->phydev) {
2178 phy_stop(mdp->phydev); 2194 phy_stop(mdp->phydev);
2179 phy_disconnect(mdp->phydev); 2195 phy_disconnect(mdp->phydev);
2196 mdp->phydev = NULL;
2180 } 2197 }
2181 2198
2182 free_irq(ndev->irq, ndev); 2199 free_irq(ndev->irq, ndev);
@@ -2410,7 +2427,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
2410 struct sh_eth_private *mdp = netdev_priv(ndev); 2427 struct sh_eth_private *mdp = netdev_priv(ndev);
2411 int i, ret; 2428 int i, ret;
2412 2429
2413 if (unlikely(!mdp->cd->tsu)) 2430 if (!mdp->cd->tsu)
2414 return 0; 2431 return 0;
2415 2432
2416 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { 2433 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
@@ -2433,7 +2450,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2433 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 2450 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2434 int i; 2451 int i;
2435 2452
2436 if (unlikely(!mdp->cd->tsu)) 2453 if (!mdp->cd->tsu)
2437 return; 2454 return;
2438 2455
2439 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 2456 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
@@ -2443,8 +2460,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2443 } 2460 }
2444} 2461}
2445 2462
2446/* Multicast reception directions set */ 2463/* Update promiscuous flag and multicast filter */
2447static void sh_eth_set_multicast_list(struct net_device *ndev) 2464static void sh_eth_set_rx_mode(struct net_device *ndev)
2448{ 2465{
2449 struct sh_eth_private *mdp = netdev_priv(ndev); 2466 struct sh_eth_private *mdp = netdev_priv(ndev);
2450 u32 ecmr_bits; 2467 u32 ecmr_bits;
@@ -2455,7 +2472,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
2455 /* Initial condition is MCT = 1, PRM = 0. 2472 /* Initial condition is MCT = 1, PRM = 0.
2456 * Depending on ndev->flags, set PRM or clear MCT 2473 * Depending on ndev->flags, set PRM or clear MCT
2457 */ 2474 */
2458 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT; 2475 ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2476 if (mdp->cd->tsu)
2477 ecmr_bits |= ECMR_MCT;
2459 2478
2460 if (!(ndev->flags & IFF_MULTICAST)) { 2479 if (!(ndev->flags & IFF_MULTICAST)) {
2461 sh_eth_tsu_purge_mcast(ndev); 2480 sh_eth_tsu_purge_mcast(ndev);
@@ -2484,9 +2503,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
2484 } 2503 }
2485 } 2504 }
2486 } 2505 }
2487 } else {
2488 /* Normal, unicast/broadcast-only mode. */
2489 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2490 } 2506 }
2491 2507
2492 /* update the ethernet mode */ 2508 /* update the ethernet mode */
@@ -2694,6 +2710,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
2694 .ndo_stop = sh_eth_close, 2710 .ndo_stop = sh_eth_close,
2695 .ndo_start_xmit = sh_eth_start_xmit, 2711 .ndo_start_xmit = sh_eth_start_xmit,
2696 .ndo_get_stats = sh_eth_get_stats, 2712 .ndo_get_stats = sh_eth_get_stats,
2713 .ndo_set_rx_mode = sh_eth_set_rx_mode,
2697 .ndo_tx_timeout = sh_eth_tx_timeout, 2714 .ndo_tx_timeout = sh_eth_tx_timeout,
2698 .ndo_do_ioctl = sh_eth_do_ioctl, 2715 .ndo_do_ioctl = sh_eth_do_ioctl,
2699 .ndo_validate_addr = eth_validate_addr, 2716 .ndo_validate_addr = eth_validate_addr,
@@ -2706,7 +2723,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2706 .ndo_stop = sh_eth_close, 2723 .ndo_stop = sh_eth_close,
2707 .ndo_start_xmit = sh_eth_start_xmit, 2724 .ndo_start_xmit = sh_eth_start_xmit,
2708 .ndo_get_stats = sh_eth_get_stats, 2725 .ndo_get_stats = sh_eth_get_stats,
2709 .ndo_set_rx_mode = sh_eth_set_multicast_list, 2726 .ndo_set_rx_mode = sh_eth_set_rx_mode,
2710 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, 2727 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
2711 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, 2728 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
2712 .ndo_tx_timeout = sh_eth_tx_timeout, 2729 .ndo_tx_timeout = sh_eth_tx_timeout,
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 22301bf9c21d..71f5de1171bd 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
369 DESC_I_RINT1 = 0x0001, 369 DESC_I_RINT1 = 0x0001,
370}; 370};
371 371
372#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
373
372/* RPADIR */ 374/* RPADIR */
373enum RPADIR_BIT { 375enum RPADIR_BIT {
374 RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, 376 RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -470,6 +472,9 @@ struct sh_eth_cpu_data {
470 unsigned long tx_check; 472 unsigned long tx_check;
471 unsigned long eesr_err_check; 473 unsigned long eesr_err_check;
472 474
475 /* Error mask */
476 unsigned long trscer_err_mask;
477
473 /* hardware features */ 478 /* hardware features */
474 unsigned long irq_flags; /* IRQ configuration flags */ 479 unsigned long irq_flags; /* IRQ configuration flags */
475 unsigned no_psr:1; /* EtherC DO NOT have PSR */ 480 unsigned no_psr:1; /* EtherC DO NOT have PSR */
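The sh_eth hunks above make the TRSCER error mask per-chip: r8a779x supplies its own DESC_I_RINT8 value, every other cpu_data entry is patched up at probe time with DEFAULT_TRSCER_ERR_MASK, and the init path then writes whatever the field holds. A small sketch of that "per-chip value with a default fallback" pattern; the bit values below are illustrative, not the real register layout.

/* Sketch of the default-fallback pattern added for trscer_err_mask. */
#include <stdio.h>

#define DESC_I_RINT8 0x0100     /* illustrative values only */
#define DESC_I_RINT5 0x0020
#define DESC_I_TINT2 0x0400
#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)

struct cpu_data {
	const char *name;
	unsigned long trscer_err_mask;  /* 0 means "use the default" */
};

static void set_default_cpu_data(struct cpu_data *cd)
{
	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}

int main(void)
{
	struct cpu_data generic = { "generic", 0 };
	struct cpu_data r8a779x = { "r8a779x", DESC_I_RINT8 };

	set_default_cpu_data(&generic);
	set_default_cpu_data(&r8a779x);

	printf("%s TRSCER mask: 0x%04lx\n", generic.name, generic.trscer_err_mask);
	printf("%s TRSCER mask: 0x%04lx\n", r8a779x.name, r8a779x.trscer_err_mask);
	return 0;
}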
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
deleted file mode 100644
index f537cbea20e5..000000000000
--- a/drivers/net/ethernet/s6gmac.c
+++ /dev/null
@@ -1,1058 +0,0 @@
1/*
2 * Ethernet driver for S6105 on chip network device
3 * (c)2008 emlix GmbH http://www.emlix.com
4 * Authors: Oskar Schirmer <oskar@scara.com>
5 * Daniel Gloeckner <dg@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/interrupt.h>
15#include <linux/types.h>
16#include <linux/delay.h>
17#include <linux/spinlock.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/if.h>
21#include <linux/stddef.h>
22#include <linux/mii.h>
23#include <linux/phy.h>
24#include <linux/platform_device.h>
25#include <variant/hardware.h>
26#include <variant/dmac.h>
27
28#define DRV_NAME "s6gmac"
29#define DRV_PRMT DRV_NAME ": "
30
31
32/* register declarations */
33
34#define S6_GMAC_MACCONF1 0x000
35#define S6_GMAC_MACCONF1_TXENA 0
36#define S6_GMAC_MACCONF1_SYNCTX 1
37#define S6_GMAC_MACCONF1_RXENA 2
38#define S6_GMAC_MACCONF1_SYNCRX 3
39#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
40#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
41#define S6_GMAC_MACCONF1_LOOPBACK 8
42#define S6_GMAC_MACCONF1_RESTXFUNC 16
43#define S6_GMAC_MACCONF1_RESRXFUNC 17
44#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
45#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
46#define S6_GMAC_MACCONF1_SIMULRES 30
47#define S6_GMAC_MACCONF1_SOFTRES 31
48#define S6_GMAC_MACCONF2 0x004
49#define S6_GMAC_MACCONF2_FULL 0
50#define S6_GMAC_MACCONF2_CRCENA 1
51#define S6_GMAC_MACCONF2_PADCRCENA 2
52#define S6_GMAC_MACCONF2_LENGTHFCHK 4
53#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
54#define S6_GMAC_MACCONF2_IFMODE 8
55#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
56#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
57#define S6_GMAC_MACCONF2_IFMODE_MASK 3
58#define S6_GMAC_MACCONF2_PREAMBLELEN 12
59#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
60#define S6_GMAC_MACIPGIFG 0x008
61#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
62#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
63#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
64#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
65#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
66#define S6_GMAC_MACHALFDUPLEX 0x00C
67#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
68#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
69#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
70#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
71#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
72#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
73#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
74#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
75#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
76#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
77#define S6_GMAC_MACMAXFRAMELEN 0x010
78#define S6_GMAC_MACMIICONF 0x020
79#define S6_GMAC_MACMIICONF_CSEL 0
80#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
81#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
82#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
83#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
84#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
85#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
86#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
87#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
88#define S6_GMAC_MACMIICONF_CSEL_MASK 7
89#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
90#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
91#define S6_GMAC_MACMIICMD 0x024
92#define S6_GMAC_MACMIICMD_READ 0
93#define S6_GMAC_MACMIICMD_SCAN 1
94#define S6_GMAC_MACMIIADDR 0x028
95#define S6_GMAC_MACMIIADDR_REG 0
96#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
97#define S6_GMAC_MACMIIADDR_PHY 8
98#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
99#define S6_GMAC_MACMIICTRL 0x02C
100#define S6_GMAC_MACMIISTAT 0x030
101#define S6_GMAC_MACMIIINDI 0x034
102#define S6_GMAC_MACMIIINDI_BUSY 0
103#define S6_GMAC_MACMIIINDI_SCAN 1
104#define S6_GMAC_MACMIIINDI_INVAL 2
105#define S6_GMAC_MACINTERFSTAT 0x03C
106#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
107#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
108#define S6_GMAC_MACSTATADDR1 0x040
109#define S6_GMAC_MACSTATADDR2 0x044
110
111#define S6_GMAC_FIFOCONF0 0x048
112#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
113#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
114#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
115#define S6_GMAC_FIFOCONF0_HSTRSTST 3
116#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
117#define S6_GMAC_FIFOCONF0_WTMENREQ 8
118#define S6_GMAC_FIFOCONF0_SRFENREQ 9
119#define S6_GMAC_FIFOCONF0_FRFENREQ 10
120#define S6_GMAC_FIFOCONF0_STFENREQ 11
121#define S6_GMAC_FIFOCONF0_FTFENREQ 12
122#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
123#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
124#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
125#define S6_GMAC_FIFOCONF0_STFENRPLY 19
126#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
127#define S6_GMAC_FIFOCONF1 0x04C
128#define S6_GMAC_FIFOCONF2 0x050
129#define S6_GMAC_FIFOCONF2_CFGLWM 0
130#define S6_GMAC_FIFOCONF2_CFGHWM 16
131#define S6_GMAC_FIFOCONF3 0x054
132#define S6_GMAC_FIFOCONF3_CFGFTTH 0
133#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
134#define S6_GMAC_FIFOCONF4 0x058
135#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
136#define S6_GMAC_FIFOCONF_RSV_RUNT 1
137#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
138#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
139#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
140#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
141#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
142#define S6_GMAC_FIFOCONF_RSV_OK 7
143#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
144#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
145#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
146#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
147#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
148#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
149#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
150#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
151#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
152#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
153#define S6_GMAC_FIFOCONF5 0x05C
154#define S6_GMAC_FIFOCONF5_DROPLT64 18
155#define S6_GMAC_FIFOCONF5_CFGBYTM 19
156#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
157#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
158
159#define S6_GMAC_STAT_REGS 0x080
160#define S6_GMAC_STAT_SIZE_MIN 12
161#define S6_GMAC_STATTR64 0x080
162#define S6_GMAC_STATTR64_SIZE 18
163#define S6_GMAC_STATTR127 0x084
164#define S6_GMAC_STATTR127_SIZE 18
165#define S6_GMAC_STATTR255 0x088
166#define S6_GMAC_STATTR255_SIZE 18
167#define S6_GMAC_STATTR511 0x08C
168#define S6_GMAC_STATTR511_SIZE 18
169#define S6_GMAC_STATTR1K 0x090
170#define S6_GMAC_STATTR1K_SIZE 18
171#define S6_GMAC_STATTRMAX 0x094
172#define S6_GMAC_STATTRMAX_SIZE 18
173#define S6_GMAC_STATTRMGV 0x098
174#define S6_GMAC_STATTRMGV_SIZE 18
175#define S6_GMAC_STATRBYT 0x09C
176#define S6_GMAC_STATRBYT_SIZE 24
177#define S6_GMAC_STATRPKT 0x0A0
178#define S6_GMAC_STATRPKT_SIZE 18
179#define S6_GMAC_STATRFCS 0x0A4
180#define S6_GMAC_STATRFCS_SIZE 12
181#define S6_GMAC_STATRMCA 0x0A8
182#define S6_GMAC_STATRMCA_SIZE 18
183#define S6_GMAC_STATRBCA 0x0AC
184#define S6_GMAC_STATRBCA_SIZE 22
185#define S6_GMAC_STATRXCF 0x0B0
186#define S6_GMAC_STATRXCF_SIZE 18
187#define S6_GMAC_STATRXPF 0x0B4
188#define S6_GMAC_STATRXPF_SIZE 12
189#define S6_GMAC_STATRXUO 0x0B8
190#define S6_GMAC_STATRXUO_SIZE 12
191#define S6_GMAC_STATRALN 0x0BC
192#define S6_GMAC_STATRALN_SIZE 12
193#define S6_GMAC_STATRFLR 0x0C0
194#define S6_GMAC_STATRFLR_SIZE 16
195#define S6_GMAC_STATRCDE 0x0C4
196#define S6_GMAC_STATRCDE_SIZE 12
197#define S6_GMAC_STATRCSE 0x0C8
198#define S6_GMAC_STATRCSE_SIZE 12
199#define S6_GMAC_STATRUND 0x0CC
200#define S6_GMAC_STATRUND_SIZE 12
201#define S6_GMAC_STATROVR 0x0D0
202#define S6_GMAC_STATROVR_SIZE 12
203#define S6_GMAC_STATRFRG 0x0D4
204#define S6_GMAC_STATRFRG_SIZE 12
205#define S6_GMAC_STATRJBR 0x0D8
206#define S6_GMAC_STATRJBR_SIZE 12
207#define S6_GMAC_STATRDRP 0x0DC
208#define S6_GMAC_STATRDRP_SIZE 12
209#define S6_GMAC_STATTBYT 0x0E0
210#define S6_GMAC_STATTBYT_SIZE 24
211#define S6_GMAC_STATTPKT 0x0E4
212#define S6_GMAC_STATTPKT_SIZE 18
213#define S6_GMAC_STATTMCA 0x0E8
214#define S6_GMAC_STATTMCA_SIZE 18
215#define S6_GMAC_STATTBCA 0x0EC
216#define S6_GMAC_STATTBCA_SIZE 18
217#define S6_GMAC_STATTXPF 0x0F0
218#define S6_GMAC_STATTXPF_SIZE 12
219#define S6_GMAC_STATTDFR 0x0F4
220#define S6_GMAC_STATTDFR_SIZE 12
221#define S6_GMAC_STATTEDF 0x0F8
222#define S6_GMAC_STATTEDF_SIZE 12
223#define S6_GMAC_STATTSCL 0x0FC
224#define S6_GMAC_STATTSCL_SIZE 12
225#define S6_GMAC_STATTMCL 0x100
226#define S6_GMAC_STATTMCL_SIZE 12
227#define S6_GMAC_STATTLCL 0x104
228#define S6_GMAC_STATTLCL_SIZE 12
229#define S6_GMAC_STATTXCL 0x108
230#define S6_GMAC_STATTXCL_SIZE 12
231#define S6_GMAC_STATTNCL 0x10C
232#define S6_GMAC_STATTNCL_SIZE 13
233#define S6_GMAC_STATTPFH 0x110
234#define S6_GMAC_STATTPFH_SIZE 12
235#define S6_GMAC_STATTDRP 0x114
236#define S6_GMAC_STATTDRP_SIZE 12
237#define S6_GMAC_STATTJBR 0x118
238#define S6_GMAC_STATTJBR_SIZE 12
239#define S6_GMAC_STATTFCS 0x11C
240#define S6_GMAC_STATTFCS_SIZE 12
241#define S6_GMAC_STATTXCF 0x120
242#define S6_GMAC_STATTXCF_SIZE 12
243#define S6_GMAC_STATTOVR 0x124
244#define S6_GMAC_STATTOVR_SIZE 12
245#define S6_GMAC_STATTUND 0x128
246#define S6_GMAC_STATTUND_SIZE 12
247#define S6_GMAC_STATTFRG 0x12C
248#define S6_GMAC_STATTFRG_SIZE 12
249#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
250#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
251#define S6_GMAC_STATCARRY1_RDRP 0
252#define S6_GMAC_STATCARRY1_RJBR 1
253#define S6_GMAC_STATCARRY1_RFRG 2
254#define S6_GMAC_STATCARRY1_ROVR 3
255#define S6_GMAC_STATCARRY1_RUND 4
256#define S6_GMAC_STATCARRY1_RCSE 5
257#define S6_GMAC_STATCARRY1_RCDE 6
258#define S6_GMAC_STATCARRY1_RFLR 7
259#define S6_GMAC_STATCARRY1_RALN 8
260#define S6_GMAC_STATCARRY1_RXUO 9
261#define S6_GMAC_STATCARRY1_RXPF 10
262#define S6_GMAC_STATCARRY1_RXCF 11
263#define S6_GMAC_STATCARRY1_RBCA 12
264#define S6_GMAC_STATCARRY1_RMCA 13
265#define S6_GMAC_STATCARRY1_RFCS 14
266#define S6_GMAC_STATCARRY1_RPKT 15
267#define S6_GMAC_STATCARRY1_RBYT 16
268#define S6_GMAC_STATCARRY1_TRMGV 25
269#define S6_GMAC_STATCARRY1_TRMAX 26
270#define S6_GMAC_STATCARRY1_TR1K 27
271#define S6_GMAC_STATCARRY1_TR511 28
272#define S6_GMAC_STATCARRY1_TR255 29
273#define S6_GMAC_STATCARRY1_TR127 30
274#define S6_GMAC_STATCARRY1_TR64 31
275#define S6_GMAC_STATCARRY2_TDRP 0
276#define S6_GMAC_STATCARRY2_TPFH 1
277#define S6_GMAC_STATCARRY2_TNCL 2
278#define S6_GMAC_STATCARRY2_TXCL 3
279#define S6_GMAC_STATCARRY2_TLCL 4
280#define S6_GMAC_STATCARRY2_TMCL 5
281#define S6_GMAC_STATCARRY2_TSCL 6
282#define S6_GMAC_STATCARRY2_TEDF 7
283#define S6_GMAC_STATCARRY2_TDFR 8
284#define S6_GMAC_STATCARRY2_TXPF 9
285#define S6_GMAC_STATCARRY2_TBCA 10
286#define S6_GMAC_STATCARRY2_TMCA 11
287#define S6_GMAC_STATCARRY2_TPKT 12
288#define S6_GMAC_STATCARRY2_TBYT 13
289#define S6_GMAC_STATCARRY2_TFRG 14
290#define S6_GMAC_STATCARRY2_TUND 15
291#define S6_GMAC_STATCARRY2_TOVR 16
292#define S6_GMAC_STATCARRY2_TXCF 17
293#define S6_GMAC_STATCARRY2_TFCS 18
294#define S6_GMAC_STATCARRY2_TJBR 19
295
296#define S6_GMAC_HOST_PBLKCTRL 0x140
297#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
298#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
299#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
300#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
301#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
302#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
303#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
304#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
305#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
306#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
307#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
308#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
309#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
310#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
311#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
312#define S6_GMAC_HOST_INTMASK 0x144
313#define S6_GMAC_HOST_INTSTAT 0x148
314#define S6_GMAC_HOST_INT_TXBURSTOVER 3
315#define S6_GMAC_HOST_INT_TXPREWOVER 4
316#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
317#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
318#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
319#define S6_GMAC_HOST_RXFIFOHWM 0x14C
320#define S6_GMAC_HOST_CTRLFRAMXP 0x150
321#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
322#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
323#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
324#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
325
326#define S6_GMAC_BURST_PREWR 0x1B0
327#define S6_GMAC_BURST_PREWR_LEN 0
328#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
329#define S6_GMAC_BURST_PREWR_CFE 20
330#define S6_GMAC_BURST_PREWR_PPE 21
331#define S6_GMAC_BURST_PREWR_FCS 22
332#define S6_GMAC_BURST_PREWR_PAD 23
333#define S6_GMAC_BURST_POSTRD 0x1D0
334#define S6_GMAC_BURST_POSTRD_LEN 0
335#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
336#define S6_GMAC_BURST_POSTRD_DROP 20
337
338
339/* data handling */
340
341#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
342#define S6_NUM_RX_SKB 16
343#define S6_MAX_FRLEN 1536
344
345struct s6gmac {
346 u32 reg;
347 u32 tx_dma;
348 u32 rx_dma;
349 u32 io;
350 u8 tx_chan;
351 u8 rx_chan;
352 spinlock_t lock;
353 u8 tx_skb_i, tx_skb_o;
354 u8 rx_skb_i, rx_skb_o;
355 struct sk_buff *tx_skb[S6_NUM_TX_SKB];
356 struct sk_buff *rx_skb[S6_NUM_RX_SKB];
357 unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
358 unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
359 struct phy_device *phydev;
360 struct {
361 struct mii_bus *bus;
362 int irq[PHY_MAX_ADDR];
363 } mii;
364 struct {
365 unsigned int mbit;
366 u8 giga;
367 u8 isup;
368 u8 full;
369 } link;
370};
371
372static void s6gmac_rx_fillfifo(struct net_device *dev)
373{
374 struct s6gmac *pd = netdev_priv(dev);
375 struct sk_buff *skb;
376 while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
377 (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
378 (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
379 pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
380 s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
381 pd->io, (u32)skb->data, S6_MAX_FRLEN);
382 }
383}
384
385static void s6gmac_rx_interrupt(struct net_device *dev)
386{
387 struct s6gmac *pd = netdev_priv(dev);
388 u32 pfx;
389 struct sk_buff *skb;
390 while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
391 s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
392 skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
393 pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
394 if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
395 dev_kfree_skb_irq(skb);
396 } else {
397 skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
398 & S6_GMAC_BURST_POSTRD_LEN_MASK);
399 skb->protocol = eth_type_trans(skb, dev);
400 skb->ip_summed = CHECKSUM_UNNECESSARY;
401 netif_rx(skb);
402 }
403 }
404}
405
406static void s6gmac_tx_interrupt(struct net_device *dev)
407{
408 struct s6gmac *pd = netdev_priv(dev);
409 while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
410 s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
411 dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
412 }
413 if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
414 netif_wake_queue(dev);
415}
416
417struct s6gmac_statinf {
418 unsigned reg_size : 4; /* 0: unused */
419 unsigned reg_off : 6;
420 unsigned net_index : 6;
421};
422
423#define S6_STATS_B (8 * sizeof(u32))
424#define S6_STATS_C(b, r, f) [b] = { \
425 BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
426 BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
427 >= (1<<4)) + \
428 r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
429 BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
430 >= ((1<<6)-1)) + \
431 (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
432 BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
433 % sizeof(unsigned long)) + \
434 BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
435 / sizeof(unsigned long)) >= (1<<6))) + \
436 BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
437 != sizeof(unsigned long))) + \
438 (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
439
440static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
441 S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
442 S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
443 S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
444 S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
445 S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
446 S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
447 S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
448 S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
449 S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
450 S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
451 S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
452 S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
453}, {
454 S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
455 S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
456 S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
457 S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
458 S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
459 S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
460 S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
461 S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
462 S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
463 S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
464 S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
465} };
466
467static void s6gmac_stats_collect(struct s6gmac *pd,
468 const struct s6gmac_statinf *inf)
469{
470 int b;
471 for (b = 0; b < S6_STATS_B; b++) {
472 if (inf[b].reg_size) {
473 pd->stats[inf[b].net_index] +=
474 readl(pd->reg + S6_GMAC_STAT_REGS
475 + sizeof(u32) * inf[b].reg_off);
476 }
477 }
478}
479
480static void s6gmac_stats_carry(struct s6gmac *pd,
481 const struct s6gmac_statinf *inf, u32 mask)
482{
483 int b;
484 while (mask) {
485 b = fls(mask) - 1;
486 mask &= ~(1 << b);
487 pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
488 }
489}
490
491static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
492{
493 int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
494 ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
495 return r;
496}
497
498static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
499{
500 u32 mask;
501 mask = s6gmac_stats_pending(pd, carry);
502 if (mask) {
503 writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
504 s6gmac_stats_carry(pd, &statinf[carry][0], mask);
505 }
506}
507
508static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
509{
510 struct net_device *dev = (struct net_device *)dev_id;
511 struct s6gmac *pd = netdev_priv(dev);
512 if (!dev)
513 return IRQ_NONE;
514 spin_lock(&pd->lock);
515 if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
516 s6gmac_rx_interrupt(dev);
517 s6gmac_rx_fillfifo(dev);
518 if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
519 s6gmac_tx_interrupt(dev);
520 s6gmac_stats_interrupt(pd, 0);
521 s6gmac_stats_interrupt(pd, 1);
522 spin_unlock(&pd->lock);
523 return IRQ_HANDLED;
524}
525
526static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
527 u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
528{
529 writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
530 writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
531 writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
532 writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
533}
534
535static inline void s6gmac_stop_device(struct net_device *dev)
536{
537 struct s6gmac *pd = netdev_priv(dev);
538 writel(0, pd->reg + S6_GMAC_MACCONF1);
539}
540
541static inline void s6gmac_init_device(struct net_device *dev)
542{
543 struct s6gmac *pd = netdev_priv(dev);
544 int is_rgmii = !!(pd->phydev->supported
545 & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
546#if 0
547 writel(1 << S6_GMAC_MACCONF1_SYNCTX |
548 1 << S6_GMAC_MACCONF1_SYNCRX |
549 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
550 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
551 1 << S6_GMAC_MACCONF1_RESTXFUNC |
552 1 << S6_GMAC_MACCONF1_RESRXFUNC |
553 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
554 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
555 pd->reg + S6_GMAC_MACCONF1);
556#endif
557 writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
558 udelay(1000);
559 writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
560 pd->reg + S6_GMAC_MACCONF1);
561 writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
562 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
563 pd->reg + S6_GMAC_HOST_PBLKCTRL);
564 writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
565 S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
566 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
567 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
568 is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
569 pd->reg + S6_GMAC_HOST_PBLKCTRL);
570 writel(1 << S6_GMAC_MACCONF1_TXENA |
571 1 << S6_GMAC_MACCONF1_RXENA |
572 (dev->flags & IFF_LOOPBACK ? 1 : 0)
573 << S6_GMAC_MACCONF1_LOOPBACK,
574 pd->reg + S6_GMAC_MACCONF1);
575 writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
576 dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
577 pd->reg + S6_GMAC_MACMAXFRAMELEN);
578 writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
579 1 << S6_GMAC_MACCONF2_PADCRCENA |
580 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
581 (pd->link.giga ?
582 S6_GMAC_MACCONF2_IFMODE_BYTE :
583 S6_GMAC_MACCONF2_IFMODE_NIBBLE)
584 << S6_GMAC_MACCONF2_IFMODE |
585 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
586 pd->reg + S6_GMAC_MACCONF2);
587 writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
588 writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
589 writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
590 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
591 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
592 1 << S6_GMAC_FIFOCONF0_STFENREQ |
593 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
594 pd->reg + S6_GMAC_FIFOCONF0);
595 writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
596 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
597 pd->reg + S6_GMAC_FIFOCONF3);
598 writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
599 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
600 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
601 1 << S6_GMAC_FIFOCONF_RSV_OK |
602 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
603 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
604 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
605 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
606 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
607 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
608 pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
609 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
610 pd->reg + S6_GMAC_FIFOCONF5);
611 writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
612 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
613 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
614 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
615 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
616 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
617 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
618 pd->reg + S6_GMAC_FIFOCONF4);
619 s6gmac_set_dstaddr(pd, 0,
620 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
621 s6gmac_set_dstaddr(pd, 1,
622 dev->dev_addr[5] |
623 dev->dev_addr[4] << 8 |
624 dev->dev_addr[3] << 16 |
625 dev->dev_addr[2] << 24,
626 dev->dev_addr[1] |
627 dev->dev_addr[0] << 8,
628 0xFFFFFFFF, 0x0000FFFF);
629 s6gmac_set_dstaddr(pd, 2,
630 0x00000000, 0x00000100, 0x00000000, 0x00000100);
631 s6gmac_set_dstaddr(pd, 3,
632 0x00000000, 0x00000000, 0x00000000, 0x00000000);
633 writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
634 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
635 S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
636 S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
637 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
638 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
639 is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
640 pd->reg + S6_GMAC_HOST_PBLKCTRL);
641}
642
643static void s6mii_enable(struct s6gmac *pd)
644{
645 writel(readl(pd->reg + S6_GMAC_MACCONF1) &
646 ~(1 << S6_GMAC_MACCONF1_SOFTRES),
647 pd->reg + S6_GMAC_MACCONF1);
648 writel((readl(pd->reg + S6_GMAC_MACMIICONF)
649 & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
650 | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
651 pd->reg + S6_GMAC_MACMIICONF);
652}
653
654static int s6mii_busy(struct s6gmac *pd, int tmo)
655{
656 while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
657 if (--tmo == 0)
658 return -ETIME;
659 udelay(64);
660 }
661 return 0;
662}
663
664static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
665{
666 struct s6gmac *pd = bus->priv;
667 s6mii_enable(pd);
668 if (s6mii_busy(pd, 256))
669 return -ETIME;
670 writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
671 regnum << S6_GMAC_MACMIIADDR_REG,
672 pd->reg + S6_GMAC_MACMIIADDR);
673 writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
674 writel(0, pd->reg + S6_GMAC_MACMIICMD);
675 if (s6mii_busy(pd, 256))
676 return -ETIME;
677 return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
678}
679
680static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
681{
682 struct s6gmac *pd = bus->priv;
683 s6mii_enable(pd);
684 if (s6mii_busy(pd, 256))
685 return -ETIME;
686 writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
687 regnum << S6_GMAC_MACMIIADDR_REG,
688 pd->reg + S6_GMAC_MACMIIADDR);
689 writel(value, pd->reg + S6_GMAC_MACMIICTRL);
690 if (s6mii_busy(pd, 256))
691 return -ETIME;
692 return 0;
693}
694
695static int s6mii_reset(struct mii_bus *bus)
696{
697 struct s6gmac *pd = bus->priv;
698 s6mii_enable(pd);
699 if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
700 return -ETIME;
701 return 0;
702}
703
704static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
705{
706 u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
707 pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
708 switch (pd->link.mbit) {
709 case 10:
710 pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
711 break;
712 case 100:
713 pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
714 break;
715 case 1000:
716 pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
717 break;
718 default:
719 return;
720 }
721 writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
722}
723
724static inline void s6gmac_linkisup(struct net_device *dev, int isup)
725{
726 struct s6gmac *pd = netdev_priv(dev);
727 struct phy_device *phydev = pd->phydev;
728
729 pd->link.full = phydev->duplex;
730 pd->link.giga = (phydev->speed == 1000);
731 if (pd->link.mbit != phydev->speed) {
732 pd->link.mbit = phydev->speed;
733 s6gmac_set_rgmii_txclock(pd);
734 }
735 pd->link.isup = isup;
736 if (isup)
737 netif_carrier_on(dev);
738 phy_print_status(phydev);
739}
740
741static void s6gmac_adjust_link(struct net_device *dev)
742{
743 struct s6gmac *pd = netdev_priv(dev);
744 struct phy_device *phydev = pd->phydev;
745 if (pd->link.isup &&
746 (!phydev->link ||
747 (pd->link.mbit != phydev->speed) ||
748 (pd->link.full != phydev->duplex))) {
749 pd->link.isup = 0;
750 netif_tx_disable(dev);
751 if (!phydev->link) {
752 netif_carrier_off(dev);
753 phy_print_status(phydev);
754 }
755 }
756 if (!pd->link.isup && phydev->link) {
757 if (pd->link.full != phydev->duplex) {
758 u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
759 if (phydev->duplex)
760 maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
761 else
762 maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
763 writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
764 }
765
766 if (pd->link.giga != (phydev->speed == 1000)) {
767 u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
768 u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
769 maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
770 << S6_GMAC_MACCONF2_IFMODE);
771 if (phydev->speed == 1000) {
772 fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
773 maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
774 << S6_GMAC_MACCONF2_IFMODE;
775 } else {
776 fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
777 maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
778 << S6_GMAC_MACCONF2_IFMODE;
779 }
780 writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
781 writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
782 }
783
784 if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
785 netif_wake_queue(dev);
786 s6gmac_linkisup(dev, 1);
787 }
788}
789
790static inline int s6gmac_phy_start(struct net_device *dev)
791{
792 struct s6gmac *pd = netdev_priv(dev);
793 int i = 0;
794 struct phy_device *p = NULL;
795 while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
796 i++;
797 p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
798 PHY_INTERFACE_MODE_RGMII);
799 if (IS_ERR(p)) {
800 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
801 return PTR_ERR(p);
802 }
803 p->supported &= PHY_GBIT_FEATURES;
804 p->advertising = p->supported;
805 pd->phydev = p;
806 return 0;
807}
808
809static inline void s6gmac_init_stats(struct net_device *dev)
810{
811 struct s6gmac *pd = netdev_priv(dev);
812 u32 mask;
813 mask = 1 << S6_GMAC_STATCARRY1_RDRP |
814 1 << S6_GMAC_STATCARRY1_RJBR |
815 1 << S6_GMAC_STATCARRY1_RFRG |
816 1 << S6_GMAC_STATCARRY1_ROVR |
817 1 << S6_GMAC_STATCARRY1_RUND |
818 1 << S6_GMAC_STATCARRY1_RCDE |
819 1 << S6_GMAC_STATCARRY1_RFLR |
820 1 << S6_GMAC_STATCARRY1_RALN |
821 1 << S6_GMAC_STATCARRY1_RMCA |
822 1 << S6_GMAC_STATCARRY1_RFCS |
823 1 << S6_GMAC_STATCARRY1_RPKT |
824 1 << S6_GMAC_STATCARRY1_RBYT;
825 writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
826 writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
827 mask = 1 << S6_GMAC_STATCARRY2_TDRP |
828 1 << S6_GMAC_STATCARRY2_TNCL |
829 1 << S6_GMAC_STATCARRY2_TXCL |
830 1 << S6_GMAC_STATCARRY2_TEDF |
831 1 << S6_GMAC_STATCARRY2_TPKT |
832 1 << S6_GMAC_STATCARRY2_TBYT |
833 1 << S6_GMAC_STATCARRY2_TFRG |
834 1 << S6_GMAC_STATCARRY2_TUND |
835 1 << S6_GMAC_STATCARRY2_TOVR |
836 1 << S6_GMAC_STATCARRY2_TFCS |
837 1 << S6_GMAC_STATCARRY2_TJBR;
838 writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
839 writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
840}
841
842static inline void s6gmac_init_dmac(struct net_device *dev)
843{
844 struct s6gmac *pd = netdev_priv(dev);
845 s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
846 s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
847 s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
848 s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
849}
850
851static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
852{
853 struct s6gmac *pd = netdev_priv(dev);
854 unsigned long flags;
855
856 spin_lock_irqsave(&pd->lock, flags);
857 writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
858 0 << S6_GMAC_BURST_PREWR_CFE |
859 1 << S6_GMAC_BURST_PREWR_PPE |
860 1 << S6_GMAC_BURST_PREWR_FCS |
861 ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
862 pd->reg + S6_GMAC_BURST_PREWR);
863 s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
864 (u32)skb->data, pd->io, skb->len);
865 if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
866 netif_stop_queue(dev);
867 if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
868 printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
869 pd->tx_skb_o, pd->tx_skb_i);
870 BUG();
871 }
872 pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
873 spin_unlock_irqrestore(&pd->lock, flags);
874 return 0;
875}
876
877static void s6gmac_tx_timeout(struct net_device *dev)
878{
879 struct s6gmac *pd = netdev_priv(dev);
880 unsigned long flags;
881 spin_lock_irqsave(&pd->lock, flags);
882 s6gmac_tx_interrupt(dev);
883 spin_unlock_irqrestore(&pd->lock, flags);
884}
885
886static int s6gmac_open(struct net_device *dev)
887{
888 struct s6gmac *pd = netdev_priv(dev);
889 unsigned long flags;
890 phy_read_status(pd->phydev);
891 spin_lock_irqsave(&pd->lock, flags);
892 pd->link.mbit = 0;
893 s6gmac_linkisup(dev, pd->phydev->link);
894 s6gmac_init_device(dev);
895 s6gmac_init_stats(dev);
896 s6gmac_init_dmac(dev);
897 s6gmac_rx_fillfifo(dev);
898 s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
899 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
900 s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
901 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
902 writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
903 0 << S6_GMAC_HOST_INT_TXPREWOVER |
904 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
905 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
906 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
907 pd->reg + S6_GMAC_HOST_INTMASK);
908 spin_unlock_irqrestore(&pd->lock, flags);
909 phy_start(pd->phydev);
910 netif_start_queue(dev);
911 return 0;
912}
913
914static int s6gmac_stop(struct net_device *dev)
915{
916 struct s6gmac *pd = netdev_priv(dev);
917 unsigned long flags;
918 netif_stop_queue(dev);
919 phy_stop(pd->phydev);
920 spin_lock_irqsave(&pd->lock, flags);
921 s6gmac_init_dmac(dev);
922 s6gmac_stop_device(dev);
923 while (pd->tx_skb_i != pd->tx_skb_o)
924 dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
925 while (pd->rx_skb_i != pd->rx_skb_o)
926 dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
927 spin_unlock_irqrestore(&pd->lock, flags);
928 return 0;
929}
930
931static struct net_device_stats *s6gmac_stats(struct net_device *dev)
932{
933 struct s6gmac *pd = netdev_priv(dev);
934 struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
935 int i;
936 do {
937 unsigned long flags;
938 spin_lock_irqsave(&pd->lock, flags);
939 for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
940 pd->stats[i] =
941 pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
942 s6gmac_stats_collect(pd, &statinf[0][0]);
943 s6gmac_stats_collect(pd, &statinf[1][0]);
944 i = s6gmac_stats_pending(pd, 0) |
945 s6gmac_stats_pending(pd, 1);
946 spin_unlock_irqrestore(&pd->lock, flags);
947 } while (i);
948 st->rx_errors = st->rx_crc_errors +
949 st->rx_frame_errors +
950 st->rx_length_errors +
951 st->rx_missed_errors;
952 st->tx_errors += st->tx_aborted_errors;
953 return st;
954}
955
956static int s6gmac_probe(struct platform_device *pdev)
957{
958 struct net_device *dev;
959 struct s6gmac *pd;
960 int res;
961 unsigned long i;
962 struct mii_bus *mb;
963
964 dev = alloc_etherdev(sizeof(*pd));
965 if (!dev)
966 return -ENOMEM;
967
968 dev->open = s6gmac_open;
969 dev->stop = s6gmac_stop;
970 dev->hard_start_xmit = s6gmac_tx;
971 dev->tx_timeout = s6gmac_tx_timeout;
972 dev->watchdog_timeo = HZ;
973 dev->get_stats = s6gmac_stats;
974 dev->irq = platform_get_irq(pdev, 0);
975 pd = netdev_priv(dev);
976 memset(pd, 0, sizeof(*pd));
977 spin_lock_init(&pd->lock);
978 pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
979 i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
980 pd->tx_dma = DMA_MASK_DMAC(i);
981 pd->tx_chan = DMA_INDEX_CHNL(i);
982 i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
983 pd->rx_dma = DMA_MASK_DMAC(i);
984 pd->rx_chan = DMA_INDEX_CHNL(i);
985 pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
986 res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
987 if (res) {
988 printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
989 goto errirq;
990 }
991 res = register_netdev(dev);
992 if (res) {
993 printk(KERN_ERR DRV_PRMT "error registering device %s\n",
994 dev->name);
995 goto errdev;
996 }
997 mb = mdiobus_alloc();
998 if (!mb) {
999 printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
1000 res = -ENOMEM;
1001 goto errmii;
1002 }
1003 mb->name = "s6gmac_mii";
1004 mb->read = s6mii_read;
1005 mb->write = s6mii_write;
1006 mb->reset = s6mii_reset;
1007 mb->priv = pd;
1008 snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
1009 mb->phy_mask = ~(1 << 0);
1010 mb->irq = &pd->mii.irq[0];
1011 for (i = 0; i < PHY_MAX_ADDR; i++) {
1012 int n = platform_get_irq(pdev, i + 1);
1013 if (n < 0)
1014 n = PHY_POLL;
1015 pd->mii.irq[i] = n;
1016 }
1017 mdiobus_register(mb);
1018 pd->mii.bus = mb;
1019 res = s6gmac_phy_start(dev);
1020 if (res)
1021 return res;
1022 platform_set_drvdata(pdev, dev);
1023 return 0;
1024errmii:
1025 unregister_netdev(dev);
1026errdev:
1027 free_irq(dev->irq, dev);
1028errirq:
1029 free_netdev(dev);
1030 return res;
1031}
1032
1033static int s6gmac_remove(struct platform_device *pdev)
1034{
1035 struct net_device *dev = platform_get_drvdata(pdev);
1036 if (dev) {
1037 struct s6gmac *pd = netdev_priv(dev);
1038 mdiobus_unregister(pd->mii.bus);
1039 unregister_netdev(dev);
1040 free_irq(dev->irq, dev);
1041 free_netdev(dev);
1042 }
1043 return 0;
1044}
1045
1046static struct platform_driver s6gmac_driver = {
1047 .probe = s6gmac_probe,
1048 .remove = s6gmac_remove,
1049 .driver = {
1050 .name = "s6gmac",
1051 },
1052};
1053
1054module_platform_driver(s6gmac_driver);
1055
1056MODULE_LICENSE("GPL");
1057MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
1058MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>");
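The s6gmac code above indexes its rx/tx skb rings with free-running u8 counters (rx_skb_i/rx_skb_o, tx_skb_i/tx_skb_o): occupancy is the unsigned difference (u8)(in - out) and slots are addressed modulo the ring size. The following is a minimal userspace sketch of that index arithmetic only, not driver code; the ring size and names are illustrative, and the scheme assumes the ring size divides 256 so the modulo addressing stays consistent across the u8 wrap.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16	/* stands in for S6_NUM_RX_SKB / S6_NUM_TX_SKB */

int main(void)
{
	uint8_t in = 0, out = 0;		/* free-running, wrap at 256 */
	int slot[RING_SIZE] = { 0 };		/* stands in for the skb pointer array */
	unsigned long produced = 0, consumed = 0;

	for (int step = 0; step < 1000; step++) {
		/* fill while there is room, as s6gmac_rx_fillfifo() does */
		while ((uint8_t)(in - out) < RING_SIZE) {
			slot[in % RING_SIZE] = 1;
			in++;
			produced++;
		}
		/* drain a few entries, as the rx/tx interrupt handlers do */
		for (int k = 0; k < 5 && (uint8_t)(in - out) > 0; k++) {
			slot[out % RING_SIZE] = 0;
			out++;
			consumed++;
		}
		/* occupancy stays correct even after 'in' wraps past 255 */
		assert((uint8_t)(in - out) == produced - consumed);
	}
	printf("in=%u out=%u occupancy=%u\n",
	       (unsigned)in, (unsigned)out, (unsigned)(uint8_t)(in - out));
	return 0;
}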
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 698494481d18..b1a271853d85 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -474,13 +474,19 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
474 /* allocate memory for RX skbuff array */ 474 /* allocate memory for RX skbuff array */
475 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, 475 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
476 sizeof(dma_addr_t), GFP_KERNEL); 476 sizeof(dma_addr_t), GFP_KERNEL);
477 if (rx_ring->rx_skbuff_dma == NULL) 477 if (!rx_ring->rx_skbuff_dma) {
478 goto dmamem_err; 478 dma_free_coherent(priv->device,
479 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
480 rx_ring->dma_rx, rx_ring->dma_rx_phy);
481 goto error;
482 }
479 483
480 rx_ring->rx_skbuff = kmalloc_array(rx_rsize, 484 rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
481 sizeof(struct sk_buff *), GFP_KERNEL); 485 sizeof(struct sk_buff *), GFP_KERNEL);
482 if (rx_ring->rx_skbuff == NULL) 486 if (!rx_ring->rx_skbuff) {
483 goto rxbuff_err; 487 kfree(rx_ring->rx_skbuff_dma);
488 goto error;
489 }
484 490
485 /* initialise the buffers */ 491 /* initialise the buffers */
486 for (desc_index = 0; desc_index < rx_rsize; desc_index++) { 492 for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
@@ -502,13 +508,6 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
502err_init_rx_buffers: 508err_init_rx_buffers:
503 while (--desc_index >= 0) 509 while (--desc_index >= 0)
504 free_rx_ring(priv->device, rx_ring, desc_index); 510 free_rx_ring(priv->device, rx_ring, desc_index);
505 kfree(rx_ring->rx_skbuff);
506rxbuff_err:
507 kfree(rx_ring->rx_skbuff_dma);
508dmamem_err:
509 dma_free_coherent(priv->device,
510 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
511 rx_ring->dma_rx, rx_ring->dma_rx_phy);
512error: 511error:
513 return -ENOMEM; 512 return -ENOMEM;
514} 513}
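For reference, the init_rx_ring() change above replaces a chain of fall-through error labels with cleanup done at each failure site before jumping to a single exit label. A rough, simplified userspace sketch of that unwind style follows; plain calloc/free stand in for the DMA and array allocations, the struct and function names are made up, and the real patch's cleanup details differ slightly.

#include <stdio.h>
#include <stdlib.h>

struct rx_ring {
	void *desc;		/* stands in for the coherent descriptor area */
	void *skbuff_dma;	/* stands in for the dma_addr_t array */
	void *skbuff;		/* stands in for the sk_buff pointer array */
};

static int init_ring(struct rx_ring *r, size_t n)
{
	r->desc = calloc(n, 64);
	if (!r->desc)
		goto error;

	r->skbuff_dma = calloc(n, sizeof(unsigned long));
	if (!r->skbuff_dma) {
		free(r->desc);			/* undo what already succeeded */
		goto error;
	}

	r->skbuff = calloc(n, sizeof(void *));
	if (!r->skbuff) {
		free(r->skbuff_dma);
		free(r->desc);
		goto error;
	}
	return 0;

error:
	return -1;		/* single exit, like the driver's 'error:' label */
}

int main(void)
{
	struct rx_ring r;

	if (init_ring(&r, 128) == 0) {
		printf("ring initialised\n");
		free(r.skbuff);
		free(r.skbuff_dma);
		free(r.desc);
	}
	return 0;
}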
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 866560ea9e18..b02eed12bfc5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
108 } 108 }
109 } 109 }
110 110
111 /* Get MAC address if available (DT) */
112 if (mac)
113 ether_addr_copy(priv->dev->dev_addr, mac);
114
115 priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr); 111 priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
116 if (!priv) { 112 if (!priv) {
117 pr_err("%s: main driver probe failed\n", __func__); 113 pr_err("%s: main driver probe failed\n", __func__);
@@ -125,6 +121,10 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
125 goto err_drv_remove; 121 goto err_drv_remove;
126 } 122 }
127 123
124 /* Get MAC address if available (DT) */
125 if (mac)
126 ether_addr_copy(priv->dev->dev_addr, mac);
127
128 /* Get the TX/RX IRQ numbers */ 128 /* Get the TX/RX IRQ numbers */
129 for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { 129 for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
130 priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++); 130 priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 118a427d1942..8c6b7c1651e5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1671,7 +1671,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1671 * 0 on success and an appropriate (-)ve integer as defined in errno.h 1671 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1672 * file on failure. 1672 * file on failure.
1673 */ 1673 */
1674static int stmmac_hw_setup(struct net_device *dev) 1674static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1675{ 1675{
1676 struct stmmac_priv *priv = netdev_priv(dev); 1676 struct stmmac_priv *priv = netdev_priv(dev);
1677 int ret; 1677 int ret;
@@ -1708,9 +1708,11 @@ static int stmmac_hw_setup(struct net_device *dev)
1708 1708
1709 stmmac_mmc_setup(priv); 1709 stmmac_mmc_setup(priv);
1710 1710
1711 ret = stmmac_init_ptp(priv); 1711 if (init_ptp) {
1712 if (ret && ret != -EOPNOTSUPP) 1712 ret = stmmac_init_ptp(priv);
1713 pr_warn("%s: failed PTP initialisation\n", __func__); 1713 if (ret && ret != -EOPNOTSUPP)
1714 pr_warn("%s: failed PTP initialisation\n", __func__);
1715 }
1714 1716
1715#ifdef CONFIG_DEBUG_FS 1717#ifdef CONFIG_DEBUG_FS
1716 ret = stmmac_init_fs(dev); 1718 ret = stmmac_init_fs(dev);
@@ -1787,7 +1789,7 @@ static int stmmac_open(struct net_device *dev)
1787 goto init_error; 1789 goto init_error;
1788 } 1790 }
1789 1791
1790 ret = stmmac_hw_setup(dev); 1792 ret = stmmac_hw_setup(dev, true);
1791 if (ret < 0) { 1793 if (ret < 0) {
1792 pr_err("%s: Hw setup failed\n", __func__); 1794 pr_err("%s: Hw setup failed\n", __func__);
1793 goto init_error; 1795 goto init_error;
@@ -3036,7 +3038,7 @@ int stmmac_resume(struct net_device *ndev)
3036 netif_device_attach(ndev); 3038 netif_device_attach(ndev);
3037 3039
3038 init_dma_desc_rings(ndev, GFP_ATOMIC); 3040 init_dma_desc_rings(ndev, GFP_ATOMIC);
3039 stmmac_hw_setup(ndev); 3041 stmmac_hw_setup(ndev, false);
3040 stmmac_init_tx_coalesce(priv); 3042 stmmac_init_tx_coalesce(priv);
3041 3043
3042 napi_enable(&priv->napi); 3044 napi_enable(&priv->napi);
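The stmmac_hw_setup() change above threads a bool through the shared bring-up path so that open() still initialises PTP while resume() reuses the same routine without redoing it. A stubbed, userspace-only sketch of that shape; the function names here are placeholders, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

static void program_dma_and_mac(void)	{ puts("DMA and MAC programmed"); }
static int  register_ptp_clock(void)	{ puts("PTP clock registered"); return 0; }

static int hw_setup(bool init_ptp)
{
	program_dma_and_mac();		/* needed on open and on resume */
	if (init_ptp)			/* one-time setup, only on open */
		return register_ptp_clock();
	return 0;
}

int main(void)
{
	hw_setup(true);		/* open path: full setup including PTP */
	hw_setup(false);	/* resume path: keep the existing PTP clock */
	return 0;
}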
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 4032b170fe24..3039de2465ba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -430,7 +430,6 @@ static struct platform_driver stmmac_pltfr_driver = {
430 .remove = stmmac_pltfr_remove, 430 .remove = stmmac_pltfr_remove,
431 .driver = { 431 .driver = {
432 .name = STMMAC_RESOURCE_NAME, 432 .name = STMMAC_RESOURCE_NAME,
433 .owner = THIS_MODULE,
434 .pm = &stmmac_pltfr_pm_ops, 433 .pm = &stmmac_pltfr_pm_ops,
435 .of_match_table = of_match_ptr(stmmac_dt_ids), 434 .of_match_table = of_match_ptr(stmmac_dt_ids),
436 }, 435 },
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 45c408ef67d0..d2835bf7b4fb 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1201,6 +1201,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
1201 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); 1201 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
1202 if (IS_ERR(segs)) { 1202 if (IS_ERR(segs)) {
1203 dev->stats.tx_dropped++; 1203 dev->stats.tx_dropped++;
1204 dev_kfree_skb_any(skb);
1204 return NETDEV_TX_OK; 1205 return NETDEV_TX_OK;
1205 } 1206 }
1206 1207
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c560f9aeb55d..e068d48b0f21 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
610 610
611 /* Clear all mcast from ALE */ 611 /* Clear all mcast from ALE */
612 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << 612 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
613 priv->host_port); 613 priv->host_port, -1);
614 614
615 /* Flood All Unicast Packets to Host port */ 615 /* Flood All Unicast Packets to Host port */
616 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); 616 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
634static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 634static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
635{ 635{
636 struct cpsw_priv *priv = netdev_priv(ndev); 636 struct cpsw_priv *priv = netdev_priv(ndev);
637 int vid;
638
639 if (priv->data.dual_emac)
640 vid = priv->slaves[priv->emac_port].port_vlan;
641 else
642 vid = priv->data.default_vlan;
637 643
638 if (ndev->flags & IFF_PROMISC) { 644 if (ndev->flags & IFF_PROMISC) {
639 /* Enable promiscuous mode */ 645 /* Enable promiscuous mode */
@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
649 cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); 655 cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
650 656
651 /* Clear all mcast from ALE */ 657 /* Clear all mcast from ALE */
652 cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); 658 cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
659 vid);
653 660
654 if (!netdev_mc_empty(ndev)) { 661 if (!netdev_mc_empty(ndev)) {
655 struct netdev_hw_addr *ha; 662 struct netdev_hw_addr *ha;
@@ -757,6 +764,14 @@ requeue:
757static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 764static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
758{ 765{
759 struct cpsw_priv *priv = dev_id; 766 struct cpsw_priv *priv = dev_id;
767 int value = irq - priv->irqs_table[0];
768
769 /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
770 * is to make sure we will always write the correct value to the EOI
771 * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
772 * for TX Interrupt and 3 for MISC Interrupt.
773 */
774 cpdma_ctlr_eoi(priv->dma, value);
760 775
761 cpsw_intr_disable(priv); 776 cpsw_intr_disable(priv);
762 if (priv->irq_enabled == true) { 777 if (priv->irq_enabled == true) {
@@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
786 int num_tx, num_rx; 801 int num_tx, num_rx;
787 802
788 num_tx = cpdma_chan_process(priv->txch, 128); 803 num_tx = cpdma_chan_process(priv->txch, 128);
789 if (num_tx)
790 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
791 804
792 num_rx = cpdma_chan_process(priv->rxch, budget); 805 num_rx = cpdma_chan_process(priv->rxch, budget);
793 if (num_rx < budget) { 806 if (num_rx < budget) {
@@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
795 808
796 napi_complete(napi); 809 napi_complete(napi);
797 cpsw_intr_enable(priv); 810 cpsw_intr_enable(priv);
798 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
799 prim_cpsw = cpsw_get_slave_priv(priv, 0); 811 prim_cpsw = cpsw_get_slave_priv(priv, 0);
800 if (prim_cpsw->irq_enabled == false) { 812 if (prim_cpsw->irq_enabled == false) {
801 prim_cpsw->irq_enabled = true; 813 prim_cpsw->irq_enabled = true;
@@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
1310 napi_enable(&priv->napi); 1322 napi_enable(&priv->napi);
1311 cpdma_ctlr_start(priv->dma); 1323 cpdma_ctlr_start(priv->dma);
1312 cpsw_intr_enable(priv); 1324 cpsw_intr_enable(priv);
1313 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1314 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1315 1325
1316 prim_cpsw = cpsw_get_slave_priv(priv, 0); 1326 prim_cpsw = cpsw_get_slave_priv(priv, 0);
1317 if (prim_cpsw->irq_enabled == false) { 1327 if (prim_cpsw->irq_enabled == false) {
@@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1578 cpdma_chan_start(priv->txch); 1588 cpdma_chan_start(priv->txch);
1579 cpdma_ctlr_int_ctrl(priv->dma, true); 1589 cpdma_ctlr_int_ctrl(priv->dma, true);
1580 cpsw_intr_enable(priv); 1590 cpsw_intr_enable(priv);
1581 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1582 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1583
1584} 1591}
1585 1592
1586static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) 1593static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
@@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
1620 cpsw_interrupt(ndev->irq, priv); 1627 cpsw_interrupt(ndev->irq, priv);
1621 cpdma_ctlr_int_ctrl(priv->dma, true); 1628 cpdma_ctlr_int_ctrl(priv->dma, true);
1622 cpsw_intr_enable(priv); 1629 cpsw_intr_enable(priv);
1623 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1624 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1625
1626} 1630}
1627#endif 1631#endif
1628 1632
@@ -1630,16 +1634,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
1630 unsigned short vid) 1634 unsigned short vid)
1631{ 1635{
1632 int ret; 1636 int ret;
1633 int unreg_mcast_mask; 1637 int unreg_mcast_mask = 0;
1638 u32 port_mask;
1634 1639
1635 if (priv->ndev->flags & IFF_ALLMULTI) 1640 if (priv->data.dual_emac) {
1636 unreg_mcast_mask = ALE_ALL_PORTS; 1641 port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
1637 else 1642
1638 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; 1643 if (priv->ndev->flags & IFF_ALLMULTI)
1644 unreg_mcast_mask = port_mask;
1645 } else {
1646 port_mask = ALE_ALL_PORTS;
1647
1648 if (priv->ndev->flags & IFF_ALLMULTI)
1649 unreg_mcast_mask = ALE_ALL_PORTS;
1650 else
1651 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1652 }
1639 1653
1640 ret = cpsw_ale_add_vlan(priv->ale, vid, 1654 ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
1641 ALE_ALL_PORTS << priv->host_port,
1642 0, ALE_ALL_PORTS << priv->host_port,
1643 unreg_mcast_mask << priv->host_port); 1655 unreg_mcast_mask << priv->host_port);
1644 if (ret != 0) 1656 if (ret != 0)
1645 return ret; 1657 return ret;
@@ -1650,8 +1662,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
1650 goto clean_vid; 1662 goto clean_vid;
1651 1663
1652 ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1664 ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1653 ALE_ALL_PORTS << priv->host_port, 1665 port_mask, ALE_VLAN, vid, 0);
1654 ALE_VLAN, vid, 0);
1655 if (ret != 0) 1666 if (ret != 0)
1656 goto clean_vlan_ucast; 1667 goto clean_vlan_ucast;
1657 return 0; 1668 return 0;
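The comment added to cpsw_interrupt() above relies on the four interrupt lines being requested in a fixed order, so the offset of the current irq from the first table entry is exactly the value written to the EOI register (0 RX_THRESH, 1 RX, 2 TX, 3 MISC). A small standalone illustration of that arithmetic, with made-up irq numbers:

#include <stdio.h>

static const char *const eoi_name[] = { "RX_THRESH", "RX", "TX", "MISC" };

int main(void)
{
	/* hypothetical irq numbers, listed in the order they were requested */
	const int irqs_table[4] = { 40, 41, 42, 43 };

	for (int i = 0; i < 4; i++) {
		int irq = irqs_table[i];
		int value = irq - irqs_table[0];   /* same subtraction as the driver */
		printf("irq %d -> EOI value %d (%s)\n", irq, value, eoi_name[value]);
	}
	return 0;
}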
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 097ebe7077ac..5246b3a18ff8 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
234 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); 234 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
235} 235}
236 236
237int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) 237int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
238{ 238{
239 u32 ale_entry[ALE_ENTRY_WORDS]; 239 u32 ale_entry[ALE_ENTRY_WORDS];
240 int ret, idx; 240 int ret, idx;
@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
245 if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) 245 if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
246 continue; 246 continue;
247 247
248 /* if vid passed is -1 then remove all multicast entry from
249 * the table irrespective of vlan id, if a valid vlan id is
250 * passed then remove only multicast added to that vlan id.
251 * if vlan id doesn't match then move on to next entry.
252 */
253 if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
254 continue;
255
248 if (cpsw_ale_get_mcast(ale_entry)) { 256 if (cpsw_ale_get_mcast(ale_entry)) {
249 u8 addr[6]; 257 u8 addr[6];
250 258
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index c0d4127aa549..af1e7ecd87c6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
92 92
93int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); 93int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
94int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); 94int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
95int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); 95int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
96int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, 96int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
97 int flags, u16 vid); 97 int flags, u16 vid);
98int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, 98int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
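The new vid argument to cpsw_ale_flush_multicast() follows the rule spelled out in the comment above: -1 flushes every multicast entry, any other value flushes only the multicast entries learned on that VLAN. Below is a sketch of that filter applied to a made-up in-memory table rather than the ALE hardware:

#include <stdbool.h>
#include <stdio.h>

struct ale_entry {
	int vid;
	bool mcast;
	bool free;
};

static void flush_multicast(struct ale_entry *tbl, int n, int vid)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].free || !tbl[i].mcast)
			continue;
		if (vid != -1 && tbl[i].vid != vid)	/* same rule as the driver */
			continue;
		tbl[i].free = true;
	}
}

int main(void)
{
	struct ale_entry tbl[] = {
		{ .vid = 1,   .mcast = true  },
		{ .vid = 100, .mcast = true  },
		{ .vid = 100, .mcast = false },	/* non-multicast entries are never touched */
	};

	flush_multicast(tbl, 3, 100);	/* drops only the vid-100 multicast entry */
	for (int i = 0; i < 3; i++)
		printf("entry %d: %s\n", i, tbl[i].free ? "freed" : "kept");
	return 0;
}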
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ea712512c7d1..5fae4354722c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -62,6 +62,7 @@
62#include <linux/of.h> 62#include <linux/of.h>
63#include <linux/of_address.h> 63#include <linux/of_address.h>
64#include <linux/of_device.h> 64#include <linux/of_device.h>
65#include <linux/of_mdio.h>
65#include <linux/of_irq.h> 66#include <linux/of_irq.h>
66#include <linux/of_net.h> 67#include <linux/of_net.h>
67 68
@@ -343,9 +344,7 @@ struct emac_priv {
343 u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; 344 u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
344 u32 rx_addr_type; 345 u32 rx_addr_type;
345 const char *phy_id; 346 const char *phy_id;
346#ifdef CONFIG_OF
347 struct device_node *phy_node; 347 struct device_node *phy_node;
348#endif
349 struct phy_device *phydev; 348 struct phy_device *phydev;
350 spinlock_t lock; 349 spinlock_t lock;
351 /*platform specific members*/ 350 /*platform specific members*/
@@ -922,6 +921,16 @@ static void emac_int_disable(struct emac_priv *priv)
922 if (priv->int_disable) 921 if (priv->int_disable)
923 priv->int_disable(); 922 priv->int_disable();
924 923
924 /* NOTE: Rx Threshold and Misc interrupts are not enabled */
925
926 /* ack rxen only then a new pulse will be generated */
927 emac_write(EMAC_DM646X_MACEOIVECTOR,
928 EMAC_DM646X_MAC_EOI_C0_RXEN);
929
930 /* ack txen- only then a new pulse will be generated */
931 emac_write(EMAC_DM646X_MACEOIVECTOR,
932 EMAC_DM646X_MAC_EOI_C0_TXEN);
933
925 local_irq_restore(flags); 934 local_irq_restore(flags);
926 935
927 } else { 936 } else {
@@ -951,15 +960,6 @@ static void emac_int_enable(struct emac_priv *priv)
951 * register */ 960 * register */
952 961
953 /* NOTE: Rx Threshold and Misc interrupts are not enabled */ 962 /* NOTE: Rx Threshold and Misc interrupts are not enabled */
954
955 /* ack rxen only then a new pulse will be generated */
956 emac_write(EMAC_DM646X_MACEOIVECTOR,
957 EMAC_DM646X_MAC_EOI_C0_RXEN);
958
959 /* ack txen- only then a new pulse will be generated */
960 emac_write(EMAC_DM646X_MACEOIVECTOR,
961 EMAC_DM646X_MAC_EOI_C0_TXEN);
962
963 } else { 963 } else {
964 /* Set DM644x control registers for interrupt control */ 964 /* Set DM644x control registers for interrupt control */
965 emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1); 965 emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
@@ -1537,7 +1537,13 @@ static int emac_dev_open(struct net_device *ndev)
1537 int i = 0; 1537 int i = 0;
1538 struct emac_priv *priv = netdev_priv(ndev); 1538 struct emac_priv *priv = netdev_priv(ndev);
1539 1539
1540 pm_runtime_get(&priv->pdev->dev); 1540 ret = pm_runtime_get_sync(&priv->pdev->dev);
1541 if (ret < 0) {
1542 pm_runtime_put_noidle(&priv->pdev->dev);
1543 dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
1544 __func__, ret);
1545 return ret;
1546 }
1541 1547
1542 netif_carrier_off(ndev); 1548 netif_carrier_off(ndev);
1543 for (cnt = 0; cnt < ETH_ALEN; cnt++) 1549 for (cnt = 0; cnt < ETH_ALEN; cnt++)
@@ -1596,8 +1602,20 @@ static int emac_dev_open(struct net_device *ndev)
1596 cpdma_ctlr_start(priv->dma); 1602 cpdma_ctlr_start(priv->dma);
1597 1603
1598 priv->phydev = NULL; 1604 priv->phydev = NULL;
1605
1606 if (priv->phy_node) {
1607 priv->phydev = of_phy_connect(ndev, priv->phy_node,
1608 &emac_adjust_link, 0, 0);
1609 if (!priv->phydev) {
1610 dev_err(emac_dev, "could not connect to phy %s\n",
1611 priv->phy_node->full_name);
1612 ret = -ENODEV;
1613 goto err;
1614 }
1615 }
1616
1599 /* use the first phy on the bus if pdata did not give us a phy id */ 1617 /* use the first phy on the bus if pdata did not give us a phy id */
1600 if (!priv->phy_id) { 1618 if (!priv->phydev && !priv->phy_id) {
1601 struct device *phy; 1619 struct device *phy;
1602 1620
1603 phy = bus_find_device(&mdio_bus_type, NULL, NULL, 1621 phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1606,7 +1624,7 @@ static int emac_dev_open(struct net_device *ndev)
1606 priv->phy_id = dev_name(phy); 1624 priv->phy_id = dev_name(phy);
1607 } 1625 }
1608 1626
1609 if (priv->phy_id && *priv->phy_id) { 1627 if (!priv->phydev && priv->phy_id && *priv->phy_id) {
1610 priv->phydev = phy_connect(ndev, priv->phy_id, 1628 priv->phydev = phy_connect(ndev, priv->phy_id,
1611 &emac_adjust_link, 1629 &emac_adjust_link,
1612 PHY_INTERFACE_MODE_MII); 1630 PHY_INTERFACE_MODE_MII);
@@ -1627,7 +1645,9 @@ static int emac_dev_open(struct net_device *ndev)
1627 "(mii_bus:phy_addr=%s, id=%x)\n", 1645 "(mii_bus:phy_addr=%s, id=%x)\n",
1628 priv->phydev->drv->name, dev_name(&priv->phydev->dev), 1646 priv->phydev->drv->name, dev_name(&priv->phydev->dev),
1629 priv->phydev->phy_id); 1647 priv->phydev->phy_id);
1630 } else { 1648 }
1649
1650 if (!priv->phydev) {
1631 /* No PHY , fix the link, speed and duplex settings */ 1651 /* No PHY , fix the link, speed and duplex settings */
1632 dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); 1652 dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
1633 priv->link = 1; 1653 priv->link = 1;
@@ -1724,6 +1744,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
1724 struct emac_priv *priv = netdev_priv(ndev); 1744 struct emac_priv *priv = netdev_priv(ndev);
1725 u32 mac_control; 1745 u32 mac_control;
1726 u32 stats_clear_mask; 1746 u32 stats_clear_mask;
1747 int err;
1748
1749 err = pm_runtime_get_sync(&priv->pdev->dev);
1750 if (err < 0) {
1751 pm_runtime_put_noidle(&priv->pdev->dev);
1752 dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
1753 __func__, err);
1754 return &ndev->stats;
1755 }
1727 1756
1728 /* update emac hardware stats and reset the registers*/ 1757 /* update emac hardware stats and reset the registers*/
1729 1758
@@ -1766,6 +1795,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
1766 ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN); 1795 ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
1767 emac_write(EMAC_TXUNDERRUN, stats_clear_mask); 1796 emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
1768 1797
1798 pm_runtime_put(&priv->pdev->dev);
1799
1769 return &ndev->stats; 1800 return &ndev->stats;
1770} 1801}
1771 1802
@@ -1859,7 +1890,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1859static int davinci_emac_probe(struct platform_device *pdev) 1890static int davinci_emac_probe(struct platform_device *pdev)
1860{ 1891{
1861 int rc = 0; 1892 int rc = 0;
1862 struct resource *res; 1893 struct resource *res, *res_ctrl;
1863 struct net_device *ndev; 1894 struct net_device *ndev;
1864 struct emac_priv *priv; 1895 struct emac_priv *priv;
1865 unsigned long hw_ram_addr; 1896 unsigned long hw_ram_addr;
@@ -1876,6 +1907,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1876 return -EBUSY; 1907 return -EBUSY;
1877 } 1908 }
1878 emac_bus_frequency = clk_get_rate(emac_clk); 1909 emac_bus_frequency = clk_get_rate(emac_clk);
1910 devm_clk_put(&pdev->dev, emac_clk);
1879 1911
1880 /* TODO: Probe PHY here if possible */ 1912 /* TODO: Probe PHY here if possible */
1881 1913
@@ -1917,11 +1949,20 @@ static int davinci_emac_probe(struct platform_device *pdev)
1917 rc = PTR_ERR(priv->remap_addr); 1949 rc = PTR_ERR(priv->remap_addr);
1918 goto no_pdata; 1950 goto no_pdata;
1919 } 1951 }
1952
1953 res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1954 if (res_ctrl) {
1955 priv->ctrl_base =
1956 devm_ioremap_resource(&pdev->dev, res_ctrl);
1957 if (IS_ERR(priv->ctrl_base))
1958 goto no_pdata;
1959 } else {
1960 priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
1961 }
1962
1920 priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; 1963 priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
1921 ndev->base_addr = (unsigned long)priv->remap_addr; 1964 ndev->base_addr = (unsigned long)priv->remap_addr;
1922 1965
1923 priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
1924
1925 hw_ram_addr = pdata->hw_ram_addr; 1966 hw_ram_addr = pdata->hw_ram_addr;
1926 if (!hw_ram_addr) 1967 if (!hw_ram_addr)
1927 hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; 1968 hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
@@ -1980,12 +2021,22 @@ static int davinci_emac_probe(struct platform_device *pdev)
1980 ndev->ethtool_ops = &ethtool_ops; 2021 ndev->ethtool_ops = &ethtool_ops;
1981 netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); 2022 netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
1982 2023
2024 pm_runtime_enable(&pdev->dev);
2025 rc = pm_runtime_get_sync(&pdev->dev);
2026 if (rc < 0) {
2027 pm_runtime_put_noidle(&pdev->dev);
2028 dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
2029 __func__, rc);
2030 goto no_cpdma_chan;
2031 }
2032
1983 /* register the network device */ 2033 /* register the network device */
1984 SET_NETDEV_DEV(ndev, &pdev->dev); 2034 SET_NETDEV_DEV(ndev, &pdev->dev);
1985 rc = register_netdev(ndev); 2035 rc = register_netdev(ndev);
1986 if (rc) { 2036 if (rc) {
1987 dev_err(&pdev->dev, "error in register_netdev\n"); 2037 dev_err(&pdev->dev, "error in register_netdev\n");
1988 rc = -ENODEV; 2038 rc = -ENODEV;
2039 pm_runtime_put(&pdev->dev);
1989 goto no_cpdma_chan; 2040 goto no_cpdma_chan;
1990 } 2041 }
1991 2042
@@ -1995,9 +2046,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1995 "(regs: %p, irq: %d)\n", 2046 "(regs: %p, irq: %d)\n",
1996 (void *)priv->emac_base_phys, ndev->irq); 2047 (void *)priv->emac_base_phys, ndev->irq);
1997 } 2048 }
1998 2049 pm_runtime_put(&pdev->dev);
1999 pm_runtime_enable(&pdev->dev);
2000 pm_runtime_resume(&pdev->dev);
2001 2050
2002 return 0; 2051 return 0;
2003 2052
@@ -2071,9 +2120,14 @@ static const struct emac_platform_data am3517_emac_data = {
2071 .hw_ram_addr = 0x01e20000, 2120 .hw_ram_addr = 0x01e20000,
2072}; 2121};
2073 2122
2123static const struct emac_platform_data dm816_emac_data = {
2124 .version = EMAC_VERSION_2,
2125};
2126
2074static const struct of_device_id davinci_emac_of_match[] = { 2127static const struct of_device_id davinci_emac_of_match[] = {
2075 {.compatible = "ti,davinci-dm6467-emac", }, 2128 {.compatible = "ti,davinci-dm6467-emac", },
2076 {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, 2129 {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
2130 {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, },
2077 {}, 2131 {},
2078}; 2132};
2079MODULE_DEVICE_TABLE(of, davinci_emac_of_match); 2133MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9c2d91ea0af4..dbcbf0c5bcfa 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
1043 lp->regs = of_iomap(op->dev.of_node, 0); 1043 lp->regs = of_iomap(op->dev.of_node, 0);
1044 if (!lp->regs) { 1044 if (!lp->regs) {
1045 dev_err(&op->dev, "could not map temac regs.\n"); 1045 dev_err(&op->dev, "could not map temac regs.\n");
1046 rc = -ENOMEM;
1046 goto nodev; 1047 goto nodev;
1047 } 1048 }
1048 1049
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
1062 np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); 1063 np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
1063 if (!np) { 1064 if (!np) {
1064 dev_err(&op->dev, "could not find DMA node\n"); 1065 dev_err(&op->dev, "could not find DMA node\n");
1066 rc = -ENODEV;
1065 goto err_iounmap; 1067 goto err_iounmap;
1066 } 1068 }
1067 1069
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 44b8d2bad8c3..4c9b4fa1d3c1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -388,7 +388,6 @@ struct axidma_bd {
388 * @dma_err_tasklet: Tasklet structure to process Axi DMA errors 388 * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
389 * @tx_irq: Axidma TX IRQ number 389 * @tx_irq: Axidma TX IRQ number
390 * @rx_irq: Axidma RX IRQ number 390 * @rx_irq: Axidma RX IRQ number
391 * @temac_type: axienet type to identify between soft and hard temac
392 * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X 391 * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
393 * @options: AxiEthernet option word 392 * @options: AxiEthernet option word
394 * @last_link: Phy link state in which the PHY was negotiated earlier 393 * @last_link: Phy link state in which the PHY was negotiated earlier
@@ -431,7 +430,6 @@ struct axienet_local {
431 430
432 int tx_irq; 431 int tx_irq;
433 int rx_irq; 432 int rx_irq;
434 u32 temac_type;
435 u32 phy_type; 433 u32 phy_type;
436 434
437 u32 options; /* Current options word */ 435 u32 options; /* Current options word */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4ea2d4e6f1d1..a6d2860b712c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
1501 lp->regs = of_iomap(op->dev.of_node, 0); 1501 lp->regs = of_iomap(op->dev.of_node, 0);
1502 if (!lp->regs) { 1502 if (!lp->regs) {
1503 dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); 1503 dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
1504 ret = -ENOMEM;
1504 goto nodev; 1505 goto nodev;
1505 } 1506 }
1506 /* Setup checksum offload, but default to off if not specified */ 1507 /* Setup checksum offload, but default to off if not specified */
@@ -1555,10 +1556,6 @@ static int axienet_of_probe(struct platform_device *op)
1555 if ((be32_to_cpup(p)) >= 0x4000) 1556 if ((be32_to_cpup(p)) >= 0x4000)
1556 lp->jumbo_support = 1; 1557 lp->jumbo_support = 1;
1557 } 1558 }
1558 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
1559 NULL);
1560 if (p)
1561 lp->temac_type = be32_to_cpup(p);
1562 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL); 1559 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
1563 if (p) 1560 if (p)
1564 lp->phy_type = be32_to_cpup(p); 1561 lp->phy_type = be32_to_cpup(p);
@@ -1567,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
1567 np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); 1564 np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
1568 if (!np) { 1565 if (!np) {
1569 dev_err(&op->dev, "could not find DMA node\n"); 1566 dev_err(&op->dev, "could not find DMA node\n");
1567 ret = -ENODEV;
1570 goto err_iounmap; 1568 goto err_iounmap;
1571 } 1569 }
1572 lp->dma_regs = of_iomap(np, 0); 1570 lp->dma_regs = of_iomap(np, 0);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 24858799c204..9d4ce388510a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1109 res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); 1109 res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
1110 if (!res) { 1110 if (!res) {
1111 dev_err(dev, "no IRQ found\n"); 1111 dev_err(dev, "no IRQ found\n");
1112 rc = -ENXIO;
1112 goto error; 1113 goto error;
1113 } 1114 }
1114 1115
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2f48f790c9b4..384ca4f4de4a 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -590,6 +590,7 @@ struct nvsp_message {
590 590
591 591
592#define NETVSC_RECEIVE_BUFFER_ID 0xcafe 592#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
593#define NETVSC_SEND_BUFFER_ID 0
593 594
594#define NETVSC_PACKET_SIZE 4096 595#define NETVSC_PACKET_SIZE 4096
595 596
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index dd867e6cabd6..9f49c0129a78 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -161,8 +161,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
161 161
162 /* Deal with the send buffer we may have setup. 162 /* Deal with the send buffer we may have setup.
163 * If we got a send section size, it means we received a 163 * If we got a send section size, it means we received a
164 * SendsendBufferComplete msg (ie sent 164 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
165 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need 165 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
166 * to send a revoke msg here 166 * to send a revoke msg here
167 */ 167 */
168 if (net_device->send_section_size) { 168 if (net_device->send_section_size) {
@@ -172,7 +172,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
172 172
173 revoke_packet->hdr.msg_type = 173 revoke_packet->hdr.msg_type =
174 NVSP_MSG1_TYPE_REVOKE_SEND_BUF; 174 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
175 revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0; 175 revoke_packet->msg.v1_msg.revoke_send_buf.id =
176 NETVSC_SEND_BUFFER_ID;
176 177
177 ret = vmbus_sendpacket(net_device->dev->channel, 178 ret = vmbus_sendpacket(net_device->dev->channel,
178 revoke_packet, 179 revoke_packet,
@@ -204,7 +205,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
204 net_device->send_buf_gpadl_handle = 0; 205 net_device->send_buf_gpadl_handle = 0;
205 } 206 }
206 if (net_device->send_buf) { 207 if (net_device->send_buf) {
207 /* Free up the receive buffer */ 208 /* Free up the send buffer */
208 vfree(net_device->send_buf); 209 vfree(net_device->send_buf);
209 net_device->send_buf = NULL; 210 net_device->send_buf = NULL;
210 } 211 }
@@ -339,9 +340,9 @@ static int netvsc_init_buf(struct hv_device *device)
339 init_packet = &net_device->channel_init_pkt; 340 init_packet = &net_device->channel_init_pkt;
340 memset(init_packet, 0, sizeof(struct nvsp_message)); 341 memset(init_packet, 0, sizeof(struct nvsp_message));
341 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF; 342 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
342 init_packet->msg.v1_msg.send_recv_buf.gpadl_handle = 343 init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
343 net_device->send_buf_gpadl_handle; 344 net_device->send_buf_gpadl_handle;
344 init_packet->msg.v1_msg.send_recv_buf.id = 0; 345 init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
345 346
346 /* Send the gpadl notification request */ 347 /* Send the gpadl notification request */
347 ret = vmbus_sendpacket(device->channel, init_packet, 348 ret = vmbus_sendpacket(device->channel, init_packet,
@@ -364,7 +365,7 @@ static int netvsc_init_buf(struct hv_device *device)
364 netdev_err(ndev, "Unable to complete send buffer " 365 netdev_err(ndev, "Unable to complete send buffer "
365 "initialization with NetVsp - status %d\n", 366 "initialization with NetVsp - status %d\n",
366 init_packet->msg.v1_msg. 367 init_packet->msg.v1_msg.
367 send_recv_buf_complete.status); 368 send_send_buf_complete.status);
368 ret = -EINVAL; 369 ret = -EINVAL;
369 goto cleanup; 370 goto cleanup;
370 } 371 }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c530de1e63f5..3ad8ca76196d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -88,6 +88,7 @@ struct kszphy_priv {
 
 static const struct kszphy_type ksz8021_type = {
 	.led_mode_reg = MII_KSZPHY_CTRL_2,
+	.has_broadcast_disable = true,
 	.has_rmii_ref_clk_sel = true,
 };
 
@@ -258,19 +259,6 @@ static int kszphy_config_init(struct phy_device *phydev)
 	return 0;
 }
 
-static int ksz8021_config_init(struct phy_device *phydev)
-{
-	int rc;
-
-	rc = kszphy_config_init(phydev);
-	if (rc)
-		return rc;
-
-	rc = kszphy_broadcast_disable(phydev);
-
-	return rc < 0 ? rc : 0;
-}
-
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
 				       struct device_node *of_node, u16 reg,
 				       char *field1, char *field2,
@@ -584,7 +572,7 @@ static struct phy_driver ksphy_driver[] = {
 	.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data = &ksz8021_type,
 	.probe = kszphy_probe,
-	.config_init = ksz8021_config_init,
+	.config_init = kszphy_config_init,
 	.config_aneg = genphy_config_aneg,
 	.read_status = genphy_read_status,
 	.ack_interrupt = kszphy_ack_interrupt,
@@ -601,7 +589,7 @@ static struct phy_driver ksphy_driver[] = {
 	.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data = &ksz8021_type,
 	.probe = kszphy_probe,
-	.config_init = ksz8021_config_init,
+	.config_init = kszphy_config_init,
 	.config_aneg = genphy_config_aneg,
 	.read_status = genphy_read_status,
 	.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 93e224217e24..f7ff493f1e73 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
 static void team_notify_peers_work(struct work_struct *work)
 {
 	struct team *team;
+	int val;
 
 	team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
 		schedule_delayed_work(&team->notify_peers.dw, 0);
 		return;
 	}
+	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+	if (val < 0) {
+		rtnl_unlock();
+		return;
+	}
 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
 	rtnl_unlock();
-	if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+	if (val)
 		schedule_delayed_work(&team->notify_peers.dw,
 				      msecs_to_jiffies(team->notify_peers.interval));
 }
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
 	struct team *team;
+	int val;
 
 	team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
 		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
 		return;
 	}
+	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+	if (val < 0) {
+		rtnl_unlock();
+		return;
+	}
 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
 	rtnl_unlock();
-	if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+	if (val)
 		schedule_delayed_work(&team->mcast_rejoin.dw,
 				      msecs_to_jiffies(team->mcast_rejoin.interval));
 }
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index dcb6d33141e0..1e9cdca37014 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
 	awd.done = 0;
 
 	urb->context = &awd;
-	status = usb_submit_urb(urb, GFP_NOIO);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status) {
 		// something went wrong
 		usb_free_urb(urb);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b8a82b86f909..602dc6668c3a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -56,6 +56,8 @@ struct qmi_wwan_state {
 /* default ethernet address used by the modem */
 static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
 
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
+
 /* Make up an ethernet header if the packet doesn't have one.
  *
  * A firmware bug common among several devices cause them to send raw
@@ -332,10 +334,12 @@ next_desc:
 		usb_driver_release_interface(driver, info->data);
 	}
 
-	/* Never use the same address on both ends of the link, even
-	 * if the buggy firmware told us to.
+	/* Never use the same address on both ends of the link, even if the
+	 * buggy firmware told us to. Or, if device is assigned the well-known
+	 * buggy firmware MAC address, replace it with a random address,
 	 */
-	if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+	if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+	    ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
 		eth_hw_addr_random(dev->net);
 
 	/* make MAC addr easily distinguishable from an IP header */
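The qmi_wwan hunk adds a second sentinel, buggy_fw_addr, beside default_modem_addr and randomizes the interface MAC whenever the firmware reports either one. A hedged userspace sketch of the same check follows; the two sentinel values are copied from the diff, while fixup_mac() and its bit handling are illustrative stand-ins for eth_hw_addr_random():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

static const uint8_t default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
static const uint8_t buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};

static bool ether_addr_equal(const uint8_t *a, const uint8_t *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

/* Replace a known-bad address with a random, locally administered unicast one. */
static void fixup_mac(uint8_t mac[ETH_ALEN])
{
	if (!ether_addr_equal(mac, default_modem_addr) &&
	    !ether_addr_equal(mac, buggy_fw_addr))
		return;

	for (int i = 0; i < ETH_ALEN; i++)
		mac[i] = rand() & 0xff;
	mac[0] &= 0xfe;		/* clear the multicast bit */
	mac[0] |= 0x02;		/* set the locally administered bit */
}

int main(void)
{
	uint8_t mac[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};

	fixup_mac(mac);
	for (int i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", mac[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}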
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2d1c77e81836..bf405f134d3a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
 		index &= ~3;
 	}
 
-	generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
-	data |= __le32_to_cpu(tmp) & ~mask;
 	tmp = __cpu_to_le32(data);
 
 	generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
 		index &= ~3;
 	}
 
-	generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
-	data |= __le32_to_cpu(tmp) & ~mask;
 	tmp = __cpu_to_le32(data);
 
 	generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data)
 	ocp_reg_write(tp, OCP_SRAM_DATA, data);
 }
 
-static u16 sram_read(struct r8152 *tp, u16 addr)
-{
-	ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
-	return ocp_reg_read(tp, OCP_SRAM_DATA);
-}
-
 static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
 {
 	struct r8152 *tp = netdev_priv(netdev);
@@ -1897,6 +1885,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
 	netif_wake_queue(netdev);
 }
 
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+		       netdev_features_t features)
+{
+	u32 mss = skb_shinfo(skb)->gso_size;
+	int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+	int offset = skb_transport_offset(skb);
+
+	if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+		features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+	else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+		features &= ~NETIF_F_GSO_MASK;
+
+	return features;
+}
+
 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 				      struct net_device *netdev)
 {
@@ -2502,24 +2506,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
 	data = ocp_reg_read(tp, OCP_POWER_CFG);
 	data |= EN_10M_PLLOFF;
 	ocp_reg_write(tp, OCP_POWER_CFG, data);
-	data = sram_read(tp, SRAM_IMPEDANCE);
-	data &= ~RX_DRIVING_MASK;
-	sram_write(tp, SRAM_IMPEDANCE, data);
+	sram_write(tp, SRAM_IMPEDANCE, 0x0b13);
 
 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
 	ocp_data |= PFM_PWM_SWITCH;
 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
 
-	data = sram_read(tp, SRAM_LPF_CFG);
-	data |= LPF_AUTO_TUNE;
-	sram_write(tp, SRAM_LPF_CFG, data);
+	/* Enable LPF corner auto tune */
+	sram_write(tp, SRAM_LPF_CFG, 0xf70f);
 
-	data = sram_read(tp, SRAM_10M_AMP1);
-	data |= GDAC_IB_UPALL;
-	sram_write(tp, SRAM_10M_AMP1, data);
-	data = sram_read(tp, SRAM_10M_AMP2);
-	data |= AMP_DN;
-	sram_write(tp, SRAM_10M_AMP2, data);
+	/* Adjust 10M Amplitude */
+	sram_write(tp, SRAM_10M_AMP1, 0x00af);
+	sram_write(tp, SRAM_10M_AMP2, 0x0208);
 
 	set_bit(PHY_RESET, &tp->flags);
 }
@@ -3706,6 +3704,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
 	.ndo_set_mac_address = rtl8152_set_mac_address,
 	.ndo_change_mtu = rtl8152_change_mtu,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_features_check = rtl8152_features_check,
 };
 
 static void r8152b_get_version(struct r8152 *tp)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8bd7191572d..5ca97713bfb3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -760,7 +760,6 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 		container_of(napi, struct receive_queue, napi);
 	unsigned int r, received = 0;
 
-again:
 	received += virtnet_receive(rq, budget - received);
 
 	/* Out of packets? */
@@ -771,7 +770,6 @@ again:
 		    napi_schedule_prep(napi)) {
 			virtqueue_disable_cb(rq->vq);
 			__napi_schedule(napi);
-			goto again;
 		}
 	}
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 49d9f2291998..7fbd89fbe107 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1579,8 +1579,10 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 	bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
 
 	skb = udp_tunnel_handle_offloads(skb, udp_sum);
-	if (IS_ERR(skb))
-		return -EINVAL;
+	if (IS_ERR(skb)) {
+		err = -EINVAL;
+		goto err;
+	}
 
 	skb_scrub_packet(skb, xnet);
 
@@ -1590,12 +1592,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 
 	/* Need space for new headers (invalidates iph ptr) */
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err))
-		return err;
+	if (unlikely(err)) {
+		kfree_skb(skb);
+		goto err;
+	}
 
 	skb = vlan_hwaccel_push_inside(skb);
-	if (WARN_ON(!skb))
-		return -ENOMEM;
+	if (WARN_ON(!skb)) {
+		err = -ENOMEM;
+		goto err;
+	}
 
 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 	vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1606,6 +1612,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 	udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
 			     ttl, src_port, dst_port);
 	return 0;
+err:
+	dst_release(dst);
+	return err;
 }
 #endif
 
@@ -1621,7 +1630,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 
 	skb = udp_tunnel_handle_offloads(skb, udp_sum);
 	if (IS_ERR(skb))
-		return -EINVAL;
+		return PTR_ERR(skb);
 
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ VXLAN_HLEN + sizeof(struct iphdr)
@@ -1629,8 +1638,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 
 	/* Need space for new headers (invalidates iph ptr) */
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err))
+	if (unlikely(err)) {
+		kfree_skb(skb);
 		return err;
+	}
 
 	skb = vlan_hwaccel_push_inside(skb);
 	if (WARN_ON(!skb))
@@ -1776,9 +1787,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 				     tos, ttl, df, src_port, dst_port,
 				     htonl(vni << 8),
 				     !net_eq(vxlan->net, dev_net(vxlan->dev)));
-
-		if (err < 0)
+		if (err < 0) {
+			/* skb is already freed. */
+			skb = NULL;
 			goto rt_tx_error;
+		}
+
 		iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
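The vxlan hunks make every failure path in vxlan6_xmit_skb() fall through a single err: label that drops the dst reference (and frees the skb where the callee has not already consumed it), and they stop jumping to rt_tx_error with an skb that the tunnel transmit has already freed. The sketch below shows only the general single-exit cleanup shape, under invented resource names; it is not the driver's code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build and "transmit" a packet.  All error paths fall through one label
 * so each resource is released exactly once, mirroring the goto-err style
 * used in the vxlan change above. */
static int xmit_packet(const char *payload)
{
	char *skb = NULL;
	FILE *dst = NULL;	/* stands in for the cached route */
	int err;

	dst = tmpfile();
	if (!dst) {
		err = -1;
		goto err;
	}

	skb = strdup(payload);
	if (!skb) {
		err = -2;
		goto err;
	}

	if (fprintf(dst, "%s\n", skb) < 0) {
		err = -3;
		goto err;
	}

	free(skb);
	fclose(dst);
	return 0;

err:
	free(skb);		/* free(NULL) is a harmless no-op */
	if (dst)
		fclose(dst);
	return err;
}

int main(void)
{
	return xmit_packet("hello") ? EXIT_FAILURE : EXIT_SUCCESS;
}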
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3c06e9365949..9880dae2a569 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -1070,7 +1070,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 	 */
 	if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
 	    ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
-	     (sdiodev->pdata->oob_irq_supported)))
+	     (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
 		bus_if->wowl_supported = true;
 #endif
 
@@ -1167,7 +1167,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 
 	brcmf_dbg(SDIO, "Enter\n");
-	if (sdiodev->pdata->oob_irq_supported)
+	if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
 		disable_irq_wake(sdiodev->pdata->oob_irq_nr);
 	brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
 	atomic_set(&sdiodev->suspend, false);
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 91c0cb3c368e..21de4fe6cf2d 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -65,7 +65,8 @@ config IPW2100_DEBUG
 
 config IPW2200
 	tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
-	depends on PCI && CFG80211 && CFG80211_WEXT
+	depends on PCI && CFG80211
+	select CFG80211_WEXT
 	select WIRELESS_EXT
 	select WEXT_SPY
 	select WEXT_PRIV
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index e5be2d21868f..a5f9198d5747 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,8 +69,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 10
-#define IWL3160_UCODE_API_MAX 10
+#define IWL7260_UCODE_API_MAX 12
+#define IWL3160_UCODE_API_MAX 12
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK 10
@@ -105,7 +105,7 @@
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
 #define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_7000 0
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index bf0a95cb7153..3668fc57e770 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 10
+#define IWL8000_UCODE_API_MAX 12
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK 10
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 38de1513e4de..850b85a47806 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1323,10 +1323,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 
  try_again:
 	/* try next, if any */
-	kfree(pieces);
 	release_firmware(ucode_raw);
 	if (iwl_request_firmware(drv, false))
 		goto out_unbind;
+	kfree(pieces);
 	return;
 
  out_free_fw:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 9564ae173d06..1f7f15eb86da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -310,6 +310,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
 
 #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+#define FH_MEM_TB_MAX_LENGTH (0x00020000)
 
 /* TFDB Area - TFDs buffer table */
 #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index f2a047f6bb3e..1bbe4fc47b97 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *	longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+ *	regardless of the band or the number of the probes. FW will calculate
+ *	the actual dwell time.
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
@@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
 	IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
 	IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+	IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
 };
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 1f2acf47bfb2..201846de94e7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -672,6 +672,7 @@ struct iwl_scan_channel_opt {
  * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
  * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
  *	and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
  */
 enum iwl_mvm_lmac_scan_flags {
 	IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
@@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags {
 	IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
 	IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
 	IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
+	IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
 };
 
 enum iwl_scan_priority {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 31a5b3f4266c..e880f9d4717b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1004,8 +1004,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 {
 	lockdep_assert_held(&mvm->mutex);
 
-	/* disallow low power states when the FW is down */
-	iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+	/*
+	 * Disallow low power states when the FW is down by taking
+	 * the UCODE_DOWN ref. in case of ongoing hw restart the
+	 * ref is already taken, so don't take it again.
+	 */
+	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
 	/* async_handlers_wk is now blocked */
 
@@ -1023,6 +1028,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 	/* the fw is stopped, the aux sta is dead: clean up driver state */
 	iwl_mvm_del_aux_sta(mvm);
 
+	/*
+	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+	 * won't be called in this case).
+	 */
+	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
 	mvm->ucode_loaded = false;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index e5294d01181e..ec9a8e7bae1d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
  * already included in the probe template, so we need to set only
  * req->n_ssids - 1 bits in addition to the first bit.
  */
-static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
+				    enum ieee80211_band band, int n_ssids)
 {
+	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+		return 10;
 	if (band == IEEE80211_BAND_2GHZ)
 		return 20 + 3 * (n_ssids + 1);
 	return 10 + 2 * (n_ssids + 1);
 }
 
-static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
+				     enum ieee80211_band band)
 {
+	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+		return 110;
 	return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
 
@@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
 	 */
 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
 		u32 passive_dwell =
-			iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ);
+			iwl_mvm_get_passive_dwell(mvm,
+						  IEEE80211_BAND_2GHZ);
 		params->max_out_time = passive_dwell;
 	} else {
 		params->passive_fragmented = true;
@@ -348,8 +355,8 @@ not_bound:
 			params->dwell[band].passive = frag_passive_dwell;
 		else
 			params->dwell[band].passive =
-				iwl_mvm_get_passive_dwell(band);
-		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+				iwl_mvm_get_passive_dwell(mvm, band);
+		params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
 								      n_ssids);
 	}
 }
@@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
 
 	if (iwl_mvm_scan_pass_all(mvm, req))
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+	else
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
 
 	if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4f15d9decc81..4333306ccdee 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
 	}
 
-	/* tid_tspec will default to 0 = BE when QOS isn't enabled */
-	ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+	/* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
+	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+	else
+		ac = tid_to_mac80211_ac[0];
+
 	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
 			TX_CMD_FLG_BT_PRIO_POS;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index e56e77ef5d2e..917431e30f74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
 	if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
 		return false;
 
-	if (!mvm->cfg->rx_with_siso_diversity)
+	if (mvm->cfg->rx_with_siso_diversity)
 		return false;
 
 	ieee80211_iterate_active_interfaces_atomic(
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3ee8e3848876..d5aadb00dd9e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,7 +367,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
 /* 3165 Series */
 	{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
 	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -523,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	else if (cfg == &iwl7265_n_cfg)
 		cfg_7265d = &iwl7265d_n_cfg;
 	if (cfg_7265d &&
-	    (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+	    (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
 		cfg = cfg_7265d;
+		iwl_trans->cfg = cfg_7265d;
+	}
 #endif
 
 	pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5d79a1f44b8e..523fe0c88dcb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -614,7 +614,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
 {
 	u8 *v_addr;
 	dma_addr_t p_addr;
-	u32 offset, chunk_sz = section->len;
+	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
 	int ret = 0;
 
 	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
@@ -1012,16 +1012,21 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	/* Stop the device, and put it in low power state */
 	iwl_pcie_apm_stop(trans);
 
-	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
-	 * Clean again the interrupt here
+	/* stop and reset the on-board processor */
+	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+	udelay(20);
+
+	/*
+	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
+	 * This is a bug in certain verions of the hardware.
+	 * Certain devices also keep sending HW RF kill interrupt all
+	 * the time, unless the interrupt is ACKed even if the interrupt
+	 * should be masked. Re-ACK all the interrupts here.
 	 */
 	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
 	spin_unlock(&trans_pcie->irq_lock);
 
-	/* stop and reset the on-board processor */
-	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-	udelay(20);
 
 	/* clear all status bits */
 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
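The first trans.c hunk clamps chunk_sz with min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len), so firmware sections larger than the transfer-block limit are copied in bounded pieces instead of one oversized DMA. A small standalone sketch of that chunked-copy loop follows, with made-up buffer names and plain memcpy standing in for the DMA path:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_CHUNK 0x20000	/* mirrors the 128 KiB cap added above */

/* Copy a firmware section in bounded chunks, the way the PCIe loader now
 * clamps chunk_sz; the destination here is just another buffer. */
static void load_section(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t offset = 0;

	while (offset < len) {
		size_t chunk = len - offset;

		if (chunk > MAX_CHUNK)
			chunk = MAX_CHUNK;
		memcpy(dst + offset, src + offset, chunk);
		printf("loaded %zu bytes at offset %zu\n", chunk, offset);
		offset += chunk;
	}
}

int main(void)
{
	static uint8_t fw[300000], sram[300000];

	memset(fw, 0xab, sizeof(fw));
	load_section(sram, fw, sizeof(fw));
	return sram[sizeof(sram) - 1] == 0xab ? 0 : 1;
}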
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 846a2e6e34d8..c70efb9a6e78 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -666,7 +666,8 @@ tx_status_ok:
 }
 
 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
-				    u8 *entry, int rxring_idx, int desc_idx)
+				    struct sk_buff *new_skb, u8 *entry,
+				    int rxring_idx, int desc_idx)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
 	u8 tmp_one = 1;
 	struct sk_buff *skb;
 
+	if (likely(new_skb)) {
+		skb = new_skb;
+		goto remap;
+	}
 	skb = dev_alloc_skb(rtlpci->rxbuffersize);
 	if (!skb)
 		return 0;
-	rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
 
+remap:
 	/* just set skb->cb to mapping addr for pci_unmap_single use */
 	*((dma_addr_t *)skb->cb) =
 		pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
 	bufferaddress = *((dma_addr_t *)skb->cb);
 	if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
 		return 0;
+	rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
 	if (rtlpriv->use_new_trx_flow) {
 		rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
 					    HW_DESC_RX_PREPARE,
@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 		/*rx pkt */
 		struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
 				      rtlpci->rx_ring[rxring_idx].idx];
+		struct sk_buff *new_skb;
 
 		if (rtlpriv->use_new_trx_flow) {
 			rx_remained_cnt =
@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 		pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
 				 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
 
+		/* get a new skb - if fail, old one will be reused */
+		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+		if (unlikely(!new_skb)) {
+			pr_err("Allocation of new skb failed in %s\n",
+			       __func__);
+			goto no_new;
+		}
 		if (rtlpriv->use_new_trx_flow) {
 			buffer_desc =
 			    &rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 			schedule_work(&rtlpriv->works.lps_change_work);
 		}
 end:
+		skb = new_skb;
+no_new:
 		if (rtlpriv->use_new_trx_flow) {
-			_rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+			_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
 						 rxring_idx,
 						 rtlpci->rx_ring[rxring_idx].idx);
 		} else {
-			_rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+			_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+						 rxring_idx,
 						 rtlpci->rx_ring[rxring_idx].idx);
-
 			if (rtlpci->rx_ring[rxring_idx].idx ==
 			    rtlpci->rxringcount - 1)
 				rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
 	rtlpci->rx_ring[rxring_idx].idx = 0;
 	for (i = 0; i < rtlpci->rxringcount; i++) {
 		entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
-		if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+		if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
 					      rxring_idx, i))
 			return -ENOMEM;
 	}
@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
 
 	for (i = 0; i < rtlpci->rxringcount; i++) {
 		entry = &rtlpci->rx_ring[rxring_idx].desc[i];
-		if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+		if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
 					      rxring_idx, i))
 			return -ENOMEM;
 	}
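The rtlwifi/pci.c changes allocate the replacement RX skb before the filled one is handed up, and fall back to re-posting the old buffer when that allocation fails, so a ring descriptor is never left unarmed. Below is a self-contained sketch of that refill-or-recycle strategy with an invented ring; the real driver additionally remaps the buffer for DMA:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RING_SIZE 4
#define BUF_SIZE  64

static char *rx_ring[RING_SIZE];

/* Deliver one filled buffer and immediately re-arm the descriptor.
 * If a fresh buffer cannot be allocated, the old one is recycled so the
 * ring slot never becomes empty. */
static void rx_one(int idx)
{
	char *old = rx_ring[idx];
	char *fresh = malloc(BUF_SIZE);

	if (fresh) {
		printf("delivered: %s\n", old);	/* pass the old buffer up */
		free(old);
		rx_ring[idx] = fresh;		/* re-arm with the new buffer */
	} else {
		printf("alloc failed, dropping frame and reusing buffer\n");
		rx_ring[idx] = old;		/* recycle in place */
	}
	memset(rx_ring[idx], 0, BUF_SIZE);
}

int main(void)
{
	for (int i = 0; i < RING_SIZE; i++) {
		rx_ring[i] = malloc(BUF_SIZE);
		if (!rx_ring[i])
			return EXIT_FAILURE;
		snprintf(rx_ring[i], BUF_SIZE, "frame %d", i);
	}
	for (int i = 0; i < RING_SIZE; i++)
		rx_one(i);
	for (int i = 0; i < RING_SIZE; i++)
		free(rx_ring[i]);
	return 0;
}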
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index efbaf2ae1999..794204e34fba 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
 	}
 
 	queue->remaining_credit = credit_bytes;
+	queue->credit_usec = credit_usec;
 
 	err = connect_rings(be, queue);
 	if (err) {
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 22bcb4e12e2a..d8c10764f130 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -88,10 +88,8 @@ struct netfront_cb {
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
 struct netfront_stats {
-	u64 rx_packets;
-	u64 tx_packets;
-	u64 rx_bytes;
-	u64 tx_bytes;
+	u64 packets;
+	u64 bytes;
 	struct u64_stats_sync syncp;
 };
 
@@ -160,7 +158,8 @@ struct netfront_info {
 	struct netfront_queue *queues;
 
 	/* Statistics */
-	struct netfront_stats __percpu *stats;
+	struct netfront_stats __percpu *rx_stats;
+	struct netfront_stats __percpu *tx_stats;
 
 	atomic_t rx_gso_checksum_fixup;
 };
@@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned short id;
 	struct netfront_info *np = netdev_priv(dev);
-	struct netfront_stats *stats = this_cpu_ptr(np->stats);
+	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 	struct xen_netif_tx_request *tx;
 	char *data = skb->data;
 	RING_IDX i;
@@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (notify)
 		notify_remote_via_irq(queue->tx_irq);
 
-	u64_stats_update_begin(&stats->syncp);
-	stats->tx_bytes += skb->len;
-	stats->tx_packets++;
-	u64_stats_update_end(&stats->syncp);
+	u64_stats_update_begin(&tx_stats->syncp);
+	tx_stats->bytes += skb->len;
+	tx_stats->packets++;
+	u64_stats_update_end(&tx_stats->syncp);
 
 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 	xennet_tx_buf_gc(queue);
@@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 static int handle_incoming_queue(struct netfront_queue *queue,
 				 struct sk_buff_head *rxq)
 {
-	struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
 	int packets_dropped = 0;
 	struct sk_buff *skb;
 
@@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
 			continue;
 		}
 
-		u64_stats_update_begin(&stats->syncp);
-		stats->rx_packets++;
-		stats->rx_bytes += skb->len;
-		u64_stats_update_end(&stats->syncp);
+		u64_stats_update_begin(&rx_stats->syncp);
+		rx_stats->packets++;
+		rx_stats->bytes += skb->len;
+		u64_stats_update_end(&rx_stats->syncp);
 
 		/* Pass it up. */
 		napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 		unsigned int start;
 
 		do {
-			start = u64_stats_fetch_begin_irq(&stats->syncp);
+			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+			tx_packets = tx_stats->packets;
+			tx_bytes = tx_stats->bytes;
+		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
 
-			rx_packets = stats->rx_packets;
-			tx_packets = stats->tx_packets;
-			rx_bytes = stats->rx_bytes;
-			tx_bytes = stats->tx_bytes;
-		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+		do {
+			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+			rx_packets = rx_stats->packets;
+			rx_bytes = rx_stats->bytes;
+		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
 
 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;
@@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = {
 #endif
 };
 
+static void xennet_free_netdev(struct net_device *netdev)
+{
+	struct netfront_info *np = netdev_priv(netdev);
+
+	free_percpu(np->rx_stats);
+	free_percpu(np->tx_stats);
+	free_netdev(netdev);
+}
+
 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
 	int err;
@@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	np->queues = NULL;
 
 	err = -ENOMEM;
-	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
-	if (np->stats == NULL)
+	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+	if (np->rx_stats == NULL)
+		goto exit;
+	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+	if (np->tx_stats == NULL)
 		goto exit;
 
 	netdev->netdev_ops = &xennet_netdev_ops;
@@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	return netdev;
 
  exit:
-	free_netdev(netdev);
+	xennet_free_netdev(netdev);
 	return ERR_PTR(err);
 }
 
@@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev,
 	return 0;
 
  fail:
-	free_netdev(netdev);
+	xennet_free_netdev(netdev);
 	dev_set_drvdata(&dev->dev, NULL);
 	return err;
 }
@@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev)
 		info->queues = NULL;
 	}
 
-	free_percpu(info->stats);
-
-	free_netdev(info->netdev);
+	xennet_free_netdev(info->netdev);
 
 	return 0;
 }
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index e34da13885e8..27fa62ce6136 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1050,7 +1050,8 @@ static int miphy28lp_init(struct phy *phy)
 		ret = miphy28lp_init_usb3(miphy_phy);
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	}
 
 	mutex_unlock(&miphy_dev->miphy_mutex);
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index c96e8183a8ff..efe724f97e02 100644
--- a/drivers/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -29,10 +29,9 @@
 /**
  * omap_control_pcie_pcs - set the PCS delay count
  * @dev: the control module device
- * @id: index of the pcie PHY (should be 1 or 2)
  * @delay: 8 bit delay value
  */
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+void omap_control_pcie_pcs(struct device *dev, u8 delay)
 {
 	u32 val;
 	struct omap_control_phy *control_phy;
@@ -55,8 +54,8 @@ void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
 
 	val = readl(control_phy->pcie_pcs);
 	val &= ~(OMAP_CTRL_PCIE_PCS_MASK <<
-		(id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT));
-	val |= delay << (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+		OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+	val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
 	writel(val, control_phy->pcie_pcs);
 }
 EXPORT_SYMBOL_GPL(omap_control_pcie_pcs);
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index fb02a67c9181..a2b08f3ccb03 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -244,7 +244,8 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
 	else
 		data->num_phys = 3;
 
-	if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy"))
+	if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") ||
+	    of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
 		data->disc_thresh = 3;
 	else
 		data->disc_thresh = 2;
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 1387b4d4afe3..465de2c800f2 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -82,7 +82,6 @@ struct ti_pipe3 {
 	struct clk *refclk;
 	struct clk *div_clk;
 	struct pipe3_dpll_map *dpll_map;
-	u8 id;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -217,8 +216,13 @@ static int ti_pipe3_init(struct phy *x)
 	u32 val;
 	int ret = 0;
 
+	/*
+	 * Set pcie_pcs register to 0x96 for proper functioning of phy
+	 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
+	 * 18-1804.
+	 */
 	if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
-		omap_control_pcie_pcs(phy->control_dev, phy->id, 0xF1);
+		omap_control_pcie_pcs(phy->control_dev, 0x96);
 		return 0;
 	}
 
@@ -347,8 +351,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 	}
 
 	if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
-		if (of_property_read_u8(node, "id", &phy->id) < 0)
-			phy->id = 1;
 
 		clk = devm_clk_get(phy->dev, "dpll_ref");
 		if (IS_ERR(clk)) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e4f65510c87e..89dca77ca038 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
 	if (pctldev == NULL)
 		return;
 
-	mutex_lock(&pinctrldev_list_mutex);
 	mutex_lock(&pctldev->mutex);
-
 	pinctrl_remove_device_debugfs(pctldev);
+	mutex_unlock(&pctldev->mutex);
 
 	if (!IS_ERR(pctldev->p))
 		pinctrl_put(pctldev->p);
 
+	mutex_lock(&pinctrldev_list_mutex);
+	mutex_lock(&pctldev->mutex);
 	/* TODO: check that no pinmuxes are still active? */
 	list_del(&pctldev->node);
 	/* Destroy descriptor tree */
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index ba74f0aa60c7..43eacc924b7e 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -89,6 +89,7 @@ struct rockchip_iomux {
89 * @reg_pull: optional separate register for additional pull settings 89 * @reg_pull: optional separate register for additional pull settings
90 * @clk: clock of the gpio bank 90 * @clk: clock of the gpio bank
91 * @irq: interrupt of the gpio bank 91 * @irq: interrupt of the gpio bank
92 * @saved_enables: Saved content of GPIO_INTEN at suspend time.
92 * @pin_base: first pin number 93 * @pin_base: first pin number
93 * @nr_pins: number of pins in this bank 94 * @nr_pins: number of pins in this bank
94 * @name: name of the bank 95 * @name: name of the bank
@@ -107,6 +108,7 @@ struct rockchip_pin_bank {
107 struct regmap *regmap_pull; 108 struct regmap *regmap_pull;
108 struct clk *clk; 109 struct clk *clk;
109 int irq; 110 int irq;
111 u32 saved_enables;
110 u32 pin_base; 112 u32 pin_base;
111 u8 nr_pins; 113 u8 nr_pins;
112 char *name; 114 char *name;
@@ -1396,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
1396{ 1398{
1397 struct irq_chip *chip = irq_get_chip(irq); 1399 struct irq_chip *chip = irq_get_chip(irq);
1398 struct rockchip_pin_bank *bank = irq_get_handler_data(irq); 1400 struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
1399 u32 polarity = 0, data = 0;
1400 u32 pend; 1401 u32 pend;
1401 bool edge_changed = false;
1402 unsigned long flags;
1403 1402
1404 dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name); 1403 dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
1405 1404
@@ -1407,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
1407 1406
1408 pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS); 1407 pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
1409 1408
1410 if (bank->toggle_edge_mode) {
1411 polarity = readl_relaxed(bank->reg_base +
1412 GPIO_INT_POLARITY);
1413 data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
1414 }
1415
1416 while (pend) { 1409 while (pend) {
1417 unsigned int virq; 1410 unsigned int virq;
1418 1411
@@ -1432,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
1432 * needs manual intervention. 1425 * needs manual intervention.
1433 */ 1426 */
1434 if (bank->toggle_edge_mode & BIT(irq)) { 1427 if (bank->toggle_edge_mode & BIT(irq)) {
1435 if (data & BIT(irq)) 1428 u32 data, data_old, polarity;
1436 polarity &= ~BIT(irq); 1429 unsigned long flags;
1437 else
1438 polarity |= BIT(irq);
1439 1430
1440 edge_changed = true; 1431 data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
1441 } 1432 do {
1433 spin_lock_irqsave(&bank->slock, flags);
1442 1434
1443 generic_handle_irq(virq); 1435 polarity = readl_relaxed(bank->reg_base +
1444 } 1436 GPIO_INT_POLARITY);
1437 if (data & BIT(irq))
1438 polarity &= ~BIT(irq);
1439 else
1440 polarity |= BIT(irq);
1441 writel(polarity,
1442 bank->reg_base + GPIO_INT_POLARITY);
1445 1443
1446 if (bank->toggle_edge_mode && edge_changed) { 1444 spin_unlock_irqrestore(&bank->slock, flags);
1447 /* Interrupt params should only be set with ints disabled */
1448 spin_lock_irqsave(&bank->slock, flags);
1449 1445
1450 data = readl_relaxed(bank->reg_base + GPIO_INTEN); 1446 data_old = data;
1451 writel_relaxed(0, bank->reg_base + GPIO_INTEN); 1447 data = readl_relaxed(bank->reg_base +
1452 writel(polarity, bank->reg_base + GPIO_INT_POLARITY); 1448 GPIO_EXT_PORT);
1453 writel(data, bank->reg_base + GPIO_INTEN); 1449 } while ((data & BIT(irq)) != (data_old & BIT(irq)));
1450 }
1454 1451
1455 spin_unlock_irqrestore(&bank->slock, flags); 1452 generic_handle_irq(virq);
1456 } 1453 }
1457 1454
1458 chained_irq_exit(chip, desc); 1455 chained_irq_exit(chip, desc);
@@ -1543,6 +1540,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
1543 return 0; 1540 return 0;
1544} 1541}
1545 1542
1543static void rockchip_irq_suspend(struct irq_data *d)
1544{
1545 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
1546 struct rockchip_pin_bank *bank = gc->private;
1547
1548 bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
1549 irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
1550}
1551
1552static void rockchip_irq_resume(struct irq_data *d)
1553{
1554 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
1555 struct rockchip_pin_bank *bank = gc->private;
1556
1557 irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
1558}
1559
1560static void rockchip_irq_disable(struct irq_data *d)
1561{
1562 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
1563 u32 val;
1564
1565 irq_gc_lock(gc);
1566
1567 val = irq_reg_readl(gc, GPIO_INTEN);
1568 val &= ~d->mask;
1569 irq_reg_writel(gc, val, GPIO_INTEN);
1570
1571 irq_gc_unlock(gc);
1572}
1573
1574static void rockchip_irq_enable(struct irq_data *d)
1575{
1576 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
1577 u32 val;
1578
1579 irq_gc_lock(gc);
1580
1581 val = irq_reg_readl(gc, GPIO_INTEN);
1582 val |= d->mask;
1583 irq_reg_writel(gc, val, GPIO_INTEN);
1584
1585 irq_gc_unlock(gc);
1586}
1587
1546static int rockchip_interrupts_register(struct platform_device *pdev, 1588static int rockchip_interrupts_register(struct platform_device *pdev,
1547 struct rockchip_pinctrl *info) 1589 struct rockchip_pinctrl *info)
1548{ 1590{
@@ -1581,12 +1623,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
 		gc = irq_get_domain_generic_chip(bank->domain, 0);
 		gc->reg_base = bank->reg_base;
 		gc->private = bank;
-		gc->chip_types[0].regs.mask = GPIO_INTEN;
+		gc->chip_types[0].regs.mask = GPIO_INTMASK;
 		gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
 		gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
-		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+		gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+		gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
 		gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+		gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+		gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
 		gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
 		gc->wake_enabled = IRQ_MSK(bank->nr_pins);
 
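The rockchip change above emulates both-edge triggering by flipping the interrupt polarity until the pin level stops changing underneath it, so an edge that races with the register update is never lost. A minimal standalone sketch of that retry loop, not the kernel code itself; the read_level()/set_polarity() helpers and the simulated pin stand in for the GPIO_EXT_PORT and GPIO_INT_POLARITY accesses:

#include <stdbool.h>
#include <stdio.h>

/* Simulated pin level standing in for a GPIO_EXT_PORT read. */
static bool pin_level;

enum polarity { POLARITY_RISING, POLARITY_FALLING };
static enum polarity irq_polarity;

static bool read_level(void)              { return pin_level; }
static void set_polarity(enum polarity p) { irq_polarity = p; }

/*
 * Arm the next edge: if the line is high, wait for a falling edge,
 * otherwise for a rising edge.  Re-read the level after programming the
 * polarity and retry if it changed in between.
 */
static void arm_next_edge(void)
{
	bool level, old_level;

	level = read_level();
	do {
		set_polarity(level ? POLARITY_FALLING : POLARITY_RISING);
		old_level = level;
		level = read_level();
	} while (level != old_level);
}

int main(void)
{
	pin_level = true;
	arm_next_edge();
	printf("armed for %s edge\n",
	       irq_polarity == POLARITY_FALLING ? "falling" : "rising");
	return 0;
}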
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 7c9d51382248..9e5ec00084bb 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
 			       struct seq_file *s, unsigned pin_id)
 {
 	unsigned long config;
-	st_pinconf_get(pctldev, pin_id, &config);
 
+	mutex_unlock(&pctldev->mutex);
+	st_pinconf_get(pctldev, pin_id, &config);
+	mutex_lock(&pctldev->mutex);
 	seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
 		"\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
 		"de:%ld,rt-clk:%ld,rt-delay:%ld]",
@@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = {
 
 static struct irq_chip st_gpio_irqchip = {
 	.name		= "GPIO",
+	.irq_disable	= st_gpio_irq_mask,
 	.irq_mask	= st_gpio_irq_mask,
 	.irq_unmask	= st_gpio_irq_unmask,
 	.irq_set_type	= st_gpio_irq_set_type,
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index c5cef59f5965..779950c62e53 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
 
 	/* load the gpio chip */
 	xway_chip.dev = &pdev->dev;
-	of_gpiochip_add(&xway_chip);
 	ret = gpiochip_add(&xway_chip);
 	if (ret) {
-		of_gpiochip_remove(&xway_chip);
 		dev_err(&pdev->dev, "Failed to register gpio chip\n");
 		return ret;
 	}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e730935fa457..ed7017df065d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
 
 static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
 {
-	int i = 0;
+	int i;
 	const struct msm_function *func = pctrl->soc->functions;
 
-	for (; i <= pctrl->soc->nfunctions; i++)
+	for (i = 0; i < pctrl->soc->nfunctions; i++)
 		if (!strcmp(func[i].name, "ps_hold")) {
 			pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
 			pctrl->restart_nb.priority = 128;
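The pinctrl-msm change above is a classic loop-bounds fix: a table of nfunctions entries must be walked with i < nfunctions, not i <= nfunctions, or the last iteration reads one element past the end of the array. A tiny standalone illustration of the corrected pattern; the func table and the "ps_hold" entry here are made up for the example:

#include <stdio.h>
#include <string.h>

struct function { const char *name; };

int main(void)
{
	const struct function func[] = {
		{ "gpio" }, { "uart_tx" }, { "ps_hold" },
	};
	const size_t nfunctions = sizeof(func) / sizeof(func[0]);

	/* i < nfunctions: the last valid index is nfunctions - 1. */
	for (size_t i = 0; i < nfunctions; i++) {
		if (!strcmp(func[i].name, "ps_hold")) {
			printf("found ps_hold at index %zu\n", i);
			break;
		}
	}
	return 0;
}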
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index c71443c4f265..97b5e4ee1ca4 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1041,6 +1041,7 @@ static const struct x86_cpu_id rapl_ids[] = {
 	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
 	RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
 	RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+	RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
 	RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
 	{}
 };
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index c1444c3d84c2..2809ae0d6bcd 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -570,7 +570,7 @@ static struct regulator_ops s2mps14_reg_ops = {
 	.enable_mask	= S2MPS14_ENABLE_MASK		\
 }
 
-#define regulator_desc_s2mps14_buck(num, min, step) {		\
+#define regulator_desc_s2mps14_buck(num, min, step, min_sel) {	\
 	.name		= "BUCK"#num,				\
 	.id		= S2MPS14_BUCK##num,			\
 	.ops		= &s2mps14_reg_ops,			\
@@ -579,7 +579,7 @@ static struct regulator_ops s2mps14_reg_ops = {
 	.min_uV		= min,					\
 	.uV_step	= step,					\
 	.n_voltages	= S2MPS14_BUCK_N_VOLTAGES,		\
-	.linear_min_sel = S2MPS14_BUCK1235_START_SEL,		\
+	.linear_min_sel = min_sel,				\
 	.ramp_delay	= S2MPS14_BUCK_RAMP_DELAY,		\
 	.vsel_reg	= S2MPS14_REG_B1CTRL2 + (num - 1) * 2,	\
 	.vsel_mask	= S2MPS14_BUCK_VSEL_MASK,		\
@@ -613,11 +613,16 @@ static const struct regulator_desc s2mps14_regulators[] = {
 	regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
 	regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
 	regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
-	regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
-	regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
-	regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
-	regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
-	regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
+	regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
+				    S2MPS14_BUCK1235_START_SEL),
+	regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
+				    S2MPS14_BUCK1235_START_SEL),
+	regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
+				    S2MPS14_BUCK1235_START_SEL),
+	regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
+				    S2MPS14_BUCK4_START_SEL),
+	regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
+				    S2MPS14_BUCK1235_START_SEL),
 };
 
 static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
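The s2mps11 change above turns a hard-coded field of a descriptor macro into an extra macro parameter so each regulator can pass its own start selector. A stripped-down, hypothetical illustration of the same pattern; the struct and names below are invented for the example and are not the regulator framework's API:

#include <stdio.h>

struct buck_desc {
	const char  *name;
	unsigned int min_uv;
	unsigned int uv_step;
	unsigned int linear_min_sel;	/* now per-descriptor, not fixed */
};

/* min_sel is a parameter instead of a single hard-coded constant. */
#define BUCK_DESC(num, min, step, min_sel) \
	{ .name = "BUCK" #num, .min_uv = (min), .uv_step = (step), \
	  .linear_min_sel = (min_sel) }

int main(void)
{
	const struct buck_desc bucks[] = {
		BUCK_DESC(1,  600000,  6250, 0x10),
		BUCK_DESC(4, 1400000, 12500, 0x40),	/* different start sel */
	};

	for (unsigned int i = 0; i < 2; i++)
		printf("%s: min_sel=0x%x\n", bucks[i].name,
		       bucks[i].linear_min_sel);
	return 0;
}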
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index eebc52cb6984..3d95c87160b3 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np)
 		goto err_alloc;
 	}
 
+	spin_lock_init(&data->lock);
+
 	data->rcdev.owner = THIS_MODULE;
 	data->rcdev.nr_resets = size * 32;
 	data->rcdev.ops = &sunxi_reset_ops;
@@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev)
 	if (IS_ERR(data->membase))
 		return PTR_ERR(data->membase);
 
+	spin_lock_init(&data->lock);
+
 	data->rcdev.owner = THIS_MODULE;
 	data->rcdev.nr_resets = resource_size(res) * 32;
 	data->rcdev.ops = &sunxi_reset_ops;
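The reset-sunxi fix above initializes the spinlock before the reset controller is registered, i.e. before any other code can reach the structure that embeds it. The same ordering rule applies to any lock inside a shared object; a minimal pthread-based sketch under that assumption, with hypothetical names rather than the kernel API:

#include <pthread.h>
#include <stdio.h>

struct controller {
	pthread_mutex_t lock;
	int value;
};

/* Stand-in for registering the object with a framework that may use it
 * from other contexts as soon as this returns. */
static struct controller *registered;

static void register_controller(struct controller *c)
{
	registered = c;
}

int main(void)
{
	static struct controller ctrl;

	/* Initialize the lock first, then publish the object. */
	pthread_mutex_init(&ctrl.lock, NULL);
	register_controller(&ctrl);

	pthread_mutex_lock(&registered->lock);
	registered->value = 42;
	pthread_mutex_unlock(&registered->lock);

	printf("value=%d\n", registered->value);
	pthread_mutex_destroy(&ctrl.lock);
	return 0;
}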
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 91e97ec01418..4d41bf75c233 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id)
  */
 static inline int ap_test_config_domain(unsigned int domain)
 {
-	if (!ap_configuration)
-		return 1;
-	return ap_test_config(ap_configuration->aqm, domain);
+	if (!ap_configuration)	  /* QCI not supported */
+		if (domain < 16)
+			return 1; /* then domains 0...15 are configured */
+		else
+			return 0;
+	else
+		return ap_test_config(ap_configuration->aqm, domain);
 }
 
 /**
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3b73b96619e2..26270c351624 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.6.0.16"
+#define DRV_VERSION		"1.6.0.17"
 #define PFX			DRV_NAME ": "
 #define DFX			DRV_NAME "%d: "
 
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2097de42a147..155b286f1a9d 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1892,6 +1892,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
1892 goto fnic_abort_cmd_end; 1892 goto fnic_abort_cmd_end;
1893 } 1893 }
1894 1894
1895 /* IO out of order */
1896
1897 if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
1898 spin_unlock_irqrestore(io_lock, flags);
1899 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1900 "Issuing Host reset due to out of order IO\n");
1901
1902 if (fnic_host_reset(sc) == FAILED) {
1903 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1904 "fnic_host_reset failed.\n");
1905 }
1906 ret = FAILED;
1907 goto fnic_abort_cmd_end;
1908 }
1909
1895 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 1910 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
1896 1911
1897 /* 1912 /*
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 12ca291c1380..cce1cbc1a927 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	 * Return target busy if we've received a non-zero retry_delay_timer
 	 * in a FCP_RSP.
 	 */
-	if (time_after(jiffies, fcport->retry_delay_timestamp))
+	if (fcport->retry_delay_timestamp == 0) {
+		/* retry delay not set */
+	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
 		fcport->retry_delay_timestamp = 0;
 	else
 		goto qc24_target_busy;
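The qla2xxx fix above treats a retry_delay_timestamp of 0 as "no delay armed" and only falls into the time comparison when the sentinel is set, so a zero value can never be misread as an expired deadline. A small standalone sketch of that sentinel-plus-wraparound-safe comparison; the helper and jiffies_now variable below are illustrative, not the kernel macros:

#include <stdio.h>

/* Wraparound-safe "a is after b" comparison, the same idea as the
 * kernel's time_after() macro. */
static int time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

/* 0 acts as a sentinel meaning "no retry delay armed". */
static unsigned long retry_delay_deadline;

static int may_send(unsigned long jiffies_now)
{
	if (retry_delay_deadline == 0) {
		/* retry delay not set */
	} else if (time_after(jiffies_now, retry_delay_deadline)) {
		retry_delay_deadline = 0;	/* delay expired, clear it */
	} else {
		return 0;			/* still delayed: report busy */
	}
	return 1;
}

int main(void)
{
	retry_delay_deadline = 1000;
	printf("at 500:  %s\n", may_send(500)  ? "send" : "busy");
	printf("at 1500: %s\n", may_send(1500) ? "send" : "busy");
	printf("at 1600: %s\n", may_send(1600) ? "send" : "busy");
	return 0;
}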
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e42fff6e8c10..8afb01604d51 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1041,7 +1041,7 @@ retry:
 		}
 		/* signal not to enter either branch of the if () below */
 		timeleft = 0;
-		rtn = NEEDS_RETRY;
+		rtn = FAILED;
 	} else {
 		timeleft = wait_for_completion_timeout(&done, timeout);
 		rtn = SUCCESS;
@@ -1081,7 +1081,7 @@ retry:
 			rtn = FAILED;
 			break;
 		}
-	} else if (!rtn) {
+	} else if (rtn != FAILED) {
 		scsi_abort_eh_cmnd(scmd);
 		rtn = FAILED;
 	}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ea95dd3e260..6d5c0b8cb0bb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -591,7 +591,6 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
 {
 	struct scatterlist *first_chunk = NULL;
-	gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC;
 	int ret;
 
 	BUG_ON(!nents);
@@ -606,7 +605,7 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
 	}
 
 	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
-			       first_chunk, gfp_mask, scsi_sg_alloc);
+			       first_chunk, GFP_ATOMIC, scsi_sg_alloc);
 	if (unlikely(ret))
 		scsi_free_sgtable(sdb, mq);
 	return ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fedab3c21ddf..399516925d80 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2623,8 +2623,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 		sd_config_discard(sdkp, SD_LBP_WS16);
 
 	} else {	/* LBP VPD page tells us what to use */
-
-		if (sdkp->lbpws)
+		if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+			sd_config_discard(sdkp, SD_LBP_UNMAP);
+		else if (sdkp->lbpws)
 			sd_config_discard(sdkp, SD_LBP_WS16);
 		else if (sdkp->lbpws10)
 			sd_config_discard(sdkp, SD_LBP_WS10);
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index b410499cddca..aad6683db81b 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -341,7 +341,7 @@ static int img_spfi_start_dma(struct spi_master *master,
 	default:
 		rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
 		rxconf.src_addr_width = 1;
-		rxconf.src_maxburst = 1;
+		rxconf.src_maxburst = 4;
 	}
 	dmaengine_slave_config(spfi->rx_ch, &rxconf);
 
@@ -368,7 +368,7 @@ static int img_spfi_start_dma(struct spi_master *master,
 	default:
 		txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
 		txconf.dst_addr_width = 1;
-		txconf.dst_maxburst = 1;
+		txconf.dst_maxburst = 4;
 		break;
 	}
 	dmaengine_slave_config(spfi->tx_ch, &txconf);
@@ -390,14 +390,14 @@ static int img_spfi_start_dma(struct spi_master *master,
 		dma_async_issue_pending(spfi->rx_ch);
 	}
 
+	spfi_start(spfi);
+
 	if (xfer->tx_buf) {
 		spfi->tx_dma_busy = true;
 		dmaengine_submit(txdesc);
 		dma_async_issue_pending(spfi->tx_ch);
 	}
 
-	spfi_start(spfi);
-
 	return 1;
 
 stop_dma:
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 239be7cbe5a8..96a5fc0878d8 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -480,6 +480,8 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
480 struct device_node *np = spi->master->dev.of_node; 480 struct device_node *np = spi->master->dev.of_node;
481 struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); 481 struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
482 482
483 pm_runtime_get_sync(&p->pdev->dev);
484
483 if (!np) { 485 if (!np) {
484 /* 486 /*
485 * Use spi->controller_data for CS (same strategy as spi_gpio), 487 * Use spi->controller_data for CS (same strategy as spi_gpio),
@@ -498,6 +500,9 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
498 if (spi->cs_gpio >= 0) 500 if (spi->cs_gpio >= 0)
499 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 501 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
500 502
503
504 pm_runtime_put_sync(&p->pdev->dev);
505
501 return 0; 506 return 0;
502} 507}
503 508
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 1bf891bd321a..4f361b77c749 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -264,7 +264,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 
 	if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
 	    inode->i_sb->s_root != NULL &&
-	    is_root_inode(inode))
+	    !is_root_inode(inode))
 		ll_invalidate_aliases(inode);
 
 	iput(inode);
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 86c72ba0a0cd..f8c5fc371c4c 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -2177,7 +2177,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
 	/* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
 	/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
 	/* Select VC1/VC2, CR215 = 0x02->0x06 */
-	bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06);
+	bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
 	/* }} */
 
 	for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index c8f739dd346e..70f870541f92 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -182,6 +182,14 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
182 if (pDevice->byCurrentCh == uConnectionChannel) 182 if (pDevice->byCurrentCh == uConnectionChannel)
183 return bResult; 183 return bResult;
184 184
185 /* Set VGA to max sensitivity */
186 if (pDevice->bUpdateBBVGA &&
187 pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
188 pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
189
190 BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
191 }
192
185 /* clear NAV */ 193 /* clear NAV */
186 MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV); 194 MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
187 195
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 83e4162c0094..cd1a277d853b 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1232,7 +1232,7 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
 	head_td = priv->apCurrTD[dma_idx];
 
-	head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
+	head_td->m_td1TD1.byTCR = 0;
 
 	head_td->pTDInfo->skb = skb;
 
@@ -1257,6 +1257,11 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
 	priv->bPWBitOn = false;
 
+	/* Set TSR1 & ReqCount in TxDescHead */
+	head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
+	head_td->m_td1TD1.wReqCount =
+			cpu_to_le16((u16)head_td->pTDInfo->dwReqCount);
+
 	head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
 
 	if (dma_idx == TYPE_AC0DMA)
@@ -1500,9 +1505,11 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 		if (conf->enable_beacon) {
 			vnt_beacon_enable(priv, vif, conf);
 
-			MACvRegBitsOn(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+			MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR,
+				      TCR_AUTOBCNTX);
 		} else {
-			MACvRegBitsOff(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+			MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR,
+				       TCR_AUTOBCNTX);
 		}
 	}
 
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 61c39dd7ad01..b5b0155961f2 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1204,13 +1204,10 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
 
 	ptdCurr = (PSTxDesc)pHeadTD;
 
-	ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
+	ptdCurr->pTDInfo->dwReqCount = cbReqCount;
 	ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
 	ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
 	ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
-	/* Set TSR1 & ReqCount in TxDescHead */
-	ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
-	ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
 
 	return cbHeaderLength;
 }
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 55f6774f706f..aebde3289c50 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 			goto reject;
 		}
 		if (!strncmp("=All", text_ptr, 4)) {
-			cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+			cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
 		} else if (!strncmp("=iqn.", text_ptr, 5) ||
 			   !strncmp("=eui.", text_ptr, 5)) {
-			cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+			cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
 		} else {
 			pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
 			goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 		return -ENOMEM;
 	}
 	/*
-	 * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+	 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
 	 * explicit case..
 	 */
-	if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+	if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
 		text_ptr = strchr(text_in, '=');
 		if (!text_ptr) {
 			pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 
 	spin_lock(&tiqn_lock);
 	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
-		if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+		if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
 		     strcmp(tiqn->tiqn, text_ptr)) {
 			continue;
 		}
@@ -3512,7 +3512,7 @@ eob:
 		if (end_of_buf)
 			break;
 
-		if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+		if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
 			break;
 	}
 	spin_unlock(&tiqn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 09a522bae222..cbcff38ac9b7 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -135,8 +135,8 @@ enum cmd_flags_table {
 	ICF_CONTIG_MEMORY		= 0x00000020,
 	ICF_ATTACHED_TO_RQUEUE		= 0x00000040,
 	ICF_OOO_CMDSN			= 0x00000080,
-	IFC_SENDTARGETS_ALL		= 0x00000100,
-	IFC_SENDTARGETS_SINGLE		= 0x00000200,
+	ICF_SENDTARGETS_ALL		= 0x00000100,
+	ICF_SENDTARGETS_SINGLE		= 0x00000200,
 };
 
 /* struct iscsi_cmd->i_state */
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7653cfb027a2..58f49ff69b14 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1103} 1103}
1104EXPORT_SYMBOL(se_dev_set_queue_depth); 1104EXPORT_SYMBOL(se_dev_set_queue_depth);
1105 1105
1106int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1107{
1108 int block_size = dev->dev_attrib.block_size;
1109
1110 if (dev->export_count) {
1111 pr_err("dev[%p]: Unable to change SE Device"
1112 " fabric_max_sectors while export_count is %d\n",
1113 dev, dev->export_count);
1114 return -EINVAL;
1115 }
1116 if (!fabric_max_sectors) {
1117 pr_err("dev[%p]: Illegal ZERO value for"
1118 " fabric_max_sectors\n", dev);
1119 return -EINVAL;
1120 }
1121 if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1122 pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
1123 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
1124 DA_STATUS_MAX_SECTORS_MIN);
1125 return -EINVAL;
1126 }
1127 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1128 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1129 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1130 " %u\n", dev, fabric_max_sectors,
1131 DA_STATUS_MAX_SECTORS_MAX);
1132 return -EINVAL;
1133 }
1134 /*
1135 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1136 */
1137 if (!block_size) {
1138 block_size = 512;
1139 pr_warn("Defaulting to 512 for zero block_size\n");
1140 }
1141 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1142 block_size);
1143
1144 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
1145 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1146 dev, fabric_max_sectors);
1147 return 0;
1148}
1149EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
1150
1151int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1106int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1152{ 1107{
1153 if (dev->export_count) { 1108 if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 			dev, dev->export_count);
 		return -EINVAL;
 	}
-	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+	if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
 		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-			" greater than fabric_max_sectors: %u\n", dev,
-			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+			" greater than hw_max_sectors: %u\n", dev,
+			optimal_sectors, dev->dev_attrib.hw_max_sectors);
 		return -EINVAL;
 	}
 
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	dev->dev_attrib.unmap_granularity_alignment =
 				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
 	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
-	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
-	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 
 	xcopy_lun = &dev->xcopy_lun;
 	xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
 	dev->dev_attrib.hw_max_sectors =
 		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
 					 dev->dev_attrib.hw_block_size);
+	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 
 	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 	dev->creation_time = get_jiffies_64();
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c2aea099ea4a..d836de200a03 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
621 struct fd_prot fd_prot; 621 struct fd_prot fd_prot;
622 sense_reason_t rc; 622 sense_reason_t rc;
623 int ret = 0; 623 int ret = 0;
624 624 /*
625 * We are currently limited by the number of iovecs (2048) per
626 * single vfs_[writev,readv] call.
627 */
628 if (cmd->data_length > FD_MAX_BYTES) {
629 pr_err("FILEIO: Not able to process I/O of %u bytes due to"
630 "FD_MAX_BYTES: %u iovec count limitiation\n",
631 cmd->data_length, FD_MAX_BYTES);
632 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
633 }
625 /* 634 /*
626 * Call vectorized fileio functions to map struct scatterlist 635 * Call vectorized fileio functions to map struct scatterlist
627 * physical memory addresses to struct iovec virtual memory. 636 * physical memory addresses to struct iovec virtual memory.
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
959 &fileio_dev_attrib_hw_block_size.attr, 968 &fileio_dev_attrib_hw_block_size.attr,
960 &fileio_dev_attrib_block_size.attr, 969 &fileio_dev_attrib_block_size.attr,
961 &fileio_dev_attrib_hw_max_sectors.attr, 970 &fileio_dev_attrib_hw_max_sectors.attr,
962 &fileio_dev_attrib_fabric_max_sectors.attr,
963 &fileio_dev_attrib_optimal_sectors.attr, 971 &fileio_dev_attrib_optimal_sectors.attr,
964 &fileio_dev_attrib_hw_queue_depth.attr, 972 &fileio_dev_attrib_hw_queue_depth.attr,
965 &fileio_dev_attrib_queue_depth.attr, 973 &fileio_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 3efff94fbd97..78346b850968 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
 	q = bdev_get_queue(bd);
 
 	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
-	dev->dev_attrib.hw_max_sectors = UINT_MAX;
+	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
 	/*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
 	&iblock_dev_attrib_hw_block_size.attr,
 	&iblock_dev_attrib_block_size.attr,
 	&iblock_dev_attrib_hw_max_sectors.attr,
-	&iblock_dev_attrib_fabric_max_sectors.attr,
 	&iblock_dev_attrib_optimal_sectors.attr,
 	&iblock_dev_attrib_hw_queue_depth.attr,
 	&iblock_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d56f2aaba9af..283cf786ef98 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
528 528
529 return 0; 529 return 0;
530 } 530 }
531 } else if (we && registered_nexus) {
532 /*
533 * Reads are allowed for Write Exclusive locks
534 * from all registrants.
535 */
536 if (cmd->data_direction == DMA_FROM_DEVICE) {
537 pr_debug("Allowing READ CDB: 0x%02x for %s"
538 " reservation\n", cdb[0],
539 core_scsi3_pr_dump_type(pr_reg_type));
540
541 return 0;
542 }
531 } 543 }
532 pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" 544 pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
533 " for %s reservation\n", transport_dump_cmd_direction(cmd), 545 " for %s reservation\n", transport_dump_cmd_direction(cmd),
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 60ebd170a561..98e83ac5661b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
657 &rd_mcp_dev_attrib_hw_block_size.attr, 657 &rd_mcp_dev_attrib_hw_block_size.attr,
658 &rd_mcp_dev_attrib_block_size.attr, 658 &rd_mcp_dev_attrib_block_size.attr,
659 &rd_mcp_dev_attrib_hw_max_sectors.attr, 659 &rd_mcp_dev_attrib_hw_max_sectors.attr,
660 &rd_mcp_dev_attrib_fabric_max_sectors.attr,
661 &rd_mcp_dev_attrib_optimal_sectors.attr, 660 &rd_mcp_dev_attrib_optimal_sectors.attr,
662 &rd_mcp_dev_attrib_hw_queue_depth.attr, 661 &rd_mcp_dev_attrib_hw_queue_depth.attr,
663 &rd_mcp_dev_attrib_queue_depth.attr, 662 &rd_mcp_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 11bea1952435..cd4bed7b2757 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
953 953
954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
955 unsigned long long end_lba; 955 unsigned long long end_lba;
956
957 if (sectors > dev->dev_attrib.fabric_max_sectors) {
958 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
959 " big sectors %u exceeds fabric_max_sectors:"
960 " %u\n", cdb[0], sectors,
961 dev->dev_attrib.fabric_max_sectors);
962 return TCM_INVALID_CDB_FIELD;
963 }
964 if (sectors > dev->dev_attrib.hw_max_sectors) {
965 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
966 " big sectors %u exceeds backend hw_max_sectors:"
967 " %u\n", cdb[0], sectors,
968 dev->dev_attrib.hw_max_sectors);
969 return TCM_INVALID_CDB_FIELD;
970 }
971check_lba: 956check_lba:
972 end_lba = dev->transport->get_blocks(dev) + 1; 957 end_lba = dev->transport->get_blocks(dev) + 1;
973 if (cmd->t_task_lba + sectors > end_lba) { 958 if (cmd->t_task_lba + sectors > end_lba) {
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 1307600fe726..4c71657da56a 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -505,7 +505,6 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
-	u32 max_sectors;
 	int have_tp = 0;
 	int opt, min;
 
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
 	 */
-	max_sectors = min(dev->dev_attrib.fabric_max_sectors,
-			  dev->dev_attrib.hw_max_sectors);
-	put_unaligned_be32(max_sectors, &buf[8]);
+	put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8bfa61c9693d..1157b559683b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
1118 &tcmu_dev_attrib_hw_block_size.attr, 1118 &tcmu_dev_attrib_hw_block_size.attr,
1119 &tcmu_dev_attrib_block_size.attr, 1119 &tcmu_dev_attrib_block_size.attr,
1120 &tcmu_dev_attrib_hw_max_sectors.attr, 1120 &tcmu_dev_attrib_hw_max_sectors.attr,
1121 &tcmu_dev_attrib_fabric_max_sectors.attr,
1122 &tcmu_dev_attrib_optimal_sectors.attr, 1121 &tcmu_dev_attrib_optimal_sectors.attr,
1123 &tcmu_dev_attrib_hw_queue_depth.attr, 1122 &tcmu_dev_attrib_hw_queue_depth.attr,
1124 &tcmu_dev_attrib_queue_depth.attr, 1123 &tcmu_dev_attrib_queue_depth.attr,
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index ad09e51ffae4..f65f0d109fc8 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -4,6 +4,8 @@
4 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) 4 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
5 * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> 5 * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
6 * 6 *
7 * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
8 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -28,6 +30,20 @@
28#include <linux/cpu.h> 30#include <linux/cpu.h>
29#include <linux/cpu_cooling.h> 31#include <linux/cpu_cooling.h>
30 32
33/*
34 * Cooling state <-> CPUFreq frequency
35 *
36 * Cooling states are translated to frequencies throughout this driver and this
37 * is the relation between them.
38 *
39 * Highest cooling state corresponds to lowest possible frequency.
40 *
41 * i.e.
42 * level 0 --> 1st Max Freq
43 * level 1 --> 2nd Max Freq
44 * ...
45 */
46
31/** 47/**
32 * struct cpufreq_cooling_device - data for cooling device with cpufreq 48 * struct cpufreq_cooling_device - data for cooling device with cpufreq
33 * @id: unique integer value corresponding to each cpufreq_cooling_device 49 * @id: unique integer value corresponding to each cpufreq_cooling_device
@@ -38,25 +54,27 @@
38 * cooling devices. 54 * cooling devices.
39 * @cpufreq_val: integer value representing the absolute value of the clipped 55 * @cpufreq_val: integer value representing the absolute value of the clipped
40 * frequency. 56 * frequency.
57 * @max_level: maximum cooling level. One less than total number of valid
58 * cpufreq frequencies.
41 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. 59 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
60 * @node: list_head to link all cpufreq_cooling_device together.
42 * 61 *
43 * This structure is required for keeping information of each 62 * This structure is required for keeping information of each registered
44 * cpufreq_cooling_device registered. In order to prevent corruption of this a 63 * cpufreq_cooling_device.
45 * mutex lock cooling_cpufreq_lock is used.
46 */ 64 */
47struct cpufreq_cooling_device { 65struct cpufreq_cooling_device {
48 int id; 66 int id;
49 struct thermal_cooling_device *cool_dev; 67 struct thermal_cooling_device *cool_dev;
50 unsigned int cpufreq_state; 68 unsigned int cpufreq_state;
51 unsigned int cpufreq_val; 69 unsigned int cpufreq_val;
70 unsigned int max_level;
71 unsigned int *freq_table; /* In descending order */
52 struct cpumask allowed_cpus; 72 struct cpumask allowed_cpus;
53 struct list_head node; 73 struct list_head node;
54}; 74};
55static DEFINE_IDR(cpufreq_idr); 75static DEFINE_IDR(cpufreq_idr);
56static DEFINE_MUTEX(cooling_cpufreq_lock); 76static DEFINE_MUTEX(cooling_cpufreq_lock);
57 77
58static unsigned int cpufreq_dev_count;
59
60static LIST_HEAD(cpufreq_dev_list); 78static LIST_HEAD(cpufreq_dev_list);
61 79
62/** 80/**
@@ -98,120 +116,30 @@ static void release_idr(struct idr *idr, int id)
98/* Below code defines functions to be used for cpufreq as cooling device */ 116/* Below code defines functions to be used for cpufreq as cooling device */
99 117
100/** 118/**
101 * is_cpufreq_valid - function to check frequency transitioning capability. 119 * get_level: Find the level for a particular frequency
102 * @cpu: cpu for which check is needed. 120 * @cpufreq_dev: cpufreq_dev for which the property is required
121 * @freq: Frequency
103 * 122 *
104 * This function will check the current state of the system if 123 * Return: level on success, THERMAL_CSTATE_INVALID on error.
105 * it is capable of changing the frequency for a given @cpu.
106 *
107 * Return: 0 if the system is not currently capable of changing
108 * the frequency of given cpu. !0 in case the frequency is changeable.
109 */ 124 */
110static int is_cpufreq_valid(int cpu) 125static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
126 unsigned int freq)
111{ 127{
112 struct cpufreq_policy policy; 128 unsigned long level;
113
114 return !cpufreq_get_policy(&policy, cpu);
115}
116
117enum cpufreq_cooling_property {
118 GET_LEVEL,
119 GET_FREQ,
120 GET_MAXL,
121};
122
123/**
124 * get_property - fetch a property of interest for a give cpu.
125 * @cpu: cpu for which the property is required
126 * @input: query parameter
127 * @output: query return
128 * @property: type of query (frequency, level, max level)
129 *
130 * This is the common function to
131 * 1. get maximum cpu cooling states
132 * 2. translate frequency to cooling state
133 * 3. translate cooling state to frequency
134 * Note that the code may be not in good shape
135 * but it is written in this way in order to:
136 * a) reduce duplicate code as most of the code can be shared.
137 * b) make sure the logic is consistent when translating between
138 * cooling states and frequencies.
139 *
140 * Return: 0 on success, -EINVAL when invalid parameters are passed.
141 */
142static int get_property(unsigned int cpu, unsigned long input,
143 unsigned int *output,
144 enum cpufreq_cooling_property property)
145{
146 int i;
147 unsigned long max_level = 0, level = 0;
148 unsigned int freq = CPUFREQ_ENTRY_INVALID;
149 int descend = -1;
150 struct cpufreq_frequency_table *pos, *table =
151 cpufreq_frequency_get_table(cpu);
152
153 if (!output)
154 return -EINVAL;
155
156 if (!table)
157 return -EINVAL;
158
159 cpufreq_for_each_valid_entry(pos, table) {
160 /* ignore duplicate entry */
161 if (freq == pos->frequency)
162 continue;
163
164 /* get the frequency order */
165 if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
166 descend = freq > pos->frequency;
167
168 freq = pos->frequency;
169 max_level++;
170 }
171
172 /* No valid cpu frequency entry */
173 if (max_level == 0)
174 return -EINVAL;
175 129
176 /* max_level is an index, not a counter */ 130 for (level = 0; level <= cpufreq_dev->max_level; level++) {
177 max_level--; 131 if (freq == cpufreq_dev->freq_table[level])
132 return level;
178 133
179 /* get max level */ 134 if (freq > cpufreq_dev->freq_table[level])
180 if (property == GET_MAXL) { 135 break;
181 *output = (unsigned int)max_level;
182 return 0;
183 } 136 }
184 137
185 if (property == GET_FREQ) 138 return THERMAL_CSTATE_INVALID;
186 level = descend ? input : (max_level - input);
187
188 i = 0;
189 cpufreq_for_each_valid_entry(pos, table) {
190 /* ignore duplicate entry */
191 if (freq == pos->frequency)
192 continue;
193
194 /* now we have a valid frequency entry */
195 freq = pos->frequency;
196
197 if (property == GET_LEVEL && (unsigned int)input == freq) {
198 /* get level by frequency */
199 *output = descend ? i : (max_level - i);
200 return 0;
201 }
202 if (property == GET_FREQ && level == i) {
203 /* get frequency by level */
204 *output = freq;
205 return 0;
206 }
207 i++;
208 }
209
210 return -EINVAL;
211} 139}
212 140
213/** 141/**
214 * cpufreq_cooling_get_level - for a give cpu, return the cooling level. 142 * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
215 * @cpu: cpu for which the level is required 143 * @cpu: cpu for which the level is required
216 * @freq: the frequency of interest 144 * @freq: the frequency of interest
217 * 145 *
@@ -223,77 +151,21 @@ static int get_property(unsigned int cpu, unsigned long input,
223 */ 151 */
224unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) 152unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
225{ 153{
226 unsigned int val; 154 struct cpufreq_cooling_device *cpufreq_dev;
227
228 if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
229 return THERMAL_CSTATE_INVALID;
230
231 return (unsigned long)val;
232}
233EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
234
235/**
236 * get_cpu_frequency - get the absolute value of frequency from level.
237 * @cpu: cpu for which frequency is fetched.
238 * @level: cooling level
239 *
240 * This function matches cooling level with frequency. Based on a cooling level
241 * of frequency, equals cooling state of cpu cooling device, it will return
242 * the corresponding frequency.
243 * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
244 *
245 * Return: 0 on error, the corresponding frequency otherwise.
246 */
247static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
248{
249 int ret = 0;
250 unsigned int freq;
251
252 ret = get_property(cpu, level, &freq, GET_FREQ);
253 if (ret)
254 return 0;
255
256 return freq;
257}
258
259/**
260 * cpufreq_apply_cooling - function to apply frequency clipping.
261 * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
262 * clipping data.
263 * @cooling_state: value of the cooling state.
264 *
265 * Function used to make sure the cpufreq layer is aware of current thermal
266 * limits. The limits are applied by updating the cpufreq policy.
267 *
268 * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
269 * cooling state).
270 */
271static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
272 unsigned long cooling_state)
273{
274 unsigned int cpuid, clip_freq;
275 struct cpumask *mask = &cpufreq_device->allowed_cpus;
276 unsigned int cpu = cpumask_any(mask);
277
278
279 /* Check if the old cooling action is same as new cooling action */
280 if (cpufreq_device->cpufreq_state == cooling_state)
281 return 0;
282
283 clip_freq = get_cpu_frequency(cpu, cooling_state);
284 if (!clip_freq)
285 return -EINVAL;
286
287 cpufreq_device->cpufreq_state = cooling_state;
288 cpufreq_device->cpufreq_val = clip_freq;
289 155
290 for_each_cpu(cpuid, mask) { 156 mutex_lock(&cooling_cpufreq_lock);
291 if (is_cpufreq_valid(cpuid)) 157 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
292 cpufreq_update_policy(cpuid); 158 if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
159 mutex_unlock(&cooling_cpufreq_lock);
160 return get_level(cpufreq_dev, freq);
161 }
293 } 162 }
163 mutex_unlock(&cooling_cpufreq_lock);
294 164
295 return 0; 165 pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
166 return THERMAL_CSTATE_INVALID;
296} 167}
168EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
297 169
298/** 170/**
299 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change. 171 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
@@ -323,11 +195,6 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 					&cpufreq_dev->allowed_cpus))
 			continue;
 
-		if (!cpufreq_dev->cpufreq_val)
-			cpufreq_dev->cpufreq_val = get_cpu_frequency(
-					cpumask_any(&cpufreq_dev->allowed_cpus),
-					cpufreq_dev->cpufreq_state);
-
 		max_freq = cpufreq_dev->cpufreq_val;
 
 		if (policy->max != max_freq)
@@ -354,19 +221,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
 {
 	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
-	struct cpumask *mask = &cpufreq_device->allowed_cpus;
-	unsigned int cpu;
-	unsigned int count = 0;
-	int ret;
-
-	cpu = cpumask_any(mask);
-
-	ret = get_property(cpu, 0, &count, GET_MAXL);
 
-	if (count > 0)
-		*state = count;
-
-	return ret;
+	*state = cpufreq_device->max_level;
+	return 0;
 }
 
 /**
@@ -403,8 +260,24 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
403 unsigned long state) 260 unsigned long state)
404{ 261{
405 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; 262 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
263 unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
264 unsigned int clip_freq;
265
266 /* Request state should be less than max_level */
267 if (WARN_ON(state > cpufreq_device->max_level))
268 return -EINVAL;
269
270 /* Check if the old cooling action is same as new cooling action */
271 if (cpufreq_device->cpufreq_state == state)
272 return 0;
406 273
407 return cpufreq_apply_cooling(cpufreq_device, state); 274 clip_freq = cpufreq_device->freq_table[state];
275 cpufreq_device->cpufreq_state = state;
276 cpufreq_device->cpufreq_val = clip_freq;
277
278 cpufreq_update_policy(cpu);
279
280 return 0;
408} 281}
409 282
410/* Bind cpufreq callbacks to thermal cooling device ops */ 283/* Bind cpufreq callbacks to thermal cooling device ops */
@@ -419,10 +292,25 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
419 .notifier_call = cpufreq_thermal_notifier, 292 .notifier_call = cpufreq_thermal_notifier,
420}; 293};
421 294
295static unsigned int find_next_max(struct cpufreq_frequency_table *table,
296 unsigned int prev_max)
297{
298 struct cpufreq_frequency_table *pos;
299 unsigned int max = 0;
300
301 cpufreq_for_each_valid_entry(pos, table) {
302 if (pos->frequency > max && pos->frequency < prev_max)
303 max = pos->frequency;
304 }
305
306 return max;
307}
308
422/** 309/**
423 * __cpufreq_cooling_register - helper function to create cpufreq cooling device 310 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
424 * @np: a valid struct device_node to the cooling device device tree node 311 * @np: a valid struct device_node to the cooling device device tree node
425 * @clip_cpus: cpumask of cpus where the frequency constraints will happen. 312 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
313 * Normally this should be same as cpufreq policy->related_cpus.
426 * 314 *
427 * This interface function registers the cpufreq cooling device with the name 315 * This interface function registers the cpufreq cooling device with the name
428 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq 316 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -437,37 +325,42 @@ __cpufreq_cooling_register(struct device_node *np,
437 const struct cpumask *clip_cpus) 325 const struct cpumask *clip_cpus)
438{ 326{
439 struct thermal_cooling_device *cool_dev; 327 struct thermal_cooling_device *cool_dev;
440 struct cpufreq_cooling_device *cpufreq_dev = NULL; 328 struct cpufreq_cooling_device *cpufreq_dev;
441 unsigned int min = 0, max = 0;
442 char dev_name[THERMAL_NAME_LENGTH]; 329 char dev_name[THERMAL_NAME_LENGTH];
443 int ret = 0, i; 330 struct cpufreq_frequency_table *pos, *table;
444 struct cpufreq_policy policy; 331 unsigned int freq, i;
332 int ret;
445 333
446 /* Verify that all the clip cpus have same freq_min, freq_max limit */ 334 table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
447 for_each_cpu(i, clip_cpus) { 335 if (!table) {
448 /* continue if cpufreq policy not found and not return error */ 336 pr_debug("%s: CPUFreq table not found\n", __func__);
449 if (!cpufreq_get_policy(&policy, i)) 337 return ERR_PTR(-EPROBE_DEFER);
450 continue;
451 if (min == 0 && max == 0) {
452 min = policy.cpuinfo.min_freq;
453 max = policy.cpuinfo.max_freq;
454 } else {
455 if (min != policy.cpuinfo.min_freq ||
456 max != policy.cpuinfo.max_freq)
457 return ERR_PTR(-EINVAL);
458 }
459 } 338 }
460 cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device), 339
461 GFP_KERNEL); 340 cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
462 if (!cpufreq_dev) 341 if (!cpufreq_dev)
463 return ERR_PTR(-ENOMEM); 342 return ERR_PTR(-ENOMEM);
464 343
344 /* Find max levels */
345 cpufreq_for_each_valid_entry(pos, table)
346 cpufreq_dev->max_level++;
347
348 cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
349 cpufreq_dev->max_level, GFP_KERNEL);
350 if (!cpufreq_dev->freq_table) {
351 cool_dev = ERR_PTR(-ENOMEM);
352 goto free_cdev;
353 }
354
355 /* max_level is an index, not a counter */
356 cpufreq_dev->max_level--;
357
465 cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); 358 cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
466 359
467 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); 360 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
468 if (ret) { 361 if (ret) {
469 kfree(cpufreq_dev); 362 cool_dev = ERR_PTR(ret);
470 return ERR_PTR(-EINVAL); 363 goto free_table;
471 } 364 }
472 365
473 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", 366 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -475,25 +368,44 @@ __cpufreq_cooling_register(struct device_node *np,
475 368
476 cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, 369 cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
477 &cpufreq_cooling_ops); 370 &cpufreq_cooling_ops);
478 if (IS_ERR(cool_dev)) { 371 if (IS_ERR(cool_dev))
479 release_idr(&cpufreq_idr, cpufreq_dev->id); 372 goto remove_idr;
480 kfree(cpufreq_dev); 373
481 return cool_dev; 374 /* Fill freq-table in descending order of frequencies */
375 for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
376 freq = find_next_max(table, freq);
377 cpufreq_dev->freq_table[i] = freq;
378
379 /* Warn for duplicate entries */
380 if (!freq)
381 pr_warn("%s: table has duplicate entries\n", __func__);
382 else
383 pr_debug("%s: freq:%u KHz\n", __func__, freq);
482 } 384 }
385
386 cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
483 cpufreq_dev->cool_dev = cool_dev; 387 cpufreq_dev->cool_dev = cool_dev;
484 cpufreq_dev->cpufreq_state = 0; 388
485 mutex_lock(&cooling_cpufreq_lock); 389 mutex_lock(&cooling_cpufreq_lock);
486 390
487 /* Register the notifier for first cpufreq cooling device */ 391 /* Register the notifier for first cpufreq cooling device */
488 if (cpufreq_dev_count == 0) 392 if (list_empty(&cpufreq_dev_list))
489 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 393 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
490 CPUFREQ_POLICY_NOTIFIER); 394 CPUFREQ_POLICY_NOTIFIER);
491 cpufreq_dev_count++;
492 list_add(&cpufreq_dev->node, &cpufreq_dev_list); 395 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
493 396
494 mutex_unlock(&cooling_cpufreq_lock); 397 mutex_unlock(&cooling_cpufreq_lock);
495 398
496 return cool_dev; 399 return cool_dev;
400
401remove_idr:
402 release_idr(&cpufreq_idr, cpufreq_dev->id);
403free_table:
404 kfree(cpufreq_dev->freq_table);
405free_cdev:
406 kfree(cpufreq_dev);
407
408 return cool_dev;
497} 409}
498 410
499/** 411/**
@@ -554,16 +466,16 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
554 cpufreq_dev = cdev->devdata; 466 cpufreq_dev = cdev->devdata;
555 mutex_lock(&cooling_cpufreq_lock); 467 mutex_lock(&cooling_cpufreq_lock);
556 list_del(&cpufreq_dev->node); 468 list_del(&cpufreq_dev->node);
557 cpufreq_dev_count--;
558 469
559 /* Unregister the notifier for the last cpufreq cooling device */ 470 /* Unregister the notifier for the last cpufreq cooling device */
560 if (cpufreq_dev_count == 0) 471 if (list_empty(&cpufreq_dev_list))
561 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 472 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
562 CPUFREQ_POLICY_NOTIFIER); 473 CPUFREQ_POLICY_NOTIFIER);
563 mutex_unlock(&cooling_cpufreq_lock); 474 mutex_unlock(&cooling_cpufreq_lock);
564 475
565 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 476 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
566 release_idr(&cpufreq_idr, cpufreq_dev->id); 477 release_idr(&cpufreq_idr, cpufreq_dev->id);
478 kfree(cpufreq_dev->freq_table);
567 kfree(cpufreq_dev); 479 kfree(cpufreq_dev);
568} 480}
569EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister); 481EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
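Note on the __cpufreq_cooling_register() rework above: the per-CPU min/max sanity loop is replaced by a cpufreq table walk, freq_table is filled in descending order via find_next_max(), and the error paths unwind through the remove_idr/free_table/free_cdev labels. The body of find_next_max() is not part of this hunk, so the stand-alone sketch below uses an assumed equivalent purely to illustrate the descending fill; it is not the driver code itself.

/*
 * Minimal user-space sketch of the descending fill used above.
 * find_next_max() here is an assumed equivalent: return the largest
 * frequency strictly below "prev", or 0 when none is left (which the
 * driver warns about as a duplicate entry).
 */
#include <stdio.h>

static unsigned int find_next_max(const unsigned int *freqs, int n,
				  unsigned int prev)
{
	unsigned int max = 0;
	int i;

	for (i = 0; i < n; i++)
		if (freqs[i] < prev && freqs[i] > max)
			max = freqs[i];
	return max;
}

int main(void)
{
	/* unsorted, as a cpufreq table may be */
	unsigned int freqs[] = { 800000, 1200000, 600000, 1000000 };
	unsigned int table[4];
	unsigned int freq = (unsigned int)-1;	/* start above everything */
	int i;

	for (i = 0; i < 4; i++) {
		freq = find_next_max(freqs, 4, freq);
		table[i] = freq;	/* table[0] ends up as the maximum */
	}

	for (i = 0; i < 4; i++)
		printf("level %d: %u kHz\n", i, table[i]);
	return 0;
}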
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 000d53e934a0..607b62c7e611 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -18,7 +18,6 @@
18 */ 18 */
19 19
20#include <linux/cpu_cooling.h> 20#include <linux/cpu_cooling.h>
21#include <linux/cpufreq.h>
22#include <linux/err.h> 21#include <linux/err.h>
23#include <linux/module.h> 22#include <linux/module.h>
24#include <linux/of.h> 23#include <linux/of.h>
@@ -28,18 +27,17 @@
28static int db8500_cpufreq_cooling_probe(struct platform_device *pdev) 27static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
29{ 28{
30 struct thermal_cooling_device *cdev; 29 struct thermal_cooling_device *cdev;
31 struct cpumask mask_val;
32
33 /* make sure cpufreq driver has been initialized */
34 if (!cpufreq_frequency_get_table(0))
35 return -EPROBE_DEFER;
36
37 cpumask_set_cpu(0, &mask_val);
38 cdev = cpufreq_cooling_register(&mask_val);
39 30
31 cdev = cpufreq_cooling_register(cpu_present_mask);
40 if (IS_ERR(cdev)) { 32 if (IS_ERR(cdev)) {
41 dev_err(&pdev->dev, "Failed to register cooling device\n"); 33 int ret = PTR_ERR(cdev);
42 return PTR_ERR(cdev); 34
35 if (ret != -EPROBE_DEFER)
36 dev_err(&pdev->dev,
37 "Failed to register cooling device %d\n",
38 ret);
39
40 return ret;
43 } 41 }
44 42
45 platform_set_drvdata(pdev, cdev); 43 platform_set_drvdata(pdev, cdev);
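The registration pattern above (repeated in the imx, exynos and ti-soc hunks later in this series) treats -EPROBE_DEFER as an expected outcome that should stay quiet, since the probe is simply retried once cpufreq is available. A minimal user-space sketch of that convention, with EPROBE_DEFER defined locally because it is a kernel-private errno:

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER	517	/* kernel-private errno; not in userspace errno.h */

static int report_register_result(int ret)
{
	if (ret < 0 && ret != -EPROBE_DEFER)
		fprintf(stderr, "Failed to register cooling device %d\n", ret);
	return ret;
}

int main(void)
{
	report_register_result(-EPROBE_DEFER);	/* silent: the probe is retried later */
	report_register_result(-ENOMEM);	/* real failure: logged */
	return 0;
}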
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 88b32f942dcf..2ccbc0788353 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/cpu_cooling.h> 11#include <linux/cpu_cooling.h>
12#include <linux/cpufreq.h>
13#include <linux/delay.h> 12#include <linux/delay.h>
14#include <linux/device.h> 13#include <linux/device.h>
15#include <linux/init.h> 14#include <linux/init.h>
@@ -454,15 +453,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
454 const struct of_device_id *of_id = 453 const struct of_device_id *of_id =
455 of_match_device(of_imx_thermal_match, &pdev->dev); 454 of_match_device(of_imx_thermal_match, &pdev->dev);
456 struct imx_thermal_data *data; 455 struct imx_thermal_data *data;
457 struct cpumask clip_cpus;
458 struct regmap *map; 456 struct regmap *map;
459 int measure_freq; 457 int measure_freq;
460 int ret; 458 int ret;
461 459
462 if (!cpufreq_get_current_driver()) {
463 dev_dbg(&pdev->dev, "no cpufreq driver!");
464 return -EPROBE_DEFER;
465 }
466 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 460 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
467 if (!data) 461 if (!data)
468 return -ENOMEM; 462 return -ENOMEM;
@@ -516,12 +510,13 @@ static int imx_thermal_probe(struct platform_device *pdev)
516 regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF); 510 regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
517 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); 511 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
518 512
519 cpumask_set_cpu(0, &clip_cpus); 513 data->cdev = cpufreq_cooling_register(cpu_present_mask);
520 data->cdev = cpufreq_cooling_register(&clip_cpus);
521 if (IS_ERR(data->cdev)) { 514 if (IS_ERR(data->cdev)) {
522 ret = PTR_ERR(data->cdev); 515 ret = PTR_ERR(data->cdev);
523 dev_err(&pdev->dev, 516 if (ret != -EPROBE_DEFER)
524 "failed to register cpufreq cooling device: %d\n", ret); 517 dev_err(&pdev->dev,
518 "failed to register cpufreq cooling device: %d\n",
519 ret);
525 return ret; 520 return ret;
526 } 521 }
527 522
@@ -613,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev)
613 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP); 608 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
614 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); 609 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
615 data->mode = THERMAL_DEVICE_DISABLED; 610 data->mode = THERMAL_DEVICE_DISABLED;
611 clk_disable_unprepare(data->thermal_clk);
616 612
617 return 0; 613 return 0;
618} 614}
@@ -622,6 +618,7 @@ static int imx_thermal_resume(struct device *dev)
622 struct imx_thermal_data *data = dev_get_drvdata(dev); 618 struct imx_thermal_data *data = dev_get_drvdata(dev);
623 struct regmap *map = data->tempmon; 619 struct regmap *map = data->tempmon;
624 620
621 clk_prepare_enable(data->thermal_clk);
625 /* Enabled thermal sensor after resume */ 622 /* Enabled thermal sensor after resume */
626 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); 623 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
627 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); 624 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index ffe40bffaf1a..d4413698a85f 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o 1obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
2obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o 2obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
3obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o 3obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
4obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
4obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o 5obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index e4e61b3fb11e..2c2ec7666eb1 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -82,7 +82,7 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
82 struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" }; 82 struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
83 83
84 if (!acpi_has_method(handle, "_TRT")) 84 if (!acpi_has_method(handle, "_TRT"))
85 return 0; 85 return -ENODEV;
86 86
87 status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer); 87 status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
88 if (ACPI_FAILURE(status)) 88 if (ACPI_FAILURE(status))
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
119 continue; 119 continue;
120 120
121 result = acpi_bus_get_device(trt->source, &adev); 121 result = acpi_bus_get_device(trt->source, &adev);
122 if (!result) 122 if (result)
123 acpi_create_platform_device(adev);
124 else
125 pr_warn("Failed to get source ACPI device\n"); 123 pr_warn("Failed to get source ACPI device\n");
126 124
127 result = acpi_bus_get_device(trt->target, &adev); 125 result = acpi_bus_get_device(trt->target, &adev);
128 if (!result) 126 if (result)
129 acpi_create_platform_device(adev);
130 else
131 pr_warn("Failed to get target ACPI device\n"); 127 pr_warn("Failed to get target ACPI device\n");
132 } 128 }
133 129
@@ -167,7 +163,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
167 sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" }; 163 sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
168 164
169 if (!acpi_has_method(handle, "_ART")) 165 if (!acpi_has_method(handle, "_ART"))
170 return 0; 166 return -ENODEV;
171 167
172 status = acpi_evaluate_object(handle, "_ART", NULL, &buffer); 168 status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
173 if (ACPI_FAILURE(status)) 169 if (ACPI_FAILURE(status))
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
206 202
207 if (art->source) { 203 if (art->source) {
208 result = acpi_bus_get_device(art->source, &adev); 204 result = acpi_bus_get_device(art->source, &adev);
209 if (!result) 205 if (result)
210 acpi_create_platform_device(adev);
211 else
212 pr_warn("Failed to get source ACPI device\n"); 206 pr_warn("Failed to get source ACPI device\n");
213 } 207 }
214 if (art->target) { 208 if (art->target) {
215 result = acpi_bus_get_device(art->target, &adev); 209 result = acpi_bus_get_device(art->target, &adev);
216 if (!result) 210 if (result)
217 acpi_create_platform_device(adev);
218 else
219 pr_warn("Failed to get source ACPI device\n"); 211 pr_warn("Failed to get source ACPI device\n");
220 } 212 }
221 } 213 }
@@ -321,8 +313,8 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
321 unsigned long length = 0; 313 unsigned long length = 0;
322 int count = 0; 314 int count = 0;
323 char __user *arg = (void __user *)__arg; 315 char __user *arg = (void __user *)__arg;
324 struct trt *trts; 316 struct trt *trts = NULL;
325 struct art *arts; 317 struct art *arts = NULL;
326 318
327 switch (cmd) { 319 switch (cmd) {
328 case ACPI_THERMAL_GET_TRT_COUNT: 320 case ACPI_THERMAL_GET_TRT_COUNT:
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index dcb306ea14a4..65a98a97df07 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -335,7 +335,6 @@ static struct platform_driver int3400_thermal_driver = {
335 .remove = int3400_thermal_remove, 335 .remove = int3400_thermal_remove,
336 .driver = { 336 .driver = {
337 .name = "int3400 thermal", 337 .name = "int3400 thermal",
338 .owner = THIS_MODULE,
339 .acpi_match_table = ACPI_PTR(int3400_thermal_match), 338 .acpi_match_table = ACPI_PTR(int3400_thermal_match),
340 }, 339 },
341}; 340};
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index a5d08c14ba24..c5cbc3af3a05 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -231,7 +231,6 @@ static struct platform_driver int3402_thermal_driver = {
231 .remove = int3402_thermal_remove, 231 .remove = int3402_thermal_remove,
232 .driver = { 232 .driver = {
233 .name = "int3402 thermal", 233 .name = "int3402 thermal",
234 .owner = THIS_MODULE,
235 .acpi_match_table = int3402_thermal_match, 234 .acpi_match_table = int3402_thermal_match,
236 }, 235 },
237}; 236};
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 1bfa6a69e77a..0faf500d8a77 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -301,6 +301,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
301{ 301{
302 struct int3403_sensor *obj = priv->priv; 302 struct int3403_sensor *obj = priv->priv;
303 303
304 acpi_remove_notify_handler(priv->adev->handle,
305 ACPI_DEVICE_NOTIFY, int3403_notify);
304 thermal_zone_device_unregister(obj->tzone); 306 thermal_zone_device_unregister(obj->tzone);
305 return 0; 307 return 0;
306} 308}
@@ -369,6 +371,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
369 p = buf.pointer; 371 p = buf.pointer;
370 if (!p || (p->type != ACPI_TYPE_PACKAGE)) { 372 if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
371 printk(KERN_WARNING "Invalid PPSS data\n"); 373 printk(KERN_WARNING "Invalid PPSS data\n");
374 kfree(buf.pointer);
372 return -EFAULT; 375 return -EFAULT;
373 } 376 }
374 377
@@ -381,6 +384,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
381 384
382 priv->priv = obj; 385 priv->priv = obj;
383 386
387 kfree(buf.pointer);
384 /* TODO: add ACPI notification support */ 388 /* TODO: add ACPI notification support */
385 389
386 return result; 390 return result;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
new file mode 100644
index 000000000000..0fe5dbbea968
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -0,0 +1,311 @@
1/*
2 * processor_thermal_device.c
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/pci.h>
19#include <linux/platform_device.h>
20#include <linux/acpi.h>
21
22/* Broadwell-U/HSB thermal reporting device */
23#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
24#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
25
26/* Braswell thermal reporting device */
27#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
28
29struct power_config {
30 u32 index;
31 u32 min_uw;
32 u32 max_uw;
33 u32 tmin_us;
34 u32 tmax_us;
35 u32 step_uw;
36};
37
38struct proc_thermal_device {
39 struct device *dev;
40 struct acpi_device *adev;
41 struct power_config power_limits[2];
42};
43
44enum proc_thermal_emum_mode_type {
45 PROC_THERMAL_NONE,
46 PROC_THERMAL_PCI,
47 PROC_THERMAL_PLATFORM_DEV
48};
49
50/*
51 * We can have only one type of enumeration, PCI or Platform,
52 * not both. So we don't need instance specific data.
53 */
54static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
55 PROC_THERMAL_NONE;
56
57#define POWER_LIMIT_SHOW(index, suffix) \
58static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
59 struct device_attribute *attr, \
60 char *buf) \
61{ \
62 struct pci_dev *pci_dev; \
63 struct platform_device *pdev; \
64 struct proc_thermal_device *proc_dev; \
65\
66 if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
67 pdev = to_platform_device(dev); \
68 proc_dev = platform_get_drvdata(pdev); \
69 } else { \
70 pci_dev = to_pci_dev(dev); \
71 proc_dev = pci_get_drvdata(pci_dev); \
72 } \
73 return sprintf(buf, "%lu\n",\
74 (unsigned long)proc_dev->power_limits[index].suffix * 1000); \
75}
76
77POWER_LIMIT_SHOW(0, min_uw)
78POWER_LIMIT_SHOW(0, max_uw)
79POWER_LIMIT_SHOW(0, step_uw)
80POWER_LIMIT_SHOW(0, tmin_us)
81POWER_LIMIT_SHOW(0, tmax_us)
82
83POWER_LIMIT_SHOW(1, min_uw)
84POWER_LIMIT_SHOW(1, max_uw)
85POWER_LIMIT_SHOW(1, step_uw)
86POWER_LIMIT_SHOW(1, tmin_us)
87POWER_LIMIT_SHOW(1, tmax_us)
88
89static DEVICE_ATTR_RO(power_limit_0_min_uw);
90static DEVICE_ATTR_RO(power_limit_0_max_uw);
91static DEVICE_ATTR_RO(power_limit_0_step_uw);
92static DEVICE_ATTR_RO(power_limit_0_tmin_us);
93static DEVICE_ATTR_RO(power_limit_0_tmax_us);
94
95static DEVICE_ATTR_RO(power_limit_1_min_uw);
96static DEVICE_ATTR_RO(power_limit_1_max_uw);
97static DEVICE_ATTR_RO(power_limit_1_step_uw);
98static DEVICE_ATTR_RO(power_limit_1_tmin_us);
99static DEVICE_ATTR_RO(power_limit_1_tmax_us);
100
101static struct attribute *power_limit_attrs[] = {
102 &dev_attr_power_limit_0_min_uw.attr,
103 &dev_attr_power_limit_1_min_uw.attr,
104 &dev_attr_power_limit_0_max_uw.attr,
105 &dev_attr_power_limit_1_max_uw.attr,
106 &dev_attr_power_limit_0_step_uw.attr,
107 &dev_attr_power_limit_1_step_uw.attr,
108 &dev_attr_power_limit_0_tmin_us.attr,
109 &dev_attr_power_limit_1_tmin_us.attr,
110 &dev_attr_power_limit_0_tmax_us.attr,
111 &dev_attr_power_limit_1_tmax_us.attr,
112 NULL
113};
114
115static struct attribute_group power_limit_attribute_group = {
116 .attrs = power_limit_attrs,
117 .name = "power_limits"
118};
119
120static int proc_thermal_add(struct device *dev,
121 struct proc_thermal_device **priv)
122{
123 struct proc_thermal_device *proc_priv;
124 struct acpi_device *adev;
125 acpi_status status;
126 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
127 union acpi_object *elements, *ppcc;
128 union acpi_object *p;
129 int i;
130 int ret;
131
132 adev = ACPI_COMPANION(dev);
133 if (!adev)
134 return -ENODEV;
135
136 status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
137 if (ACPI_FAILURE(status))
138 return -ENODEV;
139
140 p = buf.pointer;
141 if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
142 dev_err(dev, "Invalid PPCC data\n");
143 ret = -EFAULT;
144 goto free_buffer;
145 }
146 if (!p->package.count) {
147 dev_err(dev, "Invalid PPCC package size\n");
148 ret = -EFAULT;
149 goto free_buffer;
150 }
151
152 proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
153 if (!proc_priv) {
154 ret = -ENOMEM;
155 goto free_buffer;
156 }
157
158 proc_priv->dev = dev;
159 proc_priv->adev = adev;
160
161 for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
162 elements = &(p->package.elements[i+1]);
163 if (elements->type != ACPI_TYPE_PACKAGE ||
164 elements->package.count != 6) {
165 ret = -EFAULT;
166 goto free_buffer;
167 }
168 ppcc = elements->package.elements;
169 proc_priv->power_limits[i].index = ppcc[0].integer.value;
170 proc_priv->power_limits[i].min_uw = ppcc[1].integer.value;
171 proc_priv->power_limits[i].max_uw = ppcc[2].integer.value;
172 proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value;
173 proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value;
174 proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
175 }
176
177 *priv = proc_priv;
178
179 ret = sysfs_create_group(&dev->kobj,
180 &power_limit_attribute_group);
181
182free_buffer:
183 kfree(buf.pointer);
184
185 return ret;
186}
187
188void proc_thermal_remove(struct proc_thermal_device *proc_priv)
189{
190 sysfs_remove_group(&proc_priv->dev->kobj,
191 &power_limit_attribute_group);
192}
193
194static int int3401_add(struct platform_device *pdev)
195{
196 struct proc_thermal_device *proc_priv;
197 int ret;
198
199 if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
200 dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
201 return -ENODEV;
202 }
203
204 ret = proc_thermal_add(&pdev->dev, &proc_priv);
205 if (ret)
206 return ret;
207
208 platform_set_drvdata(pdev, proc_priv);
209 proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
210
211 return 0;
212}
213
214static int int3401_remove(struct platform_device *pdev)
215{
216 proc_thermal_remove(platform_get_drvdata(pdev));
217
218 return 0;
219}
220
221static int proc_thermal_pci_probe(struct pci_dev *pdev,
222 const struct pci_device_id *unused)
223{
224 struct proc_thermal_device *proc_priv;
225 int ret;
226
227 if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
228 dev_err(&pdev->dev, "error: enumerated as platform dev\n");
229 return -ENODEV;
230 }
231
232 ret = pci_enable_device(pdev);
233 if (ret < 0) {
234 dev_err(&pdev->dev, "error: could not enable device\n");
235 return ret;
236 }
237
238 ret = proc_thermal_add(&pdev->dev, &proc_priv);
239 if (ret) {
240 pci_disable_device(pdev);
241 return ret;
242 }
243
244 pci_set_drvdata(pdev, proc_priv);
245 proc_thermal_emum_mode = PROC_THERMAL_PCI;
246
247 return 0;
248}
249
250static void proc_thermal_pci_remove(struct pci_dev *pdev)
251{
252 proc_thermal_remove(pci_get_drvdata(pdev));
253 pci_disable_device(pdev);
254}
255
256static const struct pci_device_id proc_thermal_pci_ids[] = {
257 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
258 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
259 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
260 { 0, },
261};
262
263MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
264
265static struct pci_driver proc_thermal_pci_driver = {
266 .name = "proc_thermal",
267 .probe = proc_thermal_pci_probe,
268 .remove = proc_thermal_pci_remove,
269 .id_table = proc_thermal_pci_ids,
270};
271
272static const struct acpi_device_id int3401_device_ids[] = {
273 {"INT3401", 0},
274 {"", 0},
275};
276MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
277
278static struct platform_driver int3401_driver = {
279 .probe = int3401_add,
280 .remove = int3401_remove,
281 .driver = {
282 .name = "int3401 thermal",
283 .acpi_match_table = int3401_device_ids,
284 },
285};
286
287static int __init proc_thermal_init(void)
288{
289 int ret;
290
291 ret = platform_driver_register(&int3401_driver);
292 if (ret)
293 return ret;
294
295 ret = pci_register_driver(&proc_thermal_pci_driver);
296
297 return ret;
298}
299
300static void __exit proc_thermal_exit(void)
301{
302 platform_driver_unregister(&int3401_driver);
303 pci_unregister_driver(&proc_thermal_pci_driver);
304}
305
306module_init(proc_thermal_init);
307module_exit(proc_thermal_exit);
308
309MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
310MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
311MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index e98b4249187c..6ceebd659dd4 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
688 { X86_VENDOR_INTEL, 6, 0x45}, 688 { X86_VENDOR_INTEL, 6, 0x45},
689 { X86_VENDOR_INTEL, 6, 0x46}, 689 { X86_VENDOR_INTEL, 6, 0x46},
690 { X86_VENDOR_INTEL, 6, 0x4c}, 690 { X86_VENDOR_INTEL, 6, 0x4c},
691 { X86_VENDOR_INTEL, 6, 0x56},
691 {} 692 {}
692}; 693};
693MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); 694MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index e145b66df444..d717f3dab6f1 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
149 * 149 *
150 * Return: pointer to trip points table, NULL otherwise 150 * Return: pointer to trip points table, NULL otherwise
151 */ 151 */
152const struct thermal_trip * const 152const struct thermal_trip *
153of_thermal_get_trip_points(struct thermal_zone_device *tz) 153of_thermal_get_trip_points(struct thermal_zone_device *tz)
154{ 154{
155 struct __thermal_zone *data = tz->devdata; 155 struct __thermal_zone *data = tz->devdata;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 8803e693fe68..2580a4872f90 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -63,7 +63,7 @@ struct rcar_thermal_priv {
63 struct mutex lock; 63 struct mutex lock;
64 struct list_head list; 64 struct list_head list;
65 int id; 65 int id;
66 int ctemp; 66 u32 ctemp;
67}; 67};
68 68
69#define rcar_thermal_for_each_priv(pos, common) \ 69#define rcar_thermal_for_each_priv(pos, common) \
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
145{ 145{
146 struct device *dev = rcar_priv_to_dev(priv); 146 struct device *dev = rcar_priv_to_dev(priv);
147 int i; 147 int i;
148 int ctemp, old, new; 148 u32 ctemp, old, new;
149 int ret = -EINVAL; 149 int ret = -EINVAL;
150 150
151 mutex_lock(&priv->lock); 151 mutex_lock(&priv->lock);
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
372 int i; 372 int i;
373 int ret = -ENODEV; 373 int ret = -ENODEV;
374 int idle = IDLE_INTERVAL; 374 int idle = IDLE_INTERVAL;
375 u32 enr_bits = 0;
375 376
376 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); 377 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
377 if (!common) 378 if (!common)
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
390 391
391 /* 392 /*
392 * platform has IRQ support. 393 * platform has IRQ support.
393 * Then, drier use common register 394 * Then, driver uses common registers
394 */ 395 */
395 396
396 ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, 397 ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
408 if (IS_ERR(common->base)) 409 if (IS_ERR(common->base))
409 return PTR_ERR(common->base); 410 return PTR_ERR(common->base);
410 411
411 /* enable temperature comparation */
412 rcar_thermal_common_write(common, ENR, 0x00030303);
413
414 idle = 0; /* polling delay is not needed */ 412 idle = 0; /* polling delay is not needed */
415 } 413 }
416 414
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
452 rcar_thermal_irq_enable(priv); 450 rcar_thermal_irq_enable(priv);
453 451
454 list_move_tail(&priv->list, &common->head); 452 list_move_tail(&priv->list, &common->head);
453
454 /* update ENR bits */
455 enr_bits |= 3 << (i * 8);
455 } 456 }
456 457
458 /* enable temperature comparation */
459 if (irq)
460 rcar_thermal_common_write(common, ENR, enr_bits);
461
457 platform_set_drvdata(pdev, common); 462 platform_set_drvdata(pdev, common);
458 463
459 dev_info(dev, "%d sensor probed\n", i); 464 dev_info(dev, "%d sensor probed\n", i);
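Quick sanity check on the rcar_thermal change above: accumulating enr_bits per probed sensor reproduces the 0x00030303 value that the removed unconditional write used, but now only for sensors that were actually set up. A tiny sketch that prints the accumulated mask for three sensors:

#include <stdio.h>

int main(void)
{
	unsigned int enr_bits = 0;
	int i;

	for (i = 0; i < 3; i++)
		enr_bits |= 3 << (i * 8);

	printf("ENR = 0x%08x\n", enr_bits);	/* prints 0x00030303 */
	return 0;
}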
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 1bcddfc60e91..9c6ce548e363 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -677,7 +677,6 @@ static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
677static struct platform_driver rockchip_thermal_driver = { 677static struct platform_driver rockchip_thermal_driver = {
678 .driver = { 678 .driver = {
679 .name = "rockchip-thermal", 679 .name = "rockchip-thermal",
680 .owner = THIS_MODULE,
681 .pm = &rockchip_thermal_pm_ops, 680 .pm = &rockchip_thermal_pm_ops,
682 .of_match_table = of_rockchip_thermal_match, 681 .of_match_table = of_rockchip_thermal_match,
683 }, 682 },
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index f760389a204c..c43306ecc0ab 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
1config EXYNOS_THERMAL 1config EXYNOS_THERMAL
2 tristate "Exynos thermal management unit driver" 2 tristate "Exynos thermal management unit driver"
3 depends on ARCH_HAS_BANDGAP && OF 3 depends on OF
4 help 4 help
5 If you say yes here you get support for the TMU (Thermal Management 5 If you say yes here you get support for the TMU (Thermal Management
6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises 6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index b6be572704a4..6dc3815cc73f 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -347,7 +347,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
347int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) 347int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
348{ 348{
349 int ret; 349 int ret;
350 struct cpumask mask_val;
351 struct exynos_thermal_zone *th_zone; 350 struct exynos_thermal_zone *th_zone;
352 351
353 if (!sensor_conf || !sensor_conf->read_temperature) { 352 if (!sensor_conf || !sensor_conf->read_temperature) {
@@ -367,13 +366,14 @@ int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
367 * sensor 366 * sensor
368 */ 367 */
369 if (sensor_conf->cooling_data.freq_clip_count > 0) { 368 if (sensor_conf->cooling_data.freq_clip_count > 0) {
370 cpumask_set_cpu(0, &mask_val);
371 th_zone->cool_dev[th_zone->cool_dev_size] = 369 th_zone->cool_dev[th_zone->cool_dev_size] =
372 cpufreq_cooling_register(&mask_val); 370 cpufreq_cooling_register(cpu_present_mask);
373 if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) { 371 if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
374 dev_err(sensor_conf->dev, 372 ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
375 "Failed to register cpufreq cooling device\n"); 373 if (ret != -EPROBE_DEFER)
376 ret = -EINVAL; 374 dev_err(sensor_conf->dev,
375 "Failed to register cpufreq cooling device: %d\n",
376 ret);
377 goto err_unregister; 377 goto err_unregister;
378 } 378 }
379 th_zone->cool_dev_size++; 379 th_zone->cool_dev_size++;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index d44d91d681d4..d2f1e62a4232 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -927,7 +927,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
927 /* Register the sensor with thermal management interface */ 927 /* Register the sensor with thermal management interface */
928 ret = exynos_register_thermal(sensor_conf); 928 ret = exynos_register_thermal(sensor_conf);
929 if (ret) { 929 if (ret) {
930 dev_err(&pdev->dev, "Failed to register thermal interface\n"); 930 if (ret != -EPROBE_DEFER)
931 dev_err(&pdev->dev,
932 "Failed to register thermal interface: %d\n",
933 ret);
931 goto err_clk; 934 goto err_clk;
932 } 935 }
933 data->reg_conf = sensor_conf; 936 data->reg_conf = sensor_conf;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 84fdf0792e27..87e0b0782023 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -930,7 +930,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
930 struct thermal_zone_device *pos1; 930 struct thermal_zone_device *pos1;
931 struct thermal_cooling_device *pos2; 931 struct thermal_cooling_device *pos2;
932 unsigned long max_state; 932 unsigned long max_state;
933 int result; 933 int result, ret;
934 934
935 if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) 935 if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
936 return -EINVAL; 936 return -EINVAL;
@@ -947,7 +947,9 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
947 if (tz != pos1 || cdev != pos2) 947 if (tz != pos1 || cdev != pos2)
948 return -EINVAL; 948 return -EINVAL;
949 949
950 cdev->ops->get_max_state(cdev, &max_state); 950 ret = cdev->ops->get_max_state(cdev, &max_state);
951 if (ret)
952 return ret;
951 953
952 /* lower default 0, upper default max_state */ 954 /* lower default 0, upper default max_state */
953 lower = lower == THERMAL_NO_LIMIT ? 0 : lower; 955 lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 9083e7520623..0531c752fbbb 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void);
91void of_thermal_destroy_zones(void); 91void of_thermal_destroy_zones(void);
92int of_thermal_get_ntrips(struct thermal_zone_device *); 92int of_thermal_get_ntrips(struct thermal_zone_device *);
93bool of_thermal_is_trip_valid(struct thermal_zone_device *, int); 93bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
94const struct thermal_trip * const 94const struct thermal_trip *
95of_thermal_get_trip_points(struct thermal_zone_device *); 95of_thermal_get_trip_points(struct thermal_zone_device *);
96#else 96#else
97static inline int of_parse_thermal_zones(void) { return 0; } 97static inline int of_parse_thermal_zones(void) { return 0; }
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
105{ 105{
106 return 0; 106 return 0;
107} 107}
108static inline const struct thermal_trip * const 108static inline const struct thermal_trip *
109of_thermal_get_trip_points(struct thermal_zone_device *tz) 109of_thermal_get_trip_points(struct thermal_zone_device *tz)
110{ 110{
111 return NULL; 111 return NULL;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 5fd03865e396..3fb054a10f6a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -28,7 +28,6 @@
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/thermal.h> 30#include <linux/thermal.h>
31#include <linux/cpufreq.h>
32#include <linux/cpumask.h> 31#include <linux/cpumask.h>
33#include <linux/cpu_cooling.h> 32#include <linux/cpu_cooling.h>
34#include <linux/of.h> 33#include <linux/of.h>
@@ -407,17 +406,17 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
407 if (!data) 406 if (!data)
408 return -EINVAL; 407 return -EINVAL;
409 408
410 if (!cpufreq_get_current_driver()) {
411 dev_dbg(bgp->dev, "no cpufreq driver yet\n");
412 return -EPROBE_DEFER;
413 }
414
415 /* Register cooling device */ 409 /* Register cooling device */
416 data->cool_dev = cpufreq_cooling_register(cpu_present_mask); 410 data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
417 if (IS_ERR(data->cool_dev)) { 411 if (IS_ERR(data->cool_dev)) {
418 dev_err(bgp->dev, 412 int ret = PTR_ERR(data->cool_dev);
419 "Failed to register cpufreq cooling device\n"); 413
420 return PTR_ERR(data->cool_dev); 414 if (ret != -EPROBE_DEFER)
415 dev_err(bgp->dev,
416 "Failed to register cpu cooling device %d\n",
417 ret);
418
419 return ret;
421 } 420 }
422 ti_bandgap_set_sensor_data(bgp, id, data); 421 ti_bandgap_set_sensor_data(bgp, id, data);
423 422
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index d2b496750d59..4ddfa60c9222 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2399,17 +2399,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
2399 2399
2400 poll_wait(file, &tty->read_wait, wait); 2400 poll_wait(file, &tty->read_wait, wait);
2401 poll_wait(file, &tty->write_wait, wait); 2401 poll_wait(file, &tty->write_wait, wait);
2402 if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2403 mask |= POLLHUP;
2404 if (input_available_p(tty, 1)) 2402 if (input_available_p(tty, 1))
2405 mask |= POLLIN | POLLRDNORM; 2403 mask |= POLLIN | POLLRDNORM;
2406 else if (mask & POLLHUP) {
2407 tty_flush_to_ldisc(tty);
2408 if (input_available_p(tty, 1))
2409 mask |= POLLIN | POLLRDNORM;
2410 }
2411 if (tty->packet && tty->link->ctrl_status) 2404 if (tty->packet && tty->link->ctrl_status)
2412 mask |= POLLPRI | POLLIN | POLLRDNORM; 2405 mask |= POLLPRI | POLLIN | POLLRDNORM;
2406 if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2407 mask |= POLLHUP;
2413 if (tty_hung_up_p(file)) 2408 if (tty_hung_up_p(file))
2414 mask |= POLLHUP; 2409 mask |= POLLHUP;
2415 if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { 2410 if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 31feeb2d0a66..d1f8dc6aabcb 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1815,7 +1815,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
1815} 1815}
1816 1816
1817static int 1817static int
1818pci_wch_ch382_setup(struct serial_private *priv, 1818pci_wch_ch38x_setup(struct serial_private *priv,
1819 const struct pciserial_board *board, 1819 const struct pciserial_board *board,
1820 struct uart_8250_port *port, int idx) 1820 struct uart_8250_port *port, int idx)
1821{ 1821{
@@ -1880,6 +1880,7 @@ pci_wch_ch382_setup(struct serial_private *priv,
1880 1880
1881#define PCIE_VENDOR_ID_WCH 0x1c00 1881#define PCIE_VENDOR_ID_WCH 0x1c00
1882#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 1882#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
1883#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
1883 1884
1884/* Unknown vendors/cards - this should not be in linux/pci_ids.h */ 1885/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
1885#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 1886#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -2571,13 +2572,21 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2571 .subdevice = PCI_ANY_ID, 2572 .subdevice = PCI_ANY_ID,
2572 .setup = pci_wch_ch353_setup, 2573 .setup = pci_wch_ch353_setup,
2573 }, 2574 },
2574 /* WCH CH382 2S1P card (16750 clone) */ 2575 /* WCH CH382 2S1P card (16850 clone) */
2575 { 2576 {
2576 .vendor = PCIE_VENDOR_ID_WCH, 2577 .vendor = PCIE_VENDOR_ID_WCH,
2577 .device = PCIE_DEVICE_ID_WCH_CH382_2S1P, 2578 .device = PCIE_DEVICE_ID_WCH_CH382_2S1P,
2578 .subvendor = PCI_ANY_ID, 2579 .subvendor = PCI_ANY_ID,
2579 .subdevice = PCI_ANY_ID, 2580 .subdevice = PCI_ANY_ID,
2580 .setup = pci_wch_ch382_setup, 2581 .setup = pci_wch_ch38x_setup,
2582 },
2583 /* WCH CH384 4S card (16850 clone) */
2584 {
2585 .vendor = PCIE_VENDOR_ID_WCH,
2586 .device = PCIE_DEVICE_ID_WCH_CH384_4S,
2587 .subvendor = PCI_ANY_ID,
2588 .subdevice = PCI_ANY_ID,
2589 .setup = pci_wch_ch38x_setup,
2581 }, 2590 },
2582 /* 2591 /*
2583 * ASIX devices with FIFO bug 2592 * ASIX devices with FIFO bug
@@ -2876,6 +2885,7 @@ enum pci_board_num_t {
2876 pbn_fintek_4, 2885 pbn_fintek_4,
2877 pbn_fintek_8, 2886 pbn_fintek_8,
2878 pbn_fintek_12, 2887 pbn_fintek_12,
2888 pbn_wch384_4,
2879}; 2889};
2880 2890
2881/* 2891/*
@@ -3675,6 +3685,14 @@ static struct pciserial_board pci_boards[] = {
3675 .base_baud = 115200, 3685 .base_baud = 115200,
3676 .first_offset = 0x40, 3686 .first_offset = 0x40,
3677 }, 3687 },
3688
3689 [pbn_wch384_4] = {
3690 .flags = FL_BASE0,
3691 .num_ports = 4,
3692 .base_baud = 115200,
3693 .uart_offset = 8,
3694 .first_offset = 0xC0,
3695 },
3678}; 3696};
3679 3697
3680static const struct pci_device_id blacklist[] = { 3698static const struct pci_device_id blacklist[] = {
@@ -3687,6 +3705,7 @@ static const struct pci_device_id blacklist[] = {
3687 { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */ 3705 { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
3688 { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */ 3706 { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
3689 { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */ 3707 { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
3708 { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
3690}; 3709};
3691 3710
3692/* 3711/*
@@ -5400,6 +5419,10 @@ static struct pci_device_id serial_pci_tbl[] = {
5400 PCI_ANY_ID, PCI_ANY_ID, 5419 PCI_ANY_ID, PCI_ANY_ID,
5401 0, 0, pbn_b0_bt_2_115200 }, 5420 0, 0, pbn_b0_bt_2_115200 },
5402 5421
5422 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
5423 PCI_ANY_ID, PCI_ANY_ID,
5424 0, 0, pbn_wch384_4 },
5425
5403 /* 5426 /*
5404 * Commtech, Inc. Fastcom adapters 5427 * Commtech, Inc. Fastcom adapters
5405 */ 5428 */
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 19273e31d224..107e80722575 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1757,32 +1757,43 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
1757#endif 1757#endif
1758 1758
1759#if defined(CONFIG_ARCH_EXYNOS) 1759#if defined(CONFIG_ARCH_EXYNOS)
1760#define EXYNOS_COMMON_SERIAL_DRV_DATA \
1761 .info = &(struct s3c24xx_uart_info) { \
1762 .name = "Samsung Exynos UART", \
1763 .type = PORT_S3C6400, \
1764 .has_divslot = 1, \
1765 .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
1766 .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
1767 .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
1768 .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
1769 .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
1770 .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
1771 .def_clk_sel = S3C2410_UCON_CLKSEL0, \
1772 .num_clks = 1, \
1773 .clksel_mask = 0, \
1774 .clksel_shift = 0, \
1775 }, \
1776 .def_cfg = &(struct s3c2410_uartcfg) { \
1777 .ucon = S5PV210_UCON_DEFAULT, \
1778 .ufcon = S5PV210_UFCON_DEFAULT, \
1779 .has_fracval = 1, \
1780 } \
1781
1760static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = { 1782static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
1761 .info = &(struct s3c24xx_uart_info) { 1783 EXYNOS_COMMON_SERIAL_DRV_DATA,
1762 .name = "Samsung Exynos4 UART",
1763 .type = PORT_S3C6400,
1764 .has_divslot = 1,
1765 .rx_fifomask = S5PV210_UFSTAT_RXMASK,
1766 .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
1767 .rx_fifofull = S5PV210_UFSTAT_RXFULL,
1768 .tx_fifofull = S5PV210_UFSTAT_TXFULL,
1769 .tx_fifomask = S5PV210_UFSTAT_TXMASK,
1770 .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
1771 .def_clk_sel = S3C2410_UCON_CLKSEL0,
1772 .num_clks = 1,
1773 .clksel_mask = 0,
1774 .clksel_shift = 0,
1775 },
1776 .def_cfg = &(struct s3c2410_uartcfg) {
1777 .ucon = S5PV210_UCON_DEFAULT,
1778 .ufcon = S5PV210_UFCON_DEFAULT,
1779 .has_fracval = 1,
1780 },
1781 .fifosize = { 256, 64, 16, 16 }, 1784 .fifosize = { 256, 64, 16, 16 },
1782}; 1785};
1786
1787static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
1788 EXYNOS_COMMON_SERIAL_DRV_DATA,
1789 .fifosize = { 64, 256, 16, 256 },
1790};
1791
1783#define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data) 1792#define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
1793#define EXYNOS5433_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos5433_serial_drv_data)
1784#else 1794#else
1785#define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL 1795#define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
1796#define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL
1786#endif 1797#endif
1787 1798
1788static struct platform_device_id s3c24xx_serial_driver_ids[] = { 1799static struct platform_device_id s3c24xx_serial_driver_ids[] = {
@@ -1804,6 +1815,9 @@ static struct platform_device_id s3c24xx_serial_driver_ids[] = {
1804 }, { 1815 }, {
1805 .name = "exynos4210-uart", 1816 .name = "exynos4210-uart",
1806 .driver_data = EXYNOS4210_SERIAL_DRV_DATA, 1817 .driver_data = EXYNOS4210_SERIAL_DRV_DATA,
1818 }, {
1819 .name = "exynos5433-uart",
1820 .driver_data = EXYNOS5433_SERIAL_DRV_DATA,
1807 }, 1821 },
1808 { }, 1822 { },
1809}; 1823};
@@ -1823,6 +1837,8 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
1823 .data = (void *)S5PV210_SERIAL_DRV_DATA }, 1837 .data = (void *)S5PV210_SERIAL_DRV_DATA },
1824 { .compatible = "samsung,exynos4210-uart", 1838 { .compatible = "samsung,exynos4210-uart",
1825 .data = (void *)EXYNOS4210_SERIAL_DRV_DATA }, 1839 .data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
1840 { .compatible = "samsung,exynos5433-uart",
1841 .data = (void *)EXYNOS5433_SERIAL_DRV_DATA },
1826 {}, 1842 {},
1827}; 1843};
1828MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match); 1844MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
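The samsung serial change above factors the shared fields of the Exynos driver data into EXYNOS_COMMON_SERIAL_DRV_DATA, so exynos4210 and the new exynos5433 entry differ only in fifosize. A stand-alone sketch of that shared-designated-initializer pattern, with illustrative names:

#include <stdio.h>

struct drv_data {
	const char *name;
	int has_divslot;
	int fifosize[4];
};

#define COMMON_DRV_DATA			\
	.name = "Samsung Exynos UART",	\
	.has_divslot = 1

static struct drv_data exynos4210 = {
	COMMON_DRV_DATA,
	.fifosize = { 256, 64, 16, 16 },
};

static struct drv_data exynos5433 = {
	COMMON_DRV_DATA,
	.fifosize = { 64, 256, 16, 256 },
};

int main(void)
{
	printf("%s: port0 fifo %d vs %d\n", exynos4210.name,
	       exynos4210.fifosize[0], exynos5433.fifosize[0]);
	return 0;
}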
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 57ca61b14670..984605bb5bf1 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2164,7 +2164,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
2164 break; 2164 break;
2165 } 2165 }
2166 2166
2167 dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n", 2167 printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
2168 port->dev ? dev_name(port->dev) : "",
2169 port->dev ? ": " : "",
2168 drv->dev_name, 2170 drv->dev_name,
2169 drv->tty_driver->name_base + port->line, 2171 drv->tty_driver->name_base + port->line,
2170 address, port->irq, port->uartclk / 16, uart_type(port)); 2172 address, port->irq, port->uartclk / 16, uart_type(port));
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 4f35b43e2475..51f066aa375e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1464,6 +1464,9 @@ static int tty_reopen(struct tty_struct *tty)
1464 driver->subtype == PTY_TYPE_MASTER) 1464 driver->subtype == PTY_TYPE_MASTER)
1465 return -EIO; 1465 return -EIO;
1466 1466
1467 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
1468 return -EBUSY;
1469
1467 tty->count++; 1470 tty->count++;
1468 1471
1469 WARN_ON(!tty->ldisc); 1472 WARN_ON(!tty->ldisc);
@@ -2106,10 +2109,6 @@ retry_open:
2106 retval = -ENODEV; 2109 retval = -ENODEV;
2107 filp->f_flags = saved_flags; 2110 filp->f_flags = saved_flags;
2108 2111
2109 if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
2110 !capable(CAP_SYS_ADMIN))
2111 retval = -EBUSY;
2112
2113 if (retval) { 2112 if (retval) {
2114#ifdef TTY_DEBUG_HANGUP 2113#ifdef TTY_DEBUG_HANGUP
2115 printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__, 2114 printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5b9825a4538a..a57dc8866fc5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -669,7 +669,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
669 if (!ci) 669 if (!ci)
670 return -ENOMEM; 670 return -ENOMEM;
671 671
672 platform_set_drvdata(pdev, ci);
673 ci->dev = dev; 672 ci->dev = dev;
674 ci->platdata = dev_get_platdata(dev); 673 ci->platdata = dev_get_platdata(dev);
675 ci->imx28_write_fix = !!(ci->platdata->flags & 674 ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -783,6 +782,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
783 } 782 }
784 } 783 }
785 784
785 platform_set_drvdata(pdev, ci);
786 ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED, 786 ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
787 ci->platdata->name, ci); 787 ci->platdata->name, ci);
788 if (ret) 788 if (ret)
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index c1694cff1eaf..48731d0bab35 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -91,6 +91,7 @@ static int host_start(struct ci_hdrc *ci)
91 if (!hcd) 91 if (!hcd)
92 return -ENOMEM; 92 return -ENOMEM;
93 93
94 dev_set_drvdata(ci->dev, ci);
94 hcd->rsrc_start = ci->hw_bank.phys; 95 hcd->rsrc_start = ci->hw_bank.phys;
95 hcd->rsrc_len = ci->hw_bank.size; 96 hcd->rsrc_len = ci->hw_bank.size;
96 hcd->regs = ci->hw_bank.abs; 97 hcd->regs = ci->hw_bank.abs;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 200168ec2d75..79242008085b 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2567,7 +2567,7 @@ error:
2567 * s3c_hsotg_ep_disable - disable given endpoint 2567 * s3c_hsotg_ep_disable - disable given endpoint
2568 * @ep: The endpoint to disable. 2568 * @ep: The endpoint to disable.
2569 */ 2569 */
2570static int s3c_hsotg_ep_disable(struct usb_ep *ep) 2570static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force)
2571{ 2571{
2572 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 2572 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2573 struct dwc2_hsotg *hsotg = hs_ep->parent; 2573 struct dwc2_hsotg *hsotg = hs_ep->parent;
@@ -2588,7 +2588,7 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2588 2588
2589 spin_lock_irqsave(&hsotg->lock, flags); 2589 spin_lock_irqsave(&hsotg->lock, flags);
2590 /* terminate all requests with shutdown */ 2590 /* terminate all requests with shutdown */
2591 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false); 2591 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, force);
2592 2592
2593 hsotg->fifo_map &= ~(1<<hs_ep->fifo_index); 2593 hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
2594 hs_ep->fifo_index = 0; 2594 hs_ep->fifo_index = 0;
@@ -2609,6 +2609,10 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2609 return 0; 2609 return 0;
2610} 2610}
2611 2611
2612static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2613{
2614 return s3c_hsotg_ep_disable_force(ep, false);
2615}
2612/** 2616/**
2613 * on_list - check request is on the given endpoint 2617 * on_list - check request is on the given endpoint
2614 * @ep: The endpoint to check. 2618 * @ep: The endpoint to check.
@@ -2924,7 +2928,7 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
2924 2928
2925 /* all endpoints should be shutdown */ 2929 /* all endpoints should be shutdown */
2926 for (ep = 1; ep < hsotg->num_of_eps; ep++) 2930 for (ep = 1; ep < hsotg->num_of_eps; ep++)
2927 s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); 2931 s3c_hsotg_ep_disable_force(&hsotg->eps[ep].ep, true);
2928 2932
2929 spin_lock_irqsave(&hsotg->lock, flags); 2933 spin_lock_irqsave(&hsotg->lock, flags);
2930 2934
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7c4faf738747..b642a2f998f9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -33,6 +33,8 @@
33#define PCI_DEVICE_ID_INTEL_BYT 0x0f37 33#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
34#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e 34#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
35#define PCI_DEVICE_ID_INTEL_BSW 0x22B7 35#define PCI_DEVICE_ID_INTEL_BSW 0x22B7
36#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
37#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
36 38
37struct dwc3_pci { 39struct dwc3_pci {
38 struct device *dev; 40 struct device *dev;
@@ -219,6 +221,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
219 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), }, 221 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
220 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, 222 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
221 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, 223 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
224 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
225 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
222 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 226 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
223 { } /* Terminating Entry */ 227 { } /* Terminating Entry */
224}; 228};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f03b136ecfce..8f65ab3a3b92 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -882,8 +882,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
882 882
883 if (i == (request->num_mapped_sgs - 1) || 883 if (i == (request->num_mapped_sgs - 1) ||
884 sg_is_last(s)) { 884 sg_is_last(s)) {
885 if (list_is_last(&req->list, 885 if (list_empty(&dep->request_list))
886 &dep->request_list))
887 last_one = true; 886 last_one = true;
888 chain = false; 887 chain = false;
889 } 888 }
@@ -901,6 +900,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
901 if (last_one) 900 if (last_one)
902 break; 901 break;
903 } 902 }
903
904 if (last_one)
905 break;
904 } else { 906 } else {
905 dma = req->request.dma; 907 dma = req->request.dma;
906 length = req->request.length; 908 length = req->request.length;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 6e04e302dc3a..a1bc3e3a0b09 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -399,8 +399,9 @@ static int hidg_setup(struct usb_function *f,
399 value = __le16_to_cpu(ctrl->wValue); 399 value = __le16_to_cpu(ctrl->wValue);
400 length = __le16_to_cpu(ctrl->wLength); 400 length = __le16_to_cpu(ctrl->wLength);
401 401
402 VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x " 402 VDBG(cdev,
403 "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value); 403 "%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
404 __func__, ctrl->bRequestType, ctrl->bRequest, value);
404 405
405 switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { 406 switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
406 case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 407 case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index a90440300735..259b656c0b3e 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -520,7 +520,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
520 req = midi_alloc_ep_req(ep, midi->buflen); 520 req = midi_alloc_ep_req(ep, midi->buflen);
521 521
522 if (!req) { 522 if (!req) {
523 ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n"); 523 ERROR(midi, "%s: alloc_ep_request failed\n", __func__);
524 return; 524 return;
525 } 525 }
526 req->length = 0; 526 req->length = 0;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index f7b203293205..e9715845f82e 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -897,7 +897,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
897 struct f_uac1_opts *opts; 897 struct f_uac1_opts *opts;
898 898
899 opts = container_of(f, struct f_uac1_opts, func_inst); 899 opts = container_of(f, struct f_uac1_opts, func_inst);
900 gaudio_cleanup(opts->card);
901 if (opts->fn_play_alloc) 900 if (opts->fn_play_alloc)
902 kfree(opts->fn_play); 901 kfree(opts->fn_play);
903 if (opts->fn_cap_alloc) 902 if (opts->fn_cap_alloc)
@@ -935,6 +934,7 @@ static void f_audio_free(struct usb_function *f)
935 struct f_audio *audio = func_to_audio(f); 934 struct f_audio *audio = func_to_audio(f);
936 struct f_uac1_opts *opts; 935 struct f_uac1_opts *opts;
937 936
937 gaudio_cleanup(&audio->card);
938 opts = container_of(f->fi, struct f_uac1_opts, func_inst); 938 opts = container_of(f->fi, struct f_uac1_opts, func_inst);
939 kfree(audio); 939 kfree(audio);
940 mutex_lock(&opts->lock); 940 mutex_lock(&opts->lock);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index c744e4975d74..db49ec4c748e 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -441,6 +441,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
441 kbuf = memdup_user(buf, len); 441 kbuf = memdup_user(buf, len);
442 if (IS_ERR(kbuf)) { 442 if (IS_ERR(kbuf)) {
443 value = PTR_ERR(kbuf); 443 value = PTR_ERR(kbuf);
444 kbuf = NULL;
444 goto free1; 445 goto free1;
445 } 446 }
446 447
@@ -449,6 +450,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
449 data->name, len, (int) value); 450 data->name, len, (int) value);
450free1: 451free1:
451 mutex_unlock(&data->lock); 452 mutex_unlock(&data->lock);
453 kfree (kbuf);
452 return value; 454 return value;
453} 455}
454 456
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index ce882371786b..9f93bed42052 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -716,10 +716,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
716 req->using_dma = 1; 716 req->using_dma = 1;
717 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length) 717 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
718 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE 718 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
719 | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; 719 | USBA_DMA_END_BUF_EN;
720 720
721 if (ep->is_in) 721 if (!ep->is_in)
722 req->ctrl |= USBA_DMA_END_BUF_EN; 722 req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
723 723
724 /* 724 /*
725 * Add this request to the queue and submit for DMA if 725 * Add this request to the queue and submit for DMA if
@@ -828,7 +828,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
828{ 828{
829 struct usba_ep *ep = to_usba_ep(_ep); 829 struct usba_ep *ep = to_usba_ep(_ep);
830 struct usba_udc *udc = ep->udc; 830 struct usba_udc *udc = ep->udc;
831 struct usba_request *req = to_usba_req(_req); 831 struct usba_request *req;
832 unsigned long flags; 832 unsigned long flags;
833 u32 status; 833 u32 status;
834 834
@@ -837,6 +837,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
837 837
838 spin_lock_irqsave(&udc->lock, flags); 838 spin_lock_irqsave(&udc->lock, flags);
839 839
840 list_for_each_entry(req, &ep->queue, queue) {
841 if (&req->req == _req)
842 break;
843 }
844
845 if (&req->req != _req) {
846 spin_unlock_irqrestore(&udc->lock, flags);
847 return -EINVAL;
848 }
849
840 if (req->using_dma) { 850 if (req->using_dma) {
841 /* 851 /*
842 * If this request is currently being transferred, 852 * If this request is currently being transferred,
@@ -1563,7 +1573,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
1563 if ((epstatus & epctrl) & USBA_RX_BK_RDY) { 1573 if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
1564 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name); 1574 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
1565 receive_data(ep); 1575 receive_data(ep);
1566 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
1567 } 1576 }
1568} 1577}
1569 1578
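
The usba_ep_dequeue hunk above stops trusting the caller-supplied request pointer: it walks the endpoint queue first and only dequeues the request if it is actually found there, returning -EINVAL otherwise. A minimal userspace sketch of that membership check follows, with made-up request/endpoint types.

#include <errno.h>
#include <stdio.h>

/* Invented request/endpoint types; only the membership check matters. */
struct request {
    int id;
    struct request *next;       /* queue linkage */
};

struct endpoint {
    struct request *queue;      /* head of the pending-request list */
};

/* Only dequeue a request that is actually on this endpoint's queue. */
static int dequeue(struct endpoint *ep, struct request *target)
{
    struct request **pp;

    for (pp = &ep->queue; *pp; pp = &(*pp)->next) {
        if (*pp == target) {
            *pp = target->next; /* unlink it */
            target->next = NULL;
            return 0;
        }
    }
    return -EINVAL;             /* caller passed a request we never queued */
}

int main(void)
{
    struct request a = { 1, NULL }, b = { 2, NULL }, stray = { 3, NULL };
    struct endpoint ep = { &a };

    a.next = &b;
    printf("%d\n", dequeue(&ep, &b));       /* 0 */
    printf("%d\n", dequeue(&ep, &stray));   /* -EINVAL (-22 on Linux) */
    return 0;
}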
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index ff67ceac77c4..d4fe8d769bd6 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -718,10 +718,11 @@ static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
718 struct bdc *bdc; 718 struct bdc *bdc;
719 int ret = 0; 719 int ret = 0;
720 720
721 bdc = ep->bdc;
722 if (!req || !ep || !ep->usb_ep.desc) 721 if (!req || !ep || !ep->usb_ep.desc)
723 return -EINVAL; 722 return -EINVAL;
724 723
724 bdc = ep->bdc;
725
725 req->usb_req.actual = 0; 726 req->usb_req.actual = 0;
726 req->usb_req.status = -EINPROGRESS; 727 req->usb_req.status = -EINPROGRESS;
727 req->epnum = ep->ep_num; 728 req->epnum = ep->ep_num;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index e113fd73aeae..f9a332775c47 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1581,6 +1581,10 @@ iso_stream_schedule (
1581 else 1581 else
1582 next = (now + 2 + 7) & ~0x07; /* full frame cache */ 1582 next = (now + 2 + 7) & ~0x07; /* full frame cache */
1583 1583
1584 /* If needed, initialize last_iso_frame so that this URB will be seen */
1585 if (ehci->isoc_count == 0)
1586 ehci->last_iso_frame = now >> 3;
1587
1584 /* 1588 /*
1585 * Use ehci->last_iso_frame as the base. There can't be any 1589 * Use ehci->last_iso_frame as the base. There can't be any
1586 * TDs scheduled for earlier than that. 1590 * TDs scheduled for earlier than that.
@@ -1600,11 +1604,11 @@ iso_stream_schedule (
1600 */ 1604 */
1601 now2 = (now - base) & (mod - 1); 1605 now2 = (now - base) & (mod - 1);
1602 1606
1603 /* Is the schedule already full? */ 1607 /* Is the schedule about to wrap around? */
1604 if (unlikely(!empty && start < period)) { 1608 if (unlikely(!empty && start < period)) {
1605 ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n", 1609 ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
1606 urb, stream->next_uframe, base, period, mod); 1610 urb, stream->next_uframe, base, period, mod);
1607 status = -ENOSPC; 1611 status = -EFBIG;
1608 goto fail; 1612 goto fail;
1609 } 1613 }
1610 1614
@@ -1671,10 +1675,6 @@ iso_stream_schedule (
1671 urb->start_frame = start & (mod - 1); 1675 urb->start_frame = start & (mod - 1);
1672 if (!stream->highspeed) 1676 if (!stream->highspeed)
1673 urb->start_frame >>= 3; 1677 urb->start_frame >>= 3;
1674
1675 /* Make sure scan_isoc() sees these */
1676 if (ehci->isoc_count == 0)
1677 ehci->last_iso_frame = now >> 3;
1678 return status; 1678 return status;
1679 1679
1680 fail: 1680 fail:
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 19a9af1b4d74..ff9af29b4e9f 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -451,7 +451,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
451 451
452 u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0); 452 u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
453 if (IS_ERR(u_phy)) { 453 if (IS_ERR(u_phy)) {
454 err = PTR_ERR(u_phy); 454 err = -EPROBE_DEFER;
455 goto cleanup_clk_en; 455 goto cleanup_clk_en;
456 } 456 }
457 hcd->usb_phy = u_phy; 457 hcd->usb_phy = u_phy;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index dd483c13565b..ce636466edb7 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -567,7 +567,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
567{ 567{
568 void __iomem *base; 568 void __iomem *base;
569 u32 control; 569 u32 control;
570 u32 fminterval; 570 u32 fminterval = 0;
571 bool no_fminterval = false;
571 int cnt; 572 int cnt;
572 573
573 if (!mmio_resource_enabled(pdev, 0)) 574 if (!mmio_resource_enabled(pdev, 0))
@@ -577,6 +578,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
577 if (base == NULL) 578 if (base == NULL)
578 return; 579 return;
579 580
581 /*
582 * ULi M5237 OHCI controller locks the whole system when accessing
583 * the OHCI_FMINTERVAL offset.
584 */
585 if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
586 no_fminterval = true;
587
580 control = readl(base + OHCI_CONTROL); 588 control = readl(base + OHCI_CONTROL);
581 589
582/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */ 590/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
@@ -615,7 +623,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
615 } 623 }
616 624
617 /* software reset of the controller, preserving HcFmInterval */ 625 /* software reset of the controller, preserving HcFmInterval */
618 fminterval = readl(base + OHCI_FMINTERVAL); 626 if (!no_fminterval)
627 fminterval = readl(base + OHCI_FMINTERVAL);
628
619 writel(OHCI_HCR, base + OHCI_CMDSTATUS); 629 writel(OHCI_HCR, base + OHCI_CMDSTATUS);
620 630
621 /* reset requires max 10 us delay */ 631 /* reset requires max 10 us delay */
@@ -624,7 +634,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
624 break; 634 break;
625 udelay(1); 635 udelay(1);
626 } 636 }
627 writel(fminterval, base + OHCI_FMINTERVAL); 637
638 if (!no_fminterval)
639 writel(fminterval, base + OHCI_FMINTERVAL);
628 640
629 /* Now the controller is safely in SUSPEND and nothing can wake it up */ 641 /* Now the controller is safely in SUSPEND and nothing can wake it up */
630 iounmap(base); 642 iounmap(base);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 142b601f9563..7f76c8a12f89 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
82 "must be suspended extra slowly", 82 "must be suspended extra slowly",
83 pdev->revision); 83 pdev->revision);
84 } 84 }
85 if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
86 xhci->quirks |= XHCI_BROKEN_STREAMS;
85 /* Fresco Logic confirms: all revisions of this chip do not 87 /* Fresco Logic confirms: all revisions of this chip do not
86 * support MSI, even though some of them claim to in their PCI 88 * support MSI, even though some of them claim to in their PCI
87 * capabilities. 89 * capabilities.
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 01fcbb5eb06e..c50d8d202618 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3803,6 +3803,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3803 return -EINVAL; 3803 return -EINVAL;
3804 } 3804 }
3805 3805
3806 if (setup == SETUP_CONTEXT_ONLY) {
3807 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3808 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3809 SLOT_STATE_DEFAULT) {
3810 xhci_dbg(xhci, "Slot already in default state\n");
3811 return 0;
3812 }
3813 }
3814
3806 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3815 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
3807 if (!command) 3816 if (!command)
3808 return -ENOMEM; 3817 return -ENOMEM;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 9d68372dd9aa..b005010240e5 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -72,6 +72,8 @@ config USB_MUSB_DA8XX
72 72
73config USB_MUSB_TUSB6010 73config USB_MUSB_TUSB6010
74 tristate "TUSB6010" 74 tristate "TUSB6010"
75 depends on ARCH_OMAP2PLUS || COMPILE_TEST
76 depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
75 77
76config USB_MUSB_OMAP2PLUS 78config USB_MUSB_OMAP2PLUS
77 tristate "OMAP2430 and onwards" 79 tristate "OMAP2430 and onwards"
@@ -85,6 +87,7 @@ config USB_MUSB_AM35X
85config USB_MUSB_DSPS 87config USB_MUSB_DSPS
86 tristate "TI DSPS platforms" 88 tristate "TI DSPS platforms"
87 select USB_MUSB_AM335X_CHILD 89 select USB_MUSB_AM335X_CHILD
90 depends on ARCH_OMAP2PLUS || COMPILE_TEST
88 depends on OF_IRQ 91 depends on OF_IRQ
89 92
90config USB_MUSB_BLACKFIN 93config USB_MUSB_BLACKFIN
@@ -93,6 +96,7 @@ config USB_MUSB_BLACKFIN
93 96
94config USB_MUSB_UX500 97config USB_MUSB_UX500
95 tristate "Ux500 platforms" 98 tristate "Ux500 platforms"
99 depends on ARCH_U8500 || COMPILE_TEST
96 100
97config USB_MUSB_JZ4740 101config USB_MUSB_JZ4740
98 tristate "JZ4740" 102 tristate "JZ4740"
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index a441a2de8619..178250145613 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -63,7 +63,7 @@ static void bfin_writew(void __iomem *addr, unsigned offset, u16 data)
63 bfin_write16(addr + offset, data); 63 bfin_write16(addr + offset, data);
64} 64}
65 65
66static void binf_writel(void __iomem *addr, unsigned offset, u32 data) 66static void bfin_writel(void __iomem *addr, unsigned offset, u32 data)
67{ 67{
68 bfin_write16(addr + offset, (u16)data); 68 bfin_write16(addr + offset, (u16)data);
69} 69}
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index f64fd964dc6d..c39a16ad7832 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -628,9 +628,9 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
628 ret = of_property_read_string_index(np, "dma-names", i, &str); 628 ret = of_property_read_string_index(np, "dma-names", i, &str);
629 if (ret) 629 if (ret)
630 goto err; 630 goto err;
631 if (!strncmp(str, "tx", 2)) 631 if (strstarts(str, "tx"))
632 is_tx = 1; 632 is_tx = 1;
633 else if (!strncmp(str, "rx", 2)) 633 else if (strstarts(str, "rx"))
634 is_tx = 0; 634 is_tx = 0;
635 else { 635 else {
636 dev_err(dev, "Wrong dmatype %s\n", str); 636 dev_err(dev, "Wrong dmatype %s\n", str);
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index ad3701a97389..48131aa8472c 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -59,20 +59,12 @@ static const struct musb_register_map musb_regmap[] = {
59 { "RxMaxPp", MUSB_RXMAXP, 16 }, 59 { "RxMaxPp", MUSB_RXMAXP, 16 },
60 { "RxCSR", MUSB_RXCSR, 16 }, 60 { "RxCSR", MUSB_RXCSR, 16 },
61 { "RxCount", MUSB_RXCOUNT, 16 }, 61 { "RxCount", MUSB_RXCOUNT, 16 },
62 { "ConfigData", MUSB_CONFIGDATA,8 },
63 { "IntrRxE", MUSB_INTRRXE, 16 }, 62 { "IntrRxE", MUSB_INTRRXE, 16 },
64 { "IntrTxE", MUSB_INTRTXE, 16 }, 63 { "IntrTxE", MUSB_INTRTXE, 16 },
65 { "IntrUsbE", MUSB_INTRUSBE, 8 }, 64 { "IntrUsbE", MUSB_INTRUSBE, 8 },
66 { "DevCtl", MUSB_DEVCTL, 8 }, 65 { "DevCtl", MUSB_DEVCTL, 8 },
67 { "BabbleCtl", MUSB_BABBLE_CTL,8 },
68 { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
69 { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
70 { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
71 { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
72 { "VControl", 0x68, 32 }, 66 { "VControl", 0x68, 32 },
73 { "HWVers", 0x69, 16 }, 67 { "HWVers", 0x69, 16 },
74 { "EPInfo", MUSB_EPINFO, 8 },
75 { "RAMInfo", MUSB_RAMINFO, 8 },
76 { "LinkInfo", MUSB_LINKINFO, 8 }, 68 { "LinkInfo", MUSB_LINKINFO, 8 },
77 { "VPLen", MUSB_VPLEN, 8 }, 69 { "VPLen", MUSB_VPLEN, 8 },
78 { "HS_EOF1", MUSB_HS_EOF1, 8 }, 70 { "HS_EOF1", MUSB_HS_EOF1, 8 },
@@ -103,6 +95,16 @@ static const struct musb_register_map musb_regmap[] = {
103 { "DMA_CNTLch7", 0x274, 16 }, 95 { "DMA_CNTLch7", 0x274, 16 },
104 { "DMA_ADDRch7", 0x278, 32 }, 96 { "DMA_ADDRch7", 0x278, 32 },
105 { "DMA_COUNTch7", 0x27C, 32 }, 97 { "DMA_COUNTch7", 0x27C, 32 },
98#ifndef CONFIG_BLACKFIN
99 { "ConfigData", MUSB_CONFIGDATA,8 },
100 { "BabbleCtl", MUSB_BABBLE_CTL,8 },
101 { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
102 { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
103 { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
104 { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
105 { "EPInfo", MUSB_EPINFO, 8 },
106 { "RAMInfo", MUSB_RAMINFO, 8 },
107#endif
106 { } /* Terminating Entry */ 108 { } /* Terminating Entry */
107}; 109};
108 110
@@ -197,30 +199,30 @@ static ssize_t musb_test_mode_write(struct file *file,
197 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 199 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
198 return -EFAULT; 200 return -EFAULT;
199 201
200 if (!strncmp(buf, "force host", 9)) 202 if (strstarts(buf, "force host"))
201 test = MUSB_TEST_FORCE_HOST; 203 test = MUSB_TEST_FORCE_HOST;
202 204
203 if (!strncmp(buf, "fifo access", 11)) 205 if (strstarts(buf, "fifo access"))
204 test = MUSB_TEST_FIFO_ACCESS; 206 test = MUSB_TEST_FIFO_ACCESS;
205 207
206 if (!strncmp(buf, "force full-speed", 15)) 208 if (strstarts(buf, "force full-speed"))
207 test = MUSB_TEST_FORCE_FS; 209 test = MUSB_TEST_FORCE_FS;
208 210
209 if (!strncmp(buf, "force high-speed", 15)) 211 if (strstarts(buf, "force high-speed"))
210 test = MUSB_TEST_FORCE_HS; 212 test = MUSB_TEST_FORCE_HS;
211 213
212 if (!strncmp(buf, "test packet", 10)) { 214 if (strstarts(buf, "test packet")) {
213 test = MUSB_TEST_PACKET; 215 test = MUSB_TEST_PACKET;
214 musb_load_testpacket(musb); 216 musb_load_testpacket(musb);
215 } 217 }
216 218
217 if (!strncmp(buf, "test K", 6)) 219 if (strstarts(buf, "test K"))
218 test = MUSB_TEST_K; 220 test = MUSB_TEST_K;
219 221
220 if (!strncmp(buf, "test J", 6)) 222 if (strstarts(buf, "test J"))
221 test = MUSB_TEST_J; 223 test = MUSB_TEST_J;
222 224
223 if (!strncmp(buf, "test SE0 NAK", 12)) 225 if (strstarts(buf, "test SE0 NAK"))
224 test = MUSB_TEST_SE0_NAK; 226 test = MUSB_TEST_SE0_NAK;
225 227
226 musb_writeb(musb->mregs, MUSB_TESTMODE, test); 228 musb_writeb(musb->mregs, MUSB_TESTMODE, test);
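
The musb_debugfs hunk above replaces hand-counted strncmp() prefix tests with strstarts(); several of the old lengths were short of the literal (for example "force host" was compared over only 9 characters). A tiny userspace sketch of the prefix-helper idea:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* A strstarts()-style helper: the prefix length is computed, not counted
 * by hand, so it cannot drift out of sync with the literal. */
static bool starts_with(const char *str, const char *prefix)
{
    return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
    /* With the old hard-coded length of 9, "force hos" matched too. */
    printf("%d\n", starts_with("force host", "force host"));   /* 1 */
    printf("%d\n", starts_with("force hos", "force host"));    /* 0 */
    return 0;
}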
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 23d474d3d7f4..883a9adfdfff 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb)
2663 if (musb->port_mode == MUSB_PORT_MODE_GADGET) 2663 if (musb->port_mode == MUSB_PORT_MODE_GADGET)
2664 return; 2664 return;
2665 usb_remove_hcd(musb->hcd); 2665 usb_remove_hcd(musb->hcd);
2666 musb->hcd = NULL;
2667} 2666}
2668 2667
2669void musb_host_free(struct musb *musb) 2668void musb_host_free(struct musb *musb)
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 699e38c73d82..697a741a0cb1 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -338,7 +338,6 @@ static void mv_otg_update_inputs(struct mv_otg *mvotg)
338static void mv_otg_update_state(struct mv_otg *mvotg) 338static void mv_otg_update_state(struct mv_otg *mvotg)
339{ 339{
340 struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl; 340 struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
341 struct usb_phy *phy = &mvotg->phy;
342 int old_state = mvotg->phy.otg->state; 341 int old_state = mvotg->phy.otg->state;
343 342
344 switch (old_state) { 343 switch (old_state) {
@@ -858,10 +857,10 @@ static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
858{ 857{
859 struct mv_otg *mvotg = platform_get_drvdata(pdev); 858 struct mv_otg *mvotg = platform_get_drvdata(pdev);
860 859
861 if (mvotg->phy.state != OTG_STATE_B_IDLE) { 860 if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) {
862 dev_info(&pdev->dev, 861 dev_info(&pdev->dev,
863 "OTG state is not B_IDLE, it is %d!\n", 862 "OTG state is not B_IDLE, it is %d!\n",
864 mvotg->phy.state); 863 mvotg->phy.otg->state);
865 return -EAGAIN; 864 return -EAGAIN;
866 } 865 }
867 866
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index b4066a001ba0..ccfdfb24b240 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list,
34 return phy; 34 return phy;
35 } 35 }
36 36
37 return ERR_PTR(-ENODEV); 37 return ERR_PTR(-EPROBE_DEFER);
38} 38}
39 39
40static struct usb_phy *__usb_find_phy_dev(struct device *dev, 40static struct usb_phy *__usb_find_phy_dev(struct device *dev,
@@ -59,6 +59,9 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
59{ 59{
60 struct usb_phy *phy; 60 struct usb_phy *phy;
61 61
62 if (!of_device_is_available(node))
63 return ERR_PTR(-ENODEV);
64
62 list_for_each_entry(phy, &phy_list, head) { 65 list_for_each_entry(phy, &phy_list, head) {
63 if (node != phy->dev->of_node) 66 if (node != phy->dev->of_node)
64 continue; 67 continue;
@@ -66,7 +69,7 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
66 return phy; 69 return phy;
67 } 70 }
68 71
69 return ERR_PTR(-ENODEV); 72 return ERR_PTR(-EPROBE_DEFER);
70} 73}
71 74
72static void devm_usb_phy_release(struct device *dev, void *res) 75static void devm_usb_phy_release(struct device *dev, void *res)
@@ -190,10 +193,13 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
190 spin_lock_irqsave(&phy_lock, flags); 193 spin_lock_irqsave(&phy_lock, flags);
191 194
192 phy = __of_usb_find_phy(node); 195 phy = __of_usb_find_phy(node);
193 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { 196 if (IS_ERR(phy)) {
194 if (!IS_ERR(phy)) 197 devres_free(ptr);
195 phy = ERR_PTR(-EPROBE_DEFER); 198 goto err1;
199 }
196 200
201 if (!try_module_get(phy->dev->driver->owner)) {
202 phy = ERR_PTR(-ENODEV);
197 devres_free(ptr); 203 devres_free(ptr);
198 goto err1; 204 goto err1;
199 } 205 }
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 8d7fc48b1f30..29fa1c3d0089 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -46,6 +46,8 @@ static struct console usbcons;
46 * ------------------------------------------------------------ 46 * ------------------------------------------------------------
47 */ 47 */
48 48
49static const struct tty_operations usb_console_fake_tty_ops = {
50};
49 51
50/* 52/*
51 * The parsing of the command line works exactly like the 53 * The parsing of the command line works exactly like the
@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
137 goto reset_open_count; 139 goto reset_open_count;
138 } 140 }
139 kref_init(&tty->kref); 141 kref_init(&tty->kref);
140 tty_port_tty_set(&port->port, tty);
141 tty->driver = usb_serial_tty_driver; 142 tty->driver = usb_serial_tty_driver;
142 tty->index = co->index; 143 tty->index = co->index;
144 init_ldsem(&tty->ldisc_sem);
145 INIT_LIST_HEAD(&tty->tty_files);
146 kref_get(&tty->driver->kref);
147 tty->ops = &usb_console_fake_tty_ops;
143 if (tty_init_termios(tty)) { 148 if (tty_init_termios(tty)) {
144 retval = -ENOMEM; 149 retval = -ENOMEM;
145 goto free_tty; 150 goto put_tty;
146 } 151 }
152 tty_port_tty_set(&port->port, tty);
147 } 153 }
148 154
149 /* only call the device specific open if this 155 /* only call the device specific open if this
@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
161 serial->type->set_termios(tty, port, &dummy); 167 serial->type->set_termios(tty, port, &dummy);
162 168
163 tty_port_tty_set(&port->port, NULL); 169 tty_port_tty_set(&port->port, NULL);
164 kfree(tty); 170 tty_kref_put(tty);
165 } 171 }
166 set_bit(ASYNCB_INITIALIZED, &port->port.flags); 172 set_bit(ASYNCB_INITIALIZED, &port->port.flags);
167 } 173 }
@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
177 183
178 fail: 184 fail:
179 tty_port_tty_set(&port->port, NULL); 185 tty_port_tty_set(&port->port, NULL);
180 free_tty: 186 put_tty:
181 kfree(tty); 187 tty_kref_put(tty);
182 reset_open_count: 188 reset_open_count:
183 port->port.count = 0; 189 port->port.count = 0;
184 usb_autopm_put_interface(serial->interface); 190 usb_autopm_put_interface(serial->interface);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 6c4eb3cf5efd..f4c56fc1a9f6 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ 121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ 122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
123 { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ 123 { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
124 { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
124 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 125 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
125 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 126 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
126 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 127 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
128 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
127 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 129 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
128 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 130 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
129 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 131 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 1bd192290b08..ccf1df7c4b80 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -286,7 +286,7 @@ static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
286 286
287 res = usb_submit_urb(port->read_urbs[index], mem_flags); 287 res = usb_submit_urb(port->read_urbs[index], mem_flags);
288 if (res) { 288 if (res) {
289 if (res != -EPERM) { 289 if (res != -EPERM && res != -ENODEV) {
290 dev_err(&port->dev, 290 dev_err(&port->dev,
291 "%s - usb_submit_urb failed: %d\n", 291 "%s - usb_submit_urb failed: %d\n",
292 __func__, res); 292 __func__, res);
@@ -373,7 +373,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
373 __func__, urb->status); 373 __func__, urb->status);
374 return; 374 return;
375 default: 375 default:
376 dev_err(&port->dev, "%s - nonzero urb status: %d\n", 376 dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
377 __func__, urb->status); 377 __func__, urb->status);
378 goto resubmit; 378 goto resubmit;
379 } 379 }
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 077c714f1285..e07b15ed5814 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
410 } 410 }
411 port = serial->port[msg->port]; 411 port = serial->port[msg->port];
412 p_priv = usb_get_serial_port_data(port); 412 p_priv = usb_get_serial_port_data(port);
413 if (!p_priv)
414 goto resubmit;
413 415
414 /* Update handshaking pin state information */ 416 /* Update handshaking pin state information */
415 old_dcd_state = p_priv->dcd_state; 417 old_dcd_state = p_priv->dcd_state;
@@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
420 422
421 if (old_dcd_state != p_priv->dcd_state) 423 if (old_dcd_state != p_priv->dcd_state)
422 tty_port_tty_hangup(&port->port, true); 424 tty_port_tty_hangup(&port->port, true);
423 425resubmit:
424 /* Resubmit urb so we continue receiving */ 426 /* Resubmit urb so we continue receiving */
425 err = usb_submit_urb(urb, GFP_ATOMIC); 427 err = usb_submit_urb(urb, GFP_ATOMIC);
426 if (err != 0) 428 if (err != 0)
@@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
527 } 529 }
528 port = serial->port[msg->port]; 530 port = serial->port[msg->port];
529 p_priv = usb_get_serial_port_data(port); 531 p_priv = usb_get_serial_port_data(port);
532 if (!p_priv)
533 goto resubmit;
530 534
531 /* Update handshaking pin state information */ 535 /* Update handshaking pin state information */
532 old_dcd_state = p_priv->dcd_state; 536 old_dcd_state = p_priv->dcd_state;
@@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
537 541
538 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 542 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
539 tty_port_tty_hangup(&port->port, true); 543 tty_port_tty_hangup(&port->port, true);
540 544resubmit:
541 /* Resubmit urb so we continue receiving */ 545 /* Resubmit urb so we continue receiving */
542 err = usb_submit_urb(urb, GFP_ATOMIC); 546 err = usb_submit_urb(urb, GFP_ATOMIC);
543 if (err != 0) 547 if (err != 0)
@@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
607 } 611 }
608 port = serial->port[msg->portNumber]; 612 port = serial->port[msg->portNumber];
609 p_priv = usb_get_serial_port_data(port); 613 p_priv = usb_get_serial_port_data(port);
614 if (!p_priv)
615 goto resubmit;
610 616
611 /* Update handshaking pin state information */ 617 /* Update handshaking pin state information */
612 old_dcd_state = p_priv->dcd_state; 618 old_dcd_state = p_priv->dcd_state;
@@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
617 623
618 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 624 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
619 tty_port_tty_hangup(&port->port, true); 625 tty_port_tty_hangup(&port->port, true);
620 626resubmit:
621 /* Resubmit urb so we continue receiving */ 627 /* Resubmit urb so we continue receiving */
622 err = usb_submit_urb(urb, GFP_ATOMIC); 628 err = usb_submit_urb(urb, GFP_ATOMIC);
623 if (err != 0) 629 if (err != 0)
@@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
855 861
856 port = serial->port[0]; 862 port = serial->port[0];
857 p_priv = usb_get_serial_port_data(port); 863 p_priv = usb_get_serial_port_data(port);
864 if (!p_priv)
865 goto resubmit;
858 866
859 /* Update handshaking pin state information */ 867 /* Update handshaking pin state information */
860 old_dcd_state = p_priv->dcd_state; 868 old_dcd_state = p_priv->dcd_state;
@@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
865 873
866 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 874 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
867 tty_port_tty_hangup(&port->port, true); 875 tty_port_tty_hangup(&port->port, true);
868 876resubmit:
869 /* Resubmit urb so we continue receiving */ 877 /* Resubmit urb so we continue receiving */
870 err = usb_submit_urb(urb, GFP_ATOMIC); 878 err = usb_submit_urb(urb, GFP_ATOMIC);
871 if (err != 0) 879 if (err != 0)
@@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
926 934
927 port = serial->port[msg->port]; 935 port = serial->port[msg->port];
928 p_priv = usb_get_serial_port_data(port); 936 p_priv = usb_get_serial_port_data(port);
937 if (!p_priv)
938 goto resubmit;
929 939
930 /* Update handshaking pin state information */ 940 /* Update handshaking pin state information */
931 old_dcd_state = p_priv->dcd_state; 941 old_dcd_state = p_priv->dcd_state;
@@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
934 944
935 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 945 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
936 tty_port_tty_hangup(&port->port, true); 946 tty_port_tty_hangup(&port->port, true);
937 947resubmit:
938 /* Resubmit urb so we continue receiving */ 948 /* Resubmit urb so we continue receiving */
939 err = usb_submit_urb(urb, GFP_ATOMIC); 949 err = usb_submit_urb(urb, GFP_ATOMIC);
940 if (err != 0) 950 if (err != 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7a4c21b4f676..efdcee15b520 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
234 234
235#define QUALCOMM_VENDOR_ID 0x05C6 235#define QUALCOMM_VENDOR_ID 0x05C6
236 236
237#define SIERRA_VENDOR_ID 0x1199
238
237#define CMOTECH_VENDOR_ID 0x16d8 239#define CMOTECH_VENDOR_ID 0x16d8
238#define CMOTECH_PRODUCT_6001 0x6001 240#define CMOTECH_PRODUCT_6001 0x6001
239#define CMOTECH_PRODUCT_CMU_300 0x6002 241#define CMOTECH_PRODUCT_CMU_300 0x6002
@@ -512,7 +514,7 @@ enum option_blacklist_reason {
512 OPTION_BLACKLIST_RESERVED_IF = 2 514 OPTION_BLACKLIST_RESERVED_IF = 2
513}; 515};
514 516
515#define MAX_BL_NUM 8 517#define MAX_BL_NUM 11
516struct option_blacklist_info { 518struct option_blacklist_info {
517 /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */ 519 /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
518 const unsigned long sendsetup; 520 const unsigned long sendsetup;
@@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
601 .reserved = BIT(1) | BIT(5), 603 .reserved = BIT(1) | BIT(5),
602}; 604};
603 605
606static const struct option_blacklist_info sierra_mc73xx_blacklist = {
607 .sendsetup = BIT(0) | BIT(2),
608 .reserved = BIT(8) | BIT(10) | BIT(11),
609};
610
604static const struct usb_device_id option_ids[] = { 611static const struct usb_device_id option_ids[] = {
605 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 612 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
606 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 613 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = {
1098 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1105 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1106 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1100 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1107 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1108 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
1109 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
1101 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1110 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1102 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1111 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1103 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1112 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index cb3e14780a7e..9c63897b3a56 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
142 {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */ 142 {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
143 {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */ 143 {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
144 {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ 144 {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
145 {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */
146 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ 145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
147 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ 146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
148 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ 147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 8a6f371ed6e7..9893d696fc97 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -69,16 +69,39 @@ static int uas_use_uas_driver(struct usb_interface *intf,
69 return 0; 69 return 0;
70 70
71 /* 71 /*
72 * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is 72 * ASMedia has a number of usb3 to sata bridge chips, at the time of
73 * broken on the ASM1051, use the number of streams to differentiate. 73 * this writing the following versions exist:
74 * New ASM1053-s also support 32 streams, but have a different prod-id. 74 * ASM1051 - no uas support version
75 * ASM1051 - with broken (*) uas support
76 * ASM1053 - with working uas support
77 * ASM1153 - with working uas support
78 *
79 * Devices with these chips re-use a number of device-ids over the
80 * entire line, so the device-id is useless to determine if we're
81 * dealing with an ASM1051 (which we want to avoid).
82 *
83 * The ASM1153 can be identified by config.MaxPower == 0,
 84 * whereas the ASM105x models have config.MaxPower == 36.
85 *
86 * Differentiating between the ASM1053 and ASM1051 is trickier, when
87 * connected over USB-3 we can look at the number of streams supported,
 88 * ASM1051 supports 32 streams, whereas early ASM1053 versions support
89 * 16 streams, newer ASM1053-s also support 32 streams, but have a
90 * different prod-id.
91 *
92 * (*) ASM1051 chips do work with UAS with some disks (with the
93 * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks
75 */ 94 */
76 if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && 95 if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
77 le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) { 96 (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 ||
78 if (udev->speed < USB_SPEED_SUPER) { 97 le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) {
98 if (udev->actconfig->desc.bMaxPower == 0) {
99 /* ASM1153, do nothing */
100 } else if (udev->speed < USB_SPEED_SUPER) {
79 /* No streams info, assume ASM1051 */ 101 /* No streams info, assume ASM1051 */
80 flags |= US_FL_IGNORE_UAS; 102 flags |= US_FL_IGNORE_UAS;
81 } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { 103 } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
104 /* Possibly an ASM1051, disable uas */
82 flags |= US_FL_IGNORE_UAS; 105 flags |= US_FL_IGNORE_UAS;
83 } 106 }
84 } 107 }
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 18a283d6de1c..6df4357d9ee3 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -40,6 +40,16 @@
40 * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org> 40 * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
41 */ 41 */
42 42
43/*
44 * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
45 * commands in UAS mode. Observed with the 1.28 firmware; are there others?
46 */
47UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
48 "Apricorn",
49 "",
50 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
51 US_FL_IGNORE_UAS),
52
43/* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ 53/* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
44UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999, 54UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999,
45 "Seagate", 55 "Seagate",
@@ -68,6 +78,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
68 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 78 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
69 US_FL_NO_ATA_1X), 79 US_FL_NO_ATA_1X),
70 80
 81/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */
82UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
83 "Seagate",
84 "Backup Plus",
85 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
86 US_FL_NO_ATA_1X),
87
88/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
89UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
90 "Seagate",
91 "Backup Plus Desk",
92 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
93 US_FL_NO_ATA_1X),
94
71/* https://bbs.archlinux.org/viewtopic.php?id=183190 */ 95/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
72UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999, 96UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
73 "Seagate", 97 "Seagate",
@@ -82,6 +106,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
82 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 106 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
83 US_FL_NO_ATA_1X), 107 US_FL_NO_ATA_1X),
84 108
109/* Reported-by: G. Richard Bellamy <rbellamy@pteradigm.com> */
110UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
111 "Seagate",
112 "BUP Fast HDD",
113 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
114 US_FL_NO_ATA_1X),
115
85/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ 116/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
86UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, 117UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
87 "JMicron", 118 "JMicron",
@@ -89,14 +120,6 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
89 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 120 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
90 US_FL_NO_REPORT_OPCODES), 121 US_FL_NO_REPORT_OPCODES),
91 122
92/* Most ASM1051 based devices have issues with uas, blacklist them all */
93/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
94UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
95 "ASMedia",
96 "ASM1051",
97 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
98 US_FL_IGNORE_UAS),
99
100/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 123/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
101UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, 124UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
102 "VIA", 125 "VIA",
@@ -104,6 +127,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
104 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 127 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
105 US_FL_NO_ATA_1X), 128 US_FL_NO_ATA_1X),
106 129
130/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
131UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
132 "JMicron",
133 "JMS566",
134 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
135 US_FL_NO_REPORT_OPCODES),
136
107/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 137/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
108UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, 138UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
109 "Hitachi", 139 "Hitachi",
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 255201f22126..7cc0122a18ce 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
840 840
841static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 841static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
842{ 842{
843 u8 type;
844 struct vfio_pci_device *vdev; 843 struct vfio_pci_device *vdev;
845 struct iommu_group *group; 844 struct iommu_group *group;
846 int ret; 845 int ret;
847 846
848 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type); 847 if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
849 if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
850 return -EINVAL; 848 return -EINVAL;
851 849
852 group = iommu_group_get(&pdev->dev); 850 group = iommu_group_get(&pdev->dev);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 14419a8ccbb6..d415d69dc237 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
538 ++headcount; 538 ++headcount;
539 seg += in; 539 seg += in;
540 } 540 }
541 heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen); 541 heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
542 *iovcount = seg; 542 *iovcount = seg;
543 if (unlikely(log)) 543 if (unlikely(log))
544 *log_num = nlogs; 544 *log_num = nlogs;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 01c01cb3933f..d695b1673ae5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
911 return 0; 911 return 0;
912} 912}
913 913
914static int vhost_scsi_to_tcm_attr(int attr)
915{
916 switch (attr) {
917 case VIRTIO_SCSI_S_SIMPLE:
918 return TCM_SIMPLE_TAG;
919 case VIRTIO_SCSI_S_ORDERED:
920 return TCM_ORDERED_TAG;
921 case VIRTIO_SCSI_S_HEAD:
922 return TCM_HEAD_TAG;
923 case VIRTIO_SCSI_S_ACA:
924 return TCM_ACA_TAG;
925 default:
926 break;
927 }
928 return TCM_SIMPLE_TAG;
929}
930
914static void tcm_vhost_submission_work(struct work_struct *work) 931static void tcm_vhost_submission_work(struct work_struct *work)
915{ 932{
916 struct tcm_vhost_cmd *cmd = 933 struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
936 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, 953 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
937 cmd->tvc_cdb, &cmd->tvc_sense_buf[0], 954 cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
938 cmd->tvc_lun, cmd->tvc_exp_data_len, 955 cmd->tvc_lun, cmd->tvc_exp_data_len,
939 cmd->tvc_task_attr, cmd->tvc_data_direction, 956 vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
940 TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, 957 cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
941 NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count); 958 sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
959 cmd->tvc_prot_sgl_count);
942 if (rc < 0) { 960 if (rc < 0) {
943 transport_send_check_condition_and_sense(se_cmd, 961 transport_send_check_condition_and_sense(se_cmd,
944 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 962 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ed71b5347a76..cb807d0ea498 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -713,9 +713,13 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
713 r = -EFAULT; 713 r = -EFAULT;
714 break; 714 break;
715 } 715 }
716 if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) || 716
717 (a.used_user_addr & (sizeof *vq->used->ring - 1)) || 717 /* Make sure it's safe to cast pointers to vring types. */
718 (a.log_guest_addr & (sizeof *vq->used->ring - 1))) { 718 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
719 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
720 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
721 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
722 (a.log_guest_addr & (sizeof(u64) - 1))) {
719 r = -EINVAL; 723 r = -EINVAL;
720 break; 724 break;
721 } 725 }
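
The vhost hunk above replaces sizeof-based masks with the ABI alignment constants and pins the underlying assumption down at compile time with BUILD_BUG_ON. A userspace sketch of the same checks follows; the ring layouts here are simplified stand-ins, not the kernel's virtio definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE  4

/* Simplified ring layouts, only to show the alignment assumption. */
struct avail_ring {
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
};

struct used_elem {
    uint32_t id;
    uint32_t len;
};

struct used_ring {
    uint16_t flags;
    uint16_t idx;
    struct used_elem ring[];
};

/* BUILD_BUG_ON in the kernel; static_assert here. */
static_assert(_Alignof(struct avail_ring) <= VRING_AVAIL_ALIGN_SIZE,
              "avail ring alignment assumption");
static_assert(_Alignof(struct used_ring) <= VRING_USED_ALIGN_SIZE,
              "used ring alignment assumption");

/* Power-of-two alignment check: addr & (align - 1) must be zero. */
static int is_aligned(uint64_t addr, uint64_t align)
{
    return (addr & (align - 1)) == 0;
}

int main(void)
{
    printf("%d %d\n",
           is_aligned(0x1000, VRING_USED_ALIGN_SIZE),   /* 1 */
           is_aligned(0x1002, VRING_USED_ALIGN_SIZE));  /* 0 */
    return 0;
}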
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 1c29bd19e3d5..0e5fde1d3ffb 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
636 err = broadsheet_spiflash_read_range(par, start_sector_addr, 636 err = broadsheet_spiflash_read_range(par, start_sector_addr,
637 data_start_addr, sector_buffer); 637 data_start_addr, sector_buffer);
638 if (err) 638 if (err)
639 return err; 639 goto out;
640 } 640 }
641 641
642 /* now we copy our data into the right place in the sector buffer */ 642 /* now we copy our data into the right place in the sector buffer */
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
657 err = broadsheet_spiflash_read_range(par, tail_start_addr, 657 err = broadsheet_spiflash_read_range(par, tail_start_addr,
658 tail_len, sector_buffer + tail_start_addr); 658 tail_len, sector_buffer + tail_start_addr);
659 if (err) 659 if (err)
660 return err; 660 goto out;
661 } 661 }
662 662
663 /* if we got here we have the full sector that we want to rewrite. */ 663 /* if we got here we have the full sector that we want to rewrite. */
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
665 /* first erase the sector */ 665 /* first erase the sector */
666 err = broadsheet_spiflash_erase_sector(par, start_sector_addr); 666 err = broadsheet_spiflash_erase_sector(par, start_sector_addr);
667 if (err) 667 if (err)
668 return err; 668 goto out;
669 669
670 /* now write it */ 670 /* now write it */
671 err = broadsheet_spiflash_write_sector(par, start_sector_addr, 671 err = broadsheet_spiflash_write_sector(par, start_sector_addr,
672 sector_buffer, sector_size); 672 sector_buffer, sector_size);
673out:
674 kfree(sector_buffer);
673 return err; 675 return err;
674} 676}
675 677
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 900aa4ecd617..d6cab1fd9a47 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -83,9 +83,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
83 cancel_delayed_work_sync(&info->deferred_work); 83 cancel_delayed_work_sync(&info->deferred_work);
84 84
85 /* Run it immediately */ 85 /* Run it immediately */
86 err = schedule_delayed_work(&info->deferred_work, 0); 86 schedule_delayed_work(&info->deferred_work, 0);
87 mutex_unlock(&inode->i_mutex); 87 mutex_unlock(&inode->i_mutex);
88 return err; 88
89 return 0;
89} 90}
90EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); 91EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
91 92
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 87accdb59c81..ac83ef5cfd7d 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -132,7 +132,6 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
132 .mX_max = 127, 132 .mX_max = 127,
133 .fint_min = 500000, 133 .fint_min = 500000,
134 .fint_max = 2500000, 134 .fint_max = 2500000,
135 .clkdco_max = 1800000000,
136 135
137 .clkdco_min = 500000000, 136 .clkdco_min = 500000000,
138 .clkdco_low = 1000000000, 137 .clkdco_low = 1000000000,
@@ -156,7 +155,6 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
156 .mX_max = 127, 155 .mX_max = 127,
157 .fint_min = 620000, 156 .fint_min = 620000,
158 .fint_max = 2500000, 157 .fint_max = 2500000,
159 .clkdco_max = 1800000000,
160 158
161 .clkdco_min = 750000000, 159 .clkdco_min = 750000000,
162 .clkdco_low = 1500000000, 160 .clkdco_low = 1500000000,
diff --git a/drivers/video/fbdev/omap2/dss/pll.c b/drivers/video/fbdev/omap2/dss/pll.c
index 50bc62c5d367..335ffac224b9 100644
--- a/drivers/video/fbdev/omap2/dss/pll.c
+++ b/drivers/video/fbdev/omap2/dss/pll.c
@@ -97,7 +97,8 @@ int dss_pll_enable(struct dss_pll *pll)
97 return 0; 97 return 0;
98 98
99err_enable: 99err_enable:
100 regulator_disable(pll->regulator); 100 if (pll->regulator)
101 regulator_disable(pll->regulator);
101err_reg: 102err_reg:
102 clk_disable_unprepare(pll->clkin); 103 clk_disable_unprepare(pll->clkin);
103 return r; 104 return r;
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index d51a983075bc..5c2ccab5a958 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -342,6 +342,8 @@ static void sdi_init_output(struct platform_device *pdev)
342 out->output_type = OMAP_DISPLAY_TYPE_SDI; 342 out->output_type = OMAP_DISPLAY_TYPE_SDI;
343 out->name = "sdi.0"; 343 out->name = "sdi.0";
344 out->dispc_channel = OMAP_DSS_CHANNEL_LCD; 344 out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
345 /* We have SDI only on OMAP3, where it's on port 1 */
346 out->port_num = 1;
345 out->ops.sdi = &sdi_ops; 347 out->ops.sdi = &sdi_ops;
346 out->owner = THIS_MODULE; 348 out->owner = THIS_MODULE;
347 349
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 92cac803dee3..1085c0432158 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -402,7 +402,7 @@ static int __init simplefb_init(void)
402 if (ret) 402 if (ret)
403 return ret; 403 return ret;
404 404
405 if (IS_ENABLED(CONFIG_OF) && of_chosen) { 405 if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
406 for_each_child_of_node(of_chosen, np) { 406 for_each_child_of_node(of_chosen, np) {
407 if (of_device_is_compatible(np, "simple-framebuffer")) 407 if (of_device_is_compatible(np, "simple-framebuffer"))
408 of_platform_device_create(np, NULL, NULL); 408 of_platform_device_create(np, NULL, NULL);
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 940cd196eef5..10fbfd8ab963 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,6 +21,21 @@ static bool nologo;
21module_param(nologo, bool, 0); 21module_param(nologo, bool, 0);
22MODULE_PARM_DESC(nologo, "Disables startup logo"); 22MODULE_PARM_DESC(nologo, "Disables startup logo");
23 23
24/*
25 * Logos are located in the initdata, and will be freed in kernel_init.
26 * Use late_init to mark the logos as freed to prevent any further use.
27 */
28
29static bool logos_freed;
30
31static int __init fb_logo_late_init(void)
32{
33 logos_freed = true;
34 return 0;
35}
36
37late_initcall(fb_logo_late_init);
38
24/* logo's are marked __initdata. Use __init_refok to tell 39/* logo's are marked __initdata. Use __init_refok to tell
25 * modpost that it is intended that this function uses data 40 * modpost that it is intended that this function uses data
26 * marked __initdata. 41 * marked __initdata.
@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
29{ 44{
30 const struct linux_logo *logo = NULL; 45 const struct linux_logo *logo = NULL;
31 46
32 if (nologo) 47 if (nologo || logos_freed)
33 return NULL; 48 return NULL;
34 49
35 if (depth >= 1) { 50 if (depth >= 1) {
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 2ef9529809d8..9756f21b809e 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev)
282 282
283 vp_free_vectors(vdev); 283 vp_free_vectors(vdev);
284 kfree(vp_dev->vqs); 284 kfree(vp_dev->vqs);
285 vp_dev->vqs = NULL;
285} 286}
286 287
287static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, 288static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
@@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
421 return 0; 422 return 0;
422} 423}
423 424
424void virtio_pci_release_dev(struct device *_d)
425{
426 /*
427 * No need for a release method as we allocate/free
428 * all devices together with the pci devices.
429 * Provide an empty one to avoid getting a warning from core.
430 */
431}
432
433#ifdef CONFIG_PM_SLEEP 425#ifdef CONFIG_PM_SLEEP
434static int virtio_pci_freeze(struct device *dev) 426static int virtio_pci_freeze(struct device *dev)
435{ 427{
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index adddb647b21d..5a497289b7e9 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev);
126 * - ignore the affinity request if we're using INTX 126 * - ignore the affinity request if we're using INTX
127 */ 127 */
128int vp_set_vq_affinity(struct virtqueue *vq, int cpu); 128int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
129void virtio_pci_release_dev(struct device *);
130 129
131int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 130int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
132 const struct pci_device_id *id); 131 const struct pci_device_id *id);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6c76f0f5658c..a5486e65e04b 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
211 .set_vq_affinity = vp_set_vq_affinity, 211 .set_vq_affinity = vp_set_vq_affinity,
212}; 212};
213 213
214static void virtio_pci_release_dev(struct device *_d)
215{
216 struct virtio_device *vdev = dev_to_virtio(_d);
217 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
218
219 /* As struct device is a kobject, it's not safe to
220 * free the memory (including the reference counter itself)
 221 * until its release callback. */
222 kfree(vp_dev);
223}
224
214/* the PCI probing function */ 225/* the PCI probing function */
215int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 226int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
216 const struct pci_device_id *id) 227 const struct pci_device_id *id)
@@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
302 pci_iounmap(pci_dev, vp_dev->ioaddr); 313 pci_iounmap(pci_dev, vp_dev->ioaddr);
303 pci_release_regions(pci_dev); 314 pci_release_regions(pci_dev);
304 pci_disable_device(pci_dev); 315 pci_disable_device(pci_dev);
305 kfree(vp_dev);
306} 316}
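
The virtio-pci hunks above move kfree() of the device structure out of the remove path and into the struct device release callback, because a refcounted object can outlive remove(). A small userspace sketch of that release-callback pattern, with invented types:

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object with a release hook. */
struct object {
    int refcount;
    void (*release)(struct object *);
};

static void object_get(struct object *o)
{
    o->refcount++;
}

static void object_put(struct object *o)
{
    if (--o->refcount == 0)
        o->release(o);          /* last reference gone: run the release hook */
}

struct my_dev {
    struct object obj;          /* first member, so the cast below is valid */
    int state;
};

static void my_dev_release(struct object *o)
{
    struct my_dev *d = (struct my_dev *)o;

    printf("releasing my_dev\n");
    free(d);                    /* the only place the memory is freed */
}

int main(void)
{
    struct my_dev *d = malloc(sizeof(*d));

    if (!d)
        return 1;
    d->obj.refcount = 1;
    d->obj.release = my_dev_release;
    d->state = 0;

    object_get(&d->obj);        /* someone else still holds a reference */
    object_put(&d->obj);        /* "remove" path: drop our ref, no free here */
    object_put(&d->obj);        /* final put: release callback frees it */
    return 0;
}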
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 2d3e32ebfd15..8729cf68d2fe 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1552{ 1552{
1553 int ret; 1553 int ret;
1554 int type; 1554 int type;
1555 struct btrfs_tree_block_info *info;
1556 struct btrfs_extent_inline_ref *eiref; 1555 struct btrfs_extent_inline_ref *eiref;
1557 1556
1558 if (*ptr == (unsigned long)-1) 1557 if (*ptr == (unsigned long)-1)
@@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1573 } 1572 }
1574 1573
1575 /* we can treat both ref types equally here */ 1574 /* we can treat both ref types equally here */
1576 info = (struct btrfs_tree_block_info *)(ei + 1);
1577 *out_root = btrfs_extent_inline_ref_offset(eb, eiref); 1575 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1578 *out_level = btrfs_tree_block_level(eb, info); 1576
1577 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1578 struct btrfs_tree_block_info *info;
1579
1580 info = (struct btrfs_tree_block_info *)(ei + 1);
1581 *out_level = btrfs_tree_block_level(eb, info);
1582 } else {
1583 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1584 *out_level = (u8)key->offset;
1585 }
1579 1586
1580 if (ret == 1) 1587 if (ret == 1)
1581 *ptr = (unsigned long)-1; 1588 *ptr = (unsigned long)-1;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 054577bddaf2..de4e70fb3cbb 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
1857{ 1857{
1858 struct btrfs_delayed_node *delayed_node; 1858 struct btrfs_delayed_node *delayed_node;
1859 1859
1860 /*
1861 * we don't do delayed inode updates during log recovery because it
1862 * leads to enospc problems. This means we also can't do
1863 * delayed inode refs
1864 */
1865 if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
1866 return -EAGAIN;
1867
1860 delayed_node = btrfs_get_or_create_delayed_node(inode); 1868 delayed_node = btrfs_get_or_create_delayed_node(inode);
1861 if (IS_ERR(delayed_node)) 1869 if (IS_ERR(delayed_node))
1862 return PTR_ERR(delayed_node); 1870 return PTR_ERR(delayed_node);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a80b97100d90..15116585e714 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
3139 struct extent_buffer *leaf; 3139 struct extent_buffer *leaf;
3140 3140
3141 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 3141 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3142 if (ret < 0) 3142 if (ret) {
3143 if (ret > 0)
3144 ret = -ENOENT;
3143 goto fail; 3145 goto fail;
3144 BUG_ON(ret); /* Corruption */ 3146 }
3145 3147
3146 leaf = path->nodes[0]; 3148 leaf = path->nodes[0];
3147 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 3149 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
3149 btrfs_mark_buffer_dirty(leaf); 3151 btrfs_mark_buffer_dirty(leaf);
3150 btrfs_release_path(path); 3152 btrfs_release_path(path);
3151fail: 3153fail:
3152 if (ret) { 3154 if (ret)
3153 btrfs_abort_transaction(trans, root, ret); 3155 btrfs_abort_transaction(trans, root, ret);
3154 return ret; 3156 return ret;
3155 }
3156 return 0;
3157 3157
3158} 3158}
3159 3159
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e687bb0dc73a..8bf326affb94 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6255 6255
6256out_fail: 6256out_fail:
6257 btrfs_end_transaction(trans, root); 6257 btrfs_end_transaction(trans, root);
6258 if (drop_on_err) 6258 if (drop_on_err) {
6259 inode_dec_link_count(inode);
6259 iput(inode); 6260 iput(inode);
6261 }
6260 btrfs_balance_delayed_items(root); 6262 btrfs_balance_delayed_items(root);
6261 btrfs_btree_balance_dirty(root); 6263 btrfs_btree_balance_dirty(root);
6262 return err; 6264 return err;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f2bb13a23f86..9e1569ffbf6e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
2607 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, 2607 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2608 flags, gen, mirror_num, 2608 flags, gen, mirror_num,
2609 have_csum ? csum : NULL); 2609 have_csum ? csum : NULL);
2610skip:
2611 if (ret) 2610 if (ret)
2612 return ret; 2611 return ret;
2612skip:
2613 len -= l; 2613 len -= l;
2614 logical += l; 2614 logical += l;
2615 physical += l; 2615 physical += l;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index f5013d92a7e6..c81c0e004588 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1416 } 1416 }
1417 } 1417 }
1418 1418
1419 dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n", 1419 dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
1420 inode, ceph_vinop(inode), len, locked_page); 1420 inode, ceph_vinop(inode), len, locked_page);
1421 1421
1422 if (len > 0) { 1422 if (len > 0) {
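
The one-character ceph format fix above is the usual size_t pitfall: %lu only happens to match size_t where it has the same width as unsigned long, while %zu is correct everywhere. A trivial userspace illustration, nothing ceph-specific about it:

#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t len = strlen("inline data");

	/* %zu is the conversion defined for size_t; %lu only works on
	 * targets where size_t and unsigned long share a width */
	printf("len %zu\n", len);
	return 0;
}
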
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6e139111fdb2..22b289a3b1c4 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -661,16 +661,16 @@ set_credits(struct TCP_Server_Info *server, const int val)
661 server->ops->set_credits(server, val); 661 server->ops->set_credits(server, val);
662} 662}
663 663
664static inline __u64 664static inline __le64
665get_next_mid64(struct TCP_Server_Info *server) 665get_next_mid64(struct TCP_Server_Info *server)
666{ 666{
667 return server->ops->get_next_mid(server); 667 return cpu_to_le64(server->ops->get_next_mid(server));
668} 668}
669 669
670static inline __le16 670static inline __le16
671get_next_mid(struct TCP_Server_Info *server) 671get_next_mid(struct TCP_Server_Info *server)
672{ 672{
673 __u16 mid = get_next_mid64(server); 673 __u16 mid = server->ops->get_next_mid(server);
674 /* 674 /*
675 * The value in the SMB header should be little endian for easy 675 * The value in the SMB header should be little endian for easy
676 * on-the-wire decoding. 676 * on-the-wire decoding.
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index b333ff60781d..abae6dd2c6b9 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -926,6 +926,7 @@ cifs_NTtimeToUnix(__le64 ntutc)
926 926
927 /* Subtract the NTFS time offset, then convert to 1s intervals. */ 927 /* Subtract the NTFS time offset, then convert to 1s intervals. */
928 s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET; 928 s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
929 u64 abs_t;
929 930
930 /* 931 /*
931 * Unfortunately can not use normal 64 bit division on 32 bit arch, but 932 * Unfortunately can not use normal 64 bit division on 32 bit arch, but
@@ -933,13 +934,14 @@ cifs_NTtimeToUnix(__le64 ntutc)
933 * to special case them 934 * to special case them
934 */ 935 */
935 if (t < 0) { 936 if (t < 0) {
936 t = -t; 937 abs_t = -t;
937 ts.tv_nsec = (long)(do_div(t, 10000000) * 100); 938 ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100);
938 ts.tv_nsec = -ts.tv_nsec; 939 ts.tv_nsec = -ts.tv_nsec;
939 ts.tv_sec = -t; 940 ts.tv_sec = -abs_t;
940 } else { 941 } else {
941 ts.tv_nsec = (long)do_div(t, 10000000) * 100; 942 abs_t = t;
942 ts.tv_sec = t; 943 ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100;
944 ts.tv_sec = abs_t;
943 } 945 }
944 946
945 return ts; 947 return ts;
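
The new abs_t temporary exists because do_div() only accepts an unsigned 64-bit dividend, so the negative case has to divide the magnitude and restore the sign afterwards. A minimal userspace sketch of that split, with ordinary C division standing in for do_div() and the NTFS epoch offset assumed to be already subtracted:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct split { int64_t sec; long nsec; };

static struct split hundred_ns_to_sec_nsec(int64_t t)
{
	struct split out;
	/* divide the magnitude as an unsigned value, as do_div() requires */
	uint64_t abs_t = (t < 0) ? -(uint64_t)t : (uint64_t)t;

	out.nsec = (long)(abs_t % 10000000) * 100;   /* remainder in 100ns units */
	out.sec  = (int64_t)(abs_t / 10000000);
	if (t < 0) {
		out.nsec = -out.nsec;
		out.sec  = -out.sec;
	}
	return out;
}

int main(void)
{
	struct split a = hundred_ns_to_sec_nsec(12345678901LL);
	struct split b = hundred_ns_to_sec_nsec(-12345678901LL);

	printf("sec=%" PRId64 " nsec=%ld\n", a.sec, a.nsec);
	printf("sec=%" PRId64 " nsec=%ld\n", b.sec, b.nsec);
	return 0;
}
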
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 8eaf20a80649..c295338e0a98 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -69,7 +69,8 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
69 * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT 69 * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
70 * 70 *
71 * Find the dentry that matches "name". If there isn't one, create one. If it's 71 * Find the dentry that matches "name". If there isn't one, create one. If it's
72 * a negative dentry or the uniqueid changed, then drop it and recreate it. 72 * a negative dentry or the uniqueid or filetype(mode) changed,
73 * then drop it and recreate it.
73 */ 74 */
74static void 75static void
75cifs_prime_dcache(struct dentry *parent, struct qstr *name, 76cifs_prime_dcache(struct dentry *parent, struct qstr *name,
@@ -97,8 +98,11 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
97 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) 98 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
98 fattr->cf_uniqueid = CIFS_I(inode)->uniqueid; 99 fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
99 100
100 /* update inode in place if i_ino didn't change */ 101 /* update inode in place
101 if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { 102 * if both i_ino and i_mode didn't change */
103 if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
104 (inode->i_mode & S_IFMT) ==
105 (fattr->cf_mode & S_IFMT)) {
102 cifs_fattr_to_inode(inode, fattr); 106 cifs_fattr_to_inode(inode, fattr);
103 goto out; 107 goto out;
104 } 108 }
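
The extra condition above compares only the file-type bits of the mode before reusing a cached inode. For reference, the same S_IFMT masking in a plain userspace check; the two paths are arbitrary examples:

#include <stdio.h>
#include <sys/stat.h>

static int same_file_type(const struct stat *a, const struct stat *b)
{
	/* compare only the file-type bits, not permissions or the rest */
	return (a->st_mode & S_IFMT) == (b->st_mode & S_IFMT);
}

int main(void)
{
	struct stat a, b;

	if (stat("/etc/hostname", &a) == 0 && stat("/tmp", &b) == 0)
		printf("same type: %d\n", same_file_type(&a, &b));
	return 0;
}
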
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index f1cefc9763ed..689f035915cf 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -32,12 +32,14 @@
32static int 32static int
33check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid) 33check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
34{ 34{
35 __u64 wire_mid = le64_to_cpu(hdr->MessageId);
36
35 /* 37 /*
36 * Make sure that this really is an SMB, that it is a response, 38 * Make sure that this really is an SMB, that it is a response,
37 * and that the message ids match. 39 * and that the message ids match.
38 */ 40 */
39 if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) && 41 if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
40 (mid == hdr->MessageId)) { 42 (mid == wire_mid)) {
41 if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) 43 if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
42 return 0; 44 return 0;
43 else { 45 else {
@@ -51,11 +53,11 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
51 if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER) 53 if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER)
52 cifs_dbg(VFS, "Bad protocol string signature header %x\n", 54 cifs_dbg(VFS, "Bad protocol string signature header %x\n",
53 *(unsigned int *) hdr->ProtocolId); 55 *(unsigned int *) hdr->ProtocolId);
54 if (mid != hdr->MessageId) 56 if (mid != wire_mid)
55 cifs_dbg(VFS, "Mids do not match: %llu and %llu\n", 57 cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
56 mid, hdr->MessageId); 58 mid, wire_mid);
57 } 59 }
58 cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", hdr->MessageId); 60 cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
59 return 1; 61 return 1;
60} 62}
61 63
@@ -95,7 +97,7 @@ smb2_check_message(char *buf, unsigned int length)
95{ 97{
96 struct smb2_hdr *hdr = (struct smb2_hdr *)buf; 98 struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
97 struct smb2_pdu *pdu = (struct smb2_pdu *)hdr; 99 struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
98 __u64 mid = hdr->MessageId; 100 __u64 mid = le64_to_cpu(hdr->MessageId);
99 __u32 len = get_rfc1002_length(buf); 101 __u32 len = get_rfc1002_length(buf);
100 __u32 clc_len; /* calculated length */ 102 __u32 clc_len; /* calculated length */
101 int command; 103 int command;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 93fd0586f9ec..96b5d40a2ece 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -176,10 +176,11 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
176{ 176{
177 struct mid_q_entry *mid; 177 struct mid_q_entry *mid;
178 struct smb2_hdr *hdr = (struct smb2_hdr *)buf; 178 struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
179 __u64 wire_mid = le64_to_cpu(hdr->MessageId);
179 180
180 spin_lock(&GlobalMid_Lock); 181 spin_lock(&GlobalMid_Lock);
181 list_for_each_entry(mid, &server->pending_mid_q, qhead) { 182 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
182 if ((mid->mid == hdr->MessageId) && 183 if ((mid->mid == wire_mid) &&
183 (mid->mid_state == MID_REQUEST_SUBMITTED) && 184 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
184 (mid->command == hdr->Command)) { 185 (mid->command == hdr->Command)) {
185 spin_unlock(&GlobalMid_Lock); 186 spin_unlock(&GlobalMid_Lock);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ce858477002a..70867d54fb8b 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -110,7 +110,7 @@ struct smb2_hdr {
110 __le16 CreditRequest; /* CreditResponse */ 110 __le16 CreditRequest; /* CreditResponse */
111 __le32 Flags; 111 __le32 Flags;
112 __le32 NextCommand; 112 __le32 NextCommand;
113 __u64 MessageId; /* opaque - so can stay little endian */ 113 __le64 MessageId;
114 __le32 ProcessId; 114 __le32 ProcessId;
115 __u32 TreeId; /* opaque - so do not make little endian */ 115 __u32 TreeId; /* opaque - so do not make little endian */
116 __u64 SessionId; /* opaque - so do not make little endian */ 116 __u64 SessionId; /* opaque - so do not make little endian */
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 5111e7272db6..d4c5b6f109a7 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -490,7 +490,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer,
490 return temp; 490 return temp;
491 else { 491 else {
492 memset(temp, 0, sizeof(struct mid_q_entry)); 492 memset(temp, 0, sizeof(struct mid_q_entry));
493 temp->mid = smb_buffer->MessageId; /* always LE */ 493 temp->mid = le64_to_cpu(smb_buffer->MessageId);
494 temp->pid = current->pid; 494 temp->pid = current->pid;
495 temp->command = smb_buffer->Command; /* Always LE */ 495 temp->command = smb_buffer->Command; /* Always LE */
496 temp->when_alloc = jiffies; 496 temp->when_alloc = jiffies;
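
The MessageId changes across smb2pdu.h, smb2misc.c, smb2ops.c and smb2transport.c all follow one rule: keep the wire field in an explicit little-endian type and convert it exactly once at the boundary. A small userspace stand-in for le64_to_cpu(), decoding byte by byte so the result is host order on any CPU; the buffer contents are invented:

#include <stdio.h>
#include <stdint.h>

static uint64_t get_le64(const unsigned char *p)
{
	uint64_t v = 0;

	/* assemble from the most significant byte down, so the shifts
	 * produce the host-order value regardless of host endianness */
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* 0x0102030405060708 encoded little endian */
	const unsigned char wire[8] = { 0x08, 0x07, 0x06, 0x05,
					0x04, 0x03, 0x02, 0x01 };
	uint64_t mid = get_le64(wire);

	printf("mid = %llx\n", (unsigned long long)mid);
	return 0;
}
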
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e5d3eadf47b1..bed43081720f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5166 5166
5167 /* fallback to generic here if not in extents fmt */ 5167 /* fallback to generic here if not in extents fmt */
5168 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5168 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5169 return __generic_block_fiemap(inode, fieinfo, start, len, 5169 return generic_block_fiemap(inode, fieinfo, start, len,
5170 ext4_get_block); 5170 ext4_get_block);
5171 5171
5172 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 5172 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
5173 return -EBADR; 5173 return -EBADR;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 513c12cf444c..8131be8c0af3 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
273 * we determine this extent as a data or a hole according to whether the 273 * we determine this extent as a data or a hole according to whether the
274 * page cache has data or not. 274 * page cache has data or not.
275 */ 275 */
276static int ext4_find_unwritten_pgoff(struct inode *inode, int whence, 276static int ext4_find_unwritten_pgoff(struct inode *inode,
277 loff_t endoff, loff_t *offset) 277 int whence,
278 struct ext4_map_blocks *map,
279 loff_t *offset)
278{ 280{
279 struct pagevec pvec; 281 struct pagevec pvec;
282 unsigned int blkbits;
280 pgoff_t index; 283 pgoff_t index;
281 pgoff_t end; 284 pgoff_t end;
285 loff_t endoff;
282 loff_t startoff; 286 loff_t startoff;
283 loff_t lastoff; 287 loff_t lastoff;
284 int found = 0; 288 int found = 0;
285 289
290 blkbits = inode->i_sb->s_blocksize_bits;
286 startoff = *offset; 291 startoff = *offset;
287 lastoff = startoff; 292 lastoff = startoff;
288 293 endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
289 294
290 index = startoff >> PAGE_CACHE_SHIFT; 295 index = startoff >> PAGE_CACHE_SHIFT;
291 end = endoff >> PAGE_CACHE_SHIFT; 296 end = endoff >> PAGE_CACHE_SHIFT;
@@ -403,144 +408,147 @@ out:
403static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) 408static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
404{ 409{
405 struct inode *inode = file->f_mapping->host; 410 struct inode *inode = file->f_mapping->host;
406 struct fiemap_extent_info fie; 411 struct ext4_map_blocks map;
407 struct fiemap_extent ext[2]; 412 struct extent_status es;
408 loff_t next; 413 ext4_lblk_t start, last, end;
409 int i, ret = 0; 414 loff_t dataoff, isize;
415 int blkbits;
416 int ret = 0;
410 417
411 mutex_lock(&inode->i_mutex); 418 mutex_lock(&inode->i_mutex);
412 if (offset >= inode->i_size) { 419
420 isize = i_size_read(inode);
421 if (offset >= isize) {
413 mutex_unlock(&inode->i_mutex); 422 mutex_unlock(&inode->i_mutex);
414 return -ENXIO; 423 return -ENXIO;
415 } 424 }
416 fie.fi_flags = 0; 425
417 fie.fi_extents_max = 2; 426 blkbits = inode->i_sb->s_blocksize_bits;
418 fie.fi_extents_start = (struct fiemap_extent __user *) &ext; 427 start = offset >> blkbits;
419 while (1) { 428 last = start;
420 mm_segment_t old_fs = get_fs(); 429 end = isize >> blkbits;
421 430 dataoff = offset;
422 fie.fi_extents_mapped = 0; 431
423 memset(ext, 0, sizeof(*ext) * fie.fi_extents_max); 432 do {
424 433 map.m_lblk = last;
425 set_fs(get_ds()); 434 map.m_len = end - last + 1;
426 ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); 435 ret = ext4_map_blocks(NULL, inode, &map, 0);
427 set_fs(old_fs); 436 if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
428 if (ret) 437 if (last != start)
438 dataoff = (loff_t)last << blkbits;
429 break; 439 break;
440 }
430 441
431 /* No extents found, EOF */ 442 /*
432 if (!fie.fi_extents_mapped) { 443 * If there is a delay extent at this offset,
433 ret = -ENXIO; 444 * it will be as a data.
445 */
446 ext4_es_find_delayed_extent_range(inode, last, last, &es);
447 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
448 if (last != start)
449 dataoff = (loff_t)last << blkbits;
434 break; 450 break;
435 } 451 }
436 for (i = 0; i < fie.fi_extents_mapped; i++) {
437 next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
438 452
439 if (offset < (loff_t)ext[i].fe_logical) 453 /*
440 offset = (loff_t)ext[i].fe_logical; 454 * If there is a unwritten extent at this offset,
441 /* 455 * it will be as a data or a hole according to page
442 * If extent is not unwritten, then it contains valid 456 * cache that has data or not.
443 * data, mapped or delayed. 457 */
444 */ 458 if (map.m_flags & EXT4_MAP_UNWRITTEN) {
445 if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) 459 int unwritten;
446 goto out; 460 unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
461 &map, &dataoff);
462 if (unwritten)
463 break;
464 }
447 465
448 /* 466 last++;
449 * If there is a unwritten extent at this offset, 467 dataoff = (loff_t)last << blkbits;
450 * it will be as a data or a hole according to page 468 } while (last <= end);
451 * cache that has data or not.
452 */
453 if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
454 next, &offset))
455 goto out;
456 469
457 if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
458 ret = -ENXIO;
459 goto out;
460 }
461 offset = next;
462 }
463 }
464 if (offset > inode->i_size)
465 offset = inode->i_size;
466out:
467 mutex_unlock(&inode->i_mutex); 470 mutex_unlock(&inode->i_mutex);
468 if (ret)
469 return ret;
470 471
471 return vfs_setpos(file, offset, maxsize); 472 if (dataoff > isize)
473 return -ENXIO;
474
475 return vfs_setpos(file, dataoff, maxsize);
472} 476}
473 477
474/* 478/*
475 * ext4_seek_hole() retrieves the offset for SEEK_HOLE 479 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
476 */ 480 */
477static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) 481static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
478{ 482{
479 struct inode *inode = file->f_mapping->host; 483 struct inode *inode = file->f_mapping->host;
480 struct fiemap_extent_info fie; 484 struct ext4_map_blocks map;
481 struct fiemap_extent ext[2]; 485 struct extent_status es;
482 loff_t next; 486 ext4_lblk_t start, last, end;
483 int i, ret = 0; 487 loff_t holeoff, isize;
488 int blkbits;
489 int ret = 0;
484 490
485 mutex_lock(&inode->i_mutex); 491 mutex_lock(&inode->i_mutex);
486 if (offset >= inode->i_size) { 492
493 isize = i_size_read(inode);
494 if (offset >= isize) {
487 mutex_unlock(&inode->i_mutex); 495 mutex_unlock(&inode->i_mutex);
488 return -ENXIO; 496 return -ENXIO;
489 } 497 }
490 498
491 fie.fi_flags = 0; 499 blkbits = inode->i_sb->s_blocksize_bits;
492 fie.fi_extents_max = 2; 500 start = offset >> blkbits;
493 fie.fi_extents_start = (struct fiemap_extent __user *)&ext; 501 last = start;
494 while (1) { 502 end = isize >> blkbits;
495 mm_segment_t old_fs = get_fs(); 503 holeoff = offset;
496
497 fie.fi_extents_mapped = 0;
498 memset(ext, 0, sizeof(*ext));
499 504
500 set_fs(get_ds()); 505 do {
501 ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); 506 map.m_lblk = last;
502 set_fs(old_fs); 507 map.m_len = end - last + 1;
503 if (ret) 508 ret = ext4_map_blocks(NULL, inode, &map, 0);
504 break; 509 if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
510 last += ret;
511 holeoff = (loff_t)last << blkbits;
512 continue;
513 }
505 514
506 /* No extents found */ 515 /*
507 if (!fie.fi_extents_mapped) 516 * If there is a delay extent at this offset,
508 break; 517 * we will skip this extent.
518 */
519 ext4_es_find_delayed_extent_range(inode, last, last, &es);
520 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
521 last = es.es_lblk + es.es_len;
522 holeoff = (loff_t)last << blkbits;
523 continue;
524 }
509 525
510 for (i = 0; i < fie.fi_extents_mapped; i++) { 526 /*
511 next = (loff_t)(ext[i].fe_logical + ext[i].fe_length); 527 * If there is a unwritten extent at this offset,
512 /* 528 * it will be as a data or a hole according to page
513 * If extent is not unwritten, then it contains valid 529 * cache that has data or not.
514 * data, mapped or delayed. 530 */
515 */ 531 if (map.m_flags & EXT4_MAP_UNWRITTEN) {
516 if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) { 532 int unwritten;
517 if (offset < (loff_t)ext[i].fe_logical) 533 unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
518 goto out; 534 &map, &holeoff);
519 offset = next; 535 if (!unwritten) {
536 last += ret;
537 holeoff = (loff_t)last << blkbits;
520 continue; 538 continue;
521 } 539 }
522 /*
523 * If there is a unwritten extent at this offset,
524 * it will be as a data or a hole according to page
525 * cache that has data or not.
526 */
527 if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
528 next, &offset))
529 goto out;
530
531 offset = next;
532 if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
533 goto out;
534 } 540 }
535 } 541
536 if (offset > inode->i_size) 542 /* find a hole */
537 offset = inode->i_size; 543 break;
538out: 544 } while (last <= end);
545
539 mutex_unlock(&inode->i_mutex); 546 mutex_unlock(&inode->i_mutex);
540 if (ret)
541 return ret;
542 547
543 return vfs_setpos(file, offset, maxsize); 548 if (holeoff > isize)
549 holeoff = isize;
550
551 return vfs_setpos(file, holeoff, maxsize);
544} 552}
545 553
546/* 554/*
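
What the rewritten ext4_seek_data()/ext4_seek_hole() ultimately service is lseek() with SEEK_DATA and SEEK_HOLE. A short usage sketch from userspace; the path is arbitrary, filesystems without the feature fail with EINVAL, and ENXIO means there is no data (or hole) past the given offset:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* a return of -1 means the feature is unsupported (EINVAL) or
	 * nothing of that kind exists past the offset (ENXIO) */
	off_t data = lseek(fd, 0, SEEK_DATA);
	off_t hole = lseek(fd, 0, SEEK_HOLE);

	printf("first data at %lld, first hole at %lld\n",
	       (long long)data, (long long)hole);
	close(fd);
	return 0;
}
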
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index bf76f405a5f9..8a8ec6293b19 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb)
24 return -EPERM; 24 return -EPERM;
25 25
26 /* 26 /*
27 * If we are not using the primary superblock/GDT copy don't resize,
28 * because the user tools have no way of handling this. Probably a
29 * bad time to do it anyways.
30 */
31 if (EXT4_SB(sb)->s_sbh->b_blocknr !=
32 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
33 ext4_warning(sb, "won't resize using backup superblock at %llu",
34 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
35 return -EPERM;
36 }
37
38 /*
27 * We are not allowed to do online-resizing on a filesystem mounted 39 * We are not allowed to do online-resizing on a filesystem mounted
28 * with error, because it can destroy the filesystem easily. 40 * with error, because it can destroy the filesystem easily.
29 */ 41 */
@@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
758 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", 770 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
759 gdb_num); 771 gdb_num);
760 772
761 /*
762 * If we are not using the primary superblock/GDT copy don't resize,
763 * because the user tools have no way of handling this. Probably a
764 * bad time to do it anyways.
765 */
766 if (EXT4_SB(sb)->s_sbh->b_blocknr !=
767 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
768 ext4_warning(sb, "won't resize using backup superblock at %llu",
769 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
770 return -EPERM;
771 }
772
773 gdb_bh = sb_bread(sb, gdblock); 773 gdb_bh = sb_bread(sb, gdblock);
774 if (!gdb_bh) 774 if (!gdb_bh)
775 return -EIO; 775 return -EIO;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 43c92b1685cb..74c5f53595fb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3482 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3482 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3483 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && 3483 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
3484 EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 3484 EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
3485 ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are " 3485 ext4_warning(sb, "metadata_csum and uninit_bg are "
3486 "redundant flags; please run fsck."); 3486 "redundant flags; please run fsck.");
3487 3487
3488 /* Check for a known checksum algorithm */ 3488 /* Check for a known checksum algorithm */
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 99d440a4a6ba..ee85cd4e136a 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -740,14 +740,15 @@ static int __init fcntl_init(void)
740 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY 740 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
741 * is defined as O_NONBLOCK on some platforms and not on others. 741 * is defined as O_NONBLOCK on some platforms and not on others.
742 */ 742 */
743 BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( 743 BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
744 O_RDONLY | O_WRONLY | O_RDWR | 744 O_RDONLY | O_WRONLY | O_RDWR |
745 O_CREAT | O_EXCL | O_NOCTTY | 745 O_CREAT | O_EXCL | O_NOCTTY |
746 O_TRUNC | O_APPEND | /* O_NONBLOCK | */ 746 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
747 __O_SYNC | O_DSYNC | FASYNC | 747 __O_SYNC | O_DSYNC | FASYNC |
748 O_DIRECT | O_LARGEFILE | O_DIRECTORY | 748 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
749 O_NOFOLLOW | O_NOATIME | O_CLOEXEC | 749 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
750 __FMODE_EXEC | O_PATH | __O_TMPFILE 750 __FMODE_EXEC | O_PATH | __O_TMPFILE |
751 __FMODE_NONOTIFY
751 )); 752 ));
752 753
753 fasync_cache = kmem_cache_create("fasync_cache", 754 fasync_cache = kmem_cache_create("fasync_cache",
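
The adjusted BUILD_BUG_ON above counts the set bits of all O_* flags OR'd together and compares that against the number of flags, so the build breaks if two flags ever share a bit. A userspace sketch of the same trick with invented FOO_* flags; relying on __builtin_popcount() inside _Static_assert is a GCC/Clang assumption:

#include <stdio.h>

#define FOO_READ    0x01
#define FOO_WRITE   0x02
#define FOO_CREATE  0x04
#define FOO_TRUNC   0x08
#define FOO_APPEND  0x10

#define FOO_ALL (FOO_READ | FOO_WRITE | FOO_CREATE | FOO_TRUNC | FOO_APPEND)

/* fails to compile if any two flags overlap or a flag is zero */
_Static_assert(__builtin_popcount(FOO_ALL) == 5,
	       "flag bits overlap or a flag is missing");

int main(void)
{
	printf("all flags: 0x%x\n", FOO_ALL);
	return 0;
}
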
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ba1107977f2e..ed19a7d622fa 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req)
131 req->in.h.pid = current->pid; 131 req->in.h.pid = current->pid;
132} 132}
133 133
134void fuse_set_initialized(struct fuse_conn *fc)
135{
136 /* Make sure stores before this are seen on another CPU */
137 smp_wmb();
138 fc->initialized = 1;
139}
140
134static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) 141static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
135{ 142{
136 return !fc->initialized || (for_background && fc->blocked); 143 return !fc->initialized || (for_background && fc->blocked);
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
155 if (intr) 162 if (intr)
156 goto out; 163 goto out;
157 } 164 }
165 /* Matches smp_wmb() in fuse_set_initialized() */
166 smp_rmb();
158 167
159 err = -ENOTCONN; 168 err = -ENOTCONN;
160 if (!fc->connected) 169 if (!fc->connected)
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
253 262
254 atomic_inc(&fc->num_waiting); 263 atomic_inc(&fc->num_waiting);
255 wait_event(fc->blocked_waitq, fc->initialized); 264 wait_event(fc->blocked_waitq, fc->initialized);
265 /* Matches smp_wmb() in fuse_set_initialized() */
266 smp_rmb();
256 req = fuse_request_alloc(0); 267 req = fuse_request_alloc(0);
257 if (!req) 268 if (!req)
258 req = get_reserved_req(fc, file); 269 req = get_reserved_req(fc, file);
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
511} 522}
512EXPORT_SYMBOL_GPL(fuse_request_send); 523EXPORT_SYMBOL_GPL(fuse_request_send);
513 524
525static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
526{
527 if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
528 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
529
530 if (fc->minor < 9) {
531 switch (args->in.h.opcode) {
532 case FUSE_LOOKUP:
533 case FUSE_CREATE:
534 case FUSE_MKNOD:
535 case FUSE_MKDIR:
536 case FUSE_SYMLINK:
537 case FUSE_LINK:
538 args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
539 break;
540 case FUSE_GETATTR:
541 case FUSE_SETATTR:
542 args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
543 break;
544 }
545 }
546 if (fc->minor < 12) {
547 switch (args->in.h.opcode) {
548 case FUSE_CREATE:
549 args->in.args[0].size = sizeof(struct fuse_open_in);
550 break;
551 case FUSE_MKNOD:
552 args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
553 break;
554 }
555 }
556}
557
514ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) 558ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
515{ 559{
516 struct fuse_req *req; 560 struct fuse_req *req;
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
520 if (IS_ERR(req)) 564 if (IS_ERR(req))
521 return PTR_ERR(req); 565 return PTR_ERR(req);
522 566
567 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
568 fuse_adjust_compat(fc, args);
569
523 req->in.h.opcode = args->in.h.opcode; 570 req->in.h.opcode = args->in.h.opcode;
524 req->in.h.nodeid = args->in.h.nodeid; 571 req->in.h.nodeid = args->in.h.nodeid;
525 req->in.numargs = args->in.numargs; 572 req->in.numargs = args->in.numargs;
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
2127 if (fc->connected) { 2174 if (fc->connected) {
2128 fc->connected = 0; 2175 fc->connected = 0;
2129 fc->blocked = 0; 2176 fc->blocked = 0;
2130 fc->initialized = 1; 2177 fuse_set_initialized(fc);
2131 end_io_requests(fc); 2178 end_io_requests(fc);
2132 end_queued_requests(fc); 2179 end_queued_requests(fc);
2133 end_polls(fc); 2180 end_polls(fc);
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
2146 spin_lock(&fc->lock); 2193 spin_lock(&fc->lock);
2147 fc->connected = 0; 2194 fc->connected = 0;
2148 fc->blocked = 0; 2195 fc->blocked = 0;
2149 fc->initialized = 1; 2196 fuse_set_initialized(fc);
2150 end_queued_requests(fc); 2197 end_queued_requests(fc);
2151 end_polls(fc); 2198 end_polls(fc);
2152 wake_up_all(&fc->blocked_waitq); 2199 wake_up_all(&fc->blocked_waitq);
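
fuse_set_initialized() pairs an smp_wmb() on the writer with the new smp_rmb() calls on the readers, so fc->initialized is only observed after the state it guards. A userspace analogue using C11 release/acquire atomics; the conn structure and field names are invented, and the atomics stand in for the kernel barriers rather than reproduce them:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct conn {
	int max_write;               /* guarded data, written first */
	atomic_int initialized;      /* publication flag */
};

static struct conn fc;

static void *setup_thread(void *arg)
{
	(void)arg;
	fc.max_write = 4096;
	/* release: the store above is visible before the flag flips */
	atomic_store_explicit(&fc.initialized, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, setup_thread, NULL);

	/* acquire: once the flag is seen, max_write is guaranteed visible */
	while (!atomic_load_explicit(&fc.initialized, memory_order_acquire))
		;
	printf("max_write = %d\n", fc.max_write);

	pthread_join(t, NULL);
	return 0;
}
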
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 252b8a5de8b5..08e7b1a9d5d0 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
156 args->in.args[0].size = name->len + 1; 156 args->in.args[0].size = name->len + 1;
157 args->in.args[0].value = name->name; 157 args->in.args[0].value = name->name;
158 args->out.numargs = 1; 158 args->out.numargs = 1;
159 if (fc->minor < 9) 159 args->out.args[0].size = sizeof(struct fuse_entry_out);
160 args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
161 else
162 args->out.args[0].size = sizeof(struct fuse_entry_out);
163 args->out.args[0].value = outarg; 160 args->out.args[0].value = outarg;
164} 161}
165 162
@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
422 args.in.h.opcode = FUSE_CREATE; 419 args.in.h.opcode = FUSE_CREATE;
423 args.in.h.nodeid = get_node_id(dir); 420 args.in.h.nodeid = get_node_id(dir);
424 args.in.numargs = 2; 421 args.in.numargs = 2;
425 args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) : 422 args.in.args[0].size = sizeof(inarg);
426 sizeof(inarg);
427 args.in.args[0].value = &inarg; 423 args.in.args[0].value = &inarg;
428 args.in.args[1].size = entry->d_name.len + 1; 424 args.in.args[1].size = entry->d_name.len + 1;
429 args.in.args[1].value = entry->d_name.name; 425 args.in.args[1].value = entry->d_name.name;
430 args.out.numargs = 2; 426 args.out.numargs = 2;
431 if (fc->minor < 9) 427 args.out.args[0].size = sizeof(outentry);
432 args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
433 else
434 args.out.args[0].size = sizeof(outentry);
435 args.out.args[0].value = &outentry; 428 args.out.args[0].value = &outentry;
436 args.out.args[1].size = sizeof(outopen); 429 args.out.args[1].size = sizeof(outopen);
437 args.out.args[1].value = &outopen; 430 args.out.args[1].value = &outopen;
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
539 memset(&outarg, 0, sizeof(outarg)); 532 memset(&outarg, 0, sizeof(outarg));
540 args->in.h.nodeid = get_node_id(dir); 533 args->in.h.nodeid = get_node_id(dir);
541 args->out.numargs = 1; 534 args->out.numargs = 1;
542 if (fc->minor < 9) 535 args->out.args[0].size = sizeof(outarg);
543 args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
544 else
545 args->out.args[0].size = sizeof(outarg);
546 args->out.args[0].value = &outarg; 536 args->out.args[0].value = &outarg;
547 err = fuse_simple_request(fc, args); 537 err = fuse_simple_request(fc, args);
548 if (err) 538 if (err)
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
592 inarg.umask = current_umask(); 582 inarg.umask = current_umask();
593 args.in.h.opcode = FUSE_MKNOD; 583 args.in.h.opcode = FUSE_MKNOD;
594 args.in.numargs = 2; 584 args.in.numargs = 2;
595 args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE : 585 args.in.args[0].size = sizeof(inarg);
596 sizeof(inarg);
597 args.in.args[0].value = &inarg; 586 args.in.args[0].value = &inarg;
598 args.in.args[1].size = entry->d_name.len + 1; 587 args.in.args[1].size = entry->d_name.len + 1;
599 args.in.args[1].value = entry->d_name.name; 588 args.in.args[1].value = entry->d_name.name;
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
899 args.in.args[0].size = sizeof(inarg); 888 args.in.args[0].size = sizeof(inarg);
900 args.in.args[0].value = &inarg; 889 args.in.args[0].value = &inarg;
901 args.out.numargs = 1; 890 args.out.numargs = 1;
902 if (fc->minor < 9) 891 args.out.args[0].size = sizeof(outarg);
903 args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
904 else
905 args.out.args[0].size = sizeof(outarg);
906 args.out.args[0].value = &outarg; 892 args.out.args[0].value = &outarg;
907 err = fuse_simple_request(fc, &args); 893 err = fuse_simple_request(fc, &args);
908 if (!err) { 894 if (!err) {
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
1574 args->in.args[0].size = sizeof(*inarg_p); 1560 args->in.args[0].size = sizeof(*inarg_p);
1575 args->in.args[0].value = inarg_p; 1561 args->in.args[0].value = inarg_p;
1576 args->out.numargs = 1; 1562 args->out.numargs = 1;
1577 if (fc->minor < 9) 1563 args->out.args[0].size = sizeof(*outarg_p);
1578 args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
1579 else
1580 args->out.args[0].size = sizeof(*outarg_p);
1581 args->out.args[0].value = outarg_p; 1564 args->out.args[0].value = outarg_p;
1582} 1565}
1583 1566
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e0fc6725d1d0..1cdfb07c1376 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
906int fuse_do_setattr(struct inode *inode, struct iattr *attr, 906int fuse_do_setattr(struct inode *inode, struct iattr *attr,
907 struct file *file); 907 struct file *file);
908 908
909void fuse_set_initialized(struct fuse_conn *fc);
910
909#endif /* _FS_FUSE_I_H */ 911#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6749109f255d..f38256e4476e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
424 args.in.h.opcode = FUSE_STATFS; 424 args.in.h.opcode = FUSE_STATFS;
425 args.in.h.nodeid = get_node_id(dentry->d_inode); 425 args.in.h.nodeid = get_node_id(dentry->d_inode);
426 args.out.numargs = 1; 426 args.out.numargs = 1;
427 args.out.args[0].size = 427 args.out.args[0].size = sizeof(outarg);
428 fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
429 args.out.args[0].value = &outarg; 428 args.out.args[0].value = &outarg;
430 err = fuse_simple_request(fc, &args); 429 err = fuse_simple_request(fc, &args);
431 if (!err) 430 if (!err)
@@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
898 fc->max_write = max_t(unsigned, 4096, fc->max_write); 897 fc->max_write = max_t(unsigned, 4096, fc->max_write);
899 fc->conn_init = 1; 898 fc->conn_init = 1;
900 } 899 }
901 fc->initialized = 1; 900 fuse_set_initialized(fc);
902 wake_up_all(&fc->blocked_waitq); 901 wake_up_all(&fc->blocked_waitq);
903} 902}
904 903
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index bb63254ed848..735d7522a3a9 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -362,6 +362,9 @@ repeat:
362 rs.cont_size = isonum_733(rr->u.CE.size); 362 rs.cont_size = isonum_733(rr->u.CE.size);
363 break; 363 break;
364 case SIG('E', 'R'): 364 case SIG('E', 'R'):
365 /* Invalid length of ER tag id? */
366 if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
367 goto out;
365 ISOFS_SB(inode->i_sb)->s_rock = 1; 368 ISOFS_SB(inode->i_sb)->s_rock = 1;
366 printk(KERN_DEBUG "ISO 9660 Extensions: "); 369 printk(KERN_DEBUG "ISO 9660 Extensions: ");
367 { 370 {
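
The added ER check is a plain bounds test: a length field inside an untrusted record must not claim more bytes than the record itself contains. The general shape, with a made-up two-byte header instead of the Rock Ridge layout:

#include <stdio.h>

/* rec[0] = total record length, rec[1] = identifier length, id starts at rec+2 */
static int record_id_fits(const unsigned char *rec)
{
	unsigned int len    = rec[0];
	unsigned int id_len = rec[1];

	/* reject records whose identifier would run past the record end */
	return id_len + 2 <= len;
}

int main(void)
{
	unsigned char good[6] = { 6, 4, 'T', 'E', 'S', 'T' };
	unsigned char bad[2]  = { 3, 200 };   /* claims far more than it has */

	printf("good fits: %d\n", record_id_fits(good));
	printf("bad fits:  %d\n", record_id_fits(bad));
	return 0;
}
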
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 37989f02a226..2d881b381d2b 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -201,10 +201,14 @@ static unsigned int kernfs_name_hash(const char *name, const void *ns)
201static int kernfs_name_compare(unsigned int hash, const char *name, 201static int kernfs_name_compare(unsigned int hash, const char *name,
202 const void *ns, const struct kernfs_node *kn) 202 const void *ns, const struct kernfs_node *kn)
203{ 203{
204 if (hash != kn->hash) 204 if (hash < kn->hash)
205 return hash - kn->hash; 205 return -1;
206 if (ns != kn->ns) 206 if (hash > kn->hash)
207 return ns - kn->ns; 207 return 1;
208 if (ns < kn->ns)
209 return -1;
210 if (ns > kn->ns)
211 return 1;
208 return strcmp(name, kn->name); 212 return strcmp(name, kn->name);
209} 213}
210 214
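
The kernfs comparator is rewritten because returning a subtraction from a compare callback wraps for large unsigned values and truncates pointer differences. The safe pattern is explicit ordered comparisons, shown here with qsort() on unsigned ints; nothing kernfs-specific in the sketch:

#include <stdio.h>
#include <stdlib.h>

static int cmp_uint(const void *pa, const void *pb)
{
	unsigned int a = *(const unsigned int *)pa;
	unsigned int b = *(const unsigned int *)pb;

	/* never return a - b: 1u - 0xffffffffu wraps to 2, the wrong sign */
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

int main(void)
{
	unsigned int v[] = { 0x80000000u, 1u, 0xffffffffu, 42u };

	qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmp_uint);
	for (size_t i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("%u\n", v[i]);
	return 0;
}
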
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index e94c887da2d7..55505cbe11af 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -138,10 +138,6 @@ lockd(void *vrqstp)
138 138
139 dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); 139 dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
140 140
141 if (!nlm_timeout)
142 nlm_timeout = LOCKD_DFLT_TIMEO;
143 nlmsvc_timeout = nlm_timeout * HZ;
144
145 /* 141 /*
146 * The main request loop. We don't terminate until the last 142 * The main request loop. We don't terminate until the last
147 * NFS mount or NFS daemon has gone away. 143 * NFS mount or NFS daemon has gone away.
@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
350 printk(KERN_WARNING 346 printk(KERN_WARNING
351 "lockd_up: no pid, %d users??\n", nlmsvc_users); 347 "lockd_up: no pid, %d users??\n", nlmsvc_users);
352 348
349 if (!nlm_timeout)
350 nlm_timeout = LOCKD_DFLT_TIMEO;
351 nlmsvc_timeout = nlm_timeout * HZ;
352
353 serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup); 353 serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
354 if (!serv) { 354 if (!serv) {
355 printk(KERN_WARNING "lockd_up: create service failed\n"); 355 printk(KERN_WARNING "lockd_up: create service failed\n");
diff --git a/fs/locks.c b/fs/locks.c
index 735b8d3fa78c..59e2f905e4ff 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
1702 break; 1702 break;
1703 } 1703 }
1704 trace_generic_delete_lease(inode, fl); 1704 trace_generic_delete_lease(inode, fl);
1705 if (fl) 1705 if (fl && IS_LEASE(fl))
1706 error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); 1706 error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
1707 spin_unlock(&inode->i_lock); 1707 spin_unlock(&inode->i_lock);
1708 locks_dispose_list(&dispose); 1708 locks_dispose_list(&dispose);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 03311259b0c4..953daa44a282 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
228 kfree(clp->cl_serverowner); 228 kfree(clp->cl_serverowner);
229 kfree(clp->cl_serverscope); 229 kfree(clp->cl_serverscope);
230 kfree(clp->cl_implid); 230 kfree(clp->cl_implid);
231 kfree(clp->cl_owner_id);
231} 232}
232 233
233void nfs4_free_client(struct nfs_client *clp) 234void nfs4_free_client(struct nfs_client *clp)
@@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep,
452 spin_unlock(&nn->nfs_client_lock); 453 spin_unlock(&nn->nfs_client_lock);
453} 454}
454 455
456static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
457 const struct nfs_client *clp2)
458{
459 if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL)
460 return true;
461 return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
462}
463
455/** 464/**
456 * nfs40_walk_client_list - Find server that recognizes a client ID 465 * nfs40_walk_client_list - Find server that recognizes a client ID
457 * 466 *
@@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
483 if (pos->rpc_ops != new->rpc_ops) 492 if (pos->rpc_ops != new->rpc_ops)
484 continue; 493 continue;
485 494
486 if (pos->cl_proto != new->cl_proto)
487 continue;
488
489 if (pos->cl_minorversion != new->cl_minorversion) 495 if (pos->cl_minorversion != new->cl_minorversion)
490 continue; 496 continue;
491 497
@@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
510 if (pos->cl_clientid != new->cl_clientid) 516 if (pos->cl_clientid != new->cl_clientid)
511 continue; 517 continue;
512 518
519 if (!nfs4_match_client_owner_id(pos, new))
520 continue;
521
513 atomic_inc(&pos->cl_count); 522 atomic_inc(&pos->cl_count);
514 spin_unlock(&nn->nfs_client_lock); 523 spin_unlock(&nn->nfs_client_lock);
515 524
@@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
566} 575}
567 576
568/* 577/*
569 * Returns true if the server owners match 578 * Returns true if the server major ids match
570 */ 579 */
571static bool 580static bool
572nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b) 581nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
573{ 582{
574 struct nfs41_server_owner *o1 = a->cl_serverowner; 583 struct nfs41_server_owner *o1 = a->cl_serverowner;
575 struct nfs41_server_owner *o2 = b->cl_serverowner; 584 struct nfs41_server_owner *o2 = b->cl_serverowner;
576 585
577 if (o1->minor_id != o2->minor_id) {
578 dprintk("NFS: --> %s server owner minor IDs do not match\n",
579 __func__);
580 return false;
581 }
582
583 if (o1->major_id_sz != o2->major_id_sz) 586 if (o1->major_id_sz != o2->major_id_sz)
584 goto out_major_mismatch; 587 goto out_major_mismatch;
585 if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0) 588 if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
@@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
621 if (pos->rpc_ops != new->rpc_ops) 624 if (pos->rpc_ops != new->rpc_ops)
622 continue; 625 continue;
623 626
624 if (pos->cl_proto != new->cl_proto)
625 continue;
626
627 if (pos->cl_minorversion != new->cl_minorversion) 627 if (pos->cl_minorversion != new->cl_minorversion)
628 continue; 628 continue;
629 629
@@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new,
654 if (!nfs4_match_clientids(pos, new)) 654 if (!nfs4_match_clientids(pos, new))
655 continue; 655 continue;
656 656
657 if (!nfs4_match_serverowners(pos, new)) 657 /*
658 * Note that session trunking is just a special subcase of
659 * client id trunking. In either case, we want to fall back
660 * to using the existing nfs_client.
661 */
662 if (!nfs4_check_clientid_trunking(pos, new))
663 continue;
664
665 /* Unlike NFSv4.0, we know that NFSv4.1 always uses the
666 * uniform string, however someone might switch the
667 * uniquifier string on us.
668 */
669 if (!nfs4_match_client_owner_id(pos, new))
658 continue; 670 continue;
659 671
660 atomic_inc(&pos->cl_count); 672 atomic_inc(&pos->cl_count);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e7f8d5ff2581..c347705b0161 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1117,8 +1117,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
1117 return 0; 1117 return 0;
1118 if ((delegation->type & fmode) != fmode) 1118 if ((delegation->type & fmode) != fmode)
1119 return 0; 1119 return 0;
1120 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1121 return 0;
1122 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 1120 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1123 return 0; 1121 return 0;
1124 nfs_mark_delegation_referenced(delegation); 1122 nfs_mark_delegation_referenced(delegation);
@@ -4917,11 +4915,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4917} 4915}
4918 4916
4919static unsigned int 4917static unsigned int
4920nfs4_init_nonuniform_client_string(const struct nfs_client *clp, 4918nfs4_init_nonuniform_client_string(struct nfs_client *clp,
4921 char *buf, size_t len) 4919 char *buf, size_t len)
4922{ 4920{
4923 unsigned int result; 4921 unsigned int result;
4924 4922
4923 if (clp->cl_owner_id != NULL)
4924 return strlcpy(buf, clp->cl_owner_id, len);
4925
4925 rcu_read_lock(); 4926 rcu_read_lock();
4926 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", 4927 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
4927 clp->cl_ipaddr, 4928 clp->cl_ipaddr,
@@ -4930,24 +4931,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
4930 rpc_peeraddr2str(clp->cl_rpcclient, 4931 rpc_peeraddr2str(clp->cl_rpcclient,
4931 RPC_DISPLAY_PROTO)); 4932 RPC_DISPLAY_PROTO));
4932 rcu_read_unlock(); 4933 rcu_read_unlock();
4934 clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
4933 return result; 4935 return result;
4934} 4936}
4935 4937
4936static unsigned int 4938static unsigned int
4937nfs4_init_uniform_client_string(const struct nfs_client *clp, 4939nfs4_init_uniform_client_string(struct nfs_client *clp,
4938 char *buf, size_t len) 4940 char *buf, size_t len)
4939{ 4941{
4940 const char *nodename = clp->cl_rpcclient->cl_nodename; 4942 const char *nodename = clp->cl_rpcclient->cl_nodename;
4943 unsigned int result;
4944
4945 if (clp->cl_owner_id != NULL)
4946 return strlcpy(buf, clp->cl_owner_id, len);
4941 4947
4942 if (nfs4_client_id_uniquifier[0] != '\0') 4948 if (nfs4_client_id_uniquifier[0] != '\0')
4943 return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", 4949 result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
4944 clp->rpc_ops->version, 4950 clp->rpc_ops->version,
4945 clp->cl_minorversion, 4951 clp->cl_minorversion,
4946 nfs4_client_id_uniquifier, 4952 nfs4_client_id_uniquifier,
4947 nodename); 4953 nodename);
4948 return scnprintf(buf, len, "Linux NFSv%u.%u %s", 4954 else
4955 result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
4949 clp->rpc_ops->version, clp->cl_minorversion, 4956 clp->rpc_ops->version, clp->cl_minorversion,
4950 nodename); 4957 nodename);
4958 clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
4959 return result;
4951} 4960}
4952 4961
4953/* 4962/*
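
The cl_owner_id changes make both client-string helpers generate the identifier once, stash a kstrdup() copy, and hand the cached copy back on later calls. A rough userspace shape of that lazy caching; the names and format string are invented, and the real code also tolerates allocation failure:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *cached_owner_id;

static const char *get_owner_id(const char *host, const char *version)
{
	char buf[128];

	if (cached_owner_id)                 /* generated once, reused after */
		return cached_owner_id;

	snprintf(buf, sizeof(buf), "Linux NFSv%s %s", version, host);
	cached_owner_id = strdup(buf);       /* NULL only on allocation failure */
	return cached_owner_id;
}

int main(void)
{
	const char *id1 = get_owner_id("client.example.com", "4.1");
	const char *id2 = get_owner_id("other-host", "4.0");  /* same string back */

	printf("%s\n%s\n", id1 ? id1 : "(alloc failed)",
	       id2 ? id2 : "(alloc failed)");
	free(cached_owner_id);
	return 0;
}
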
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3550a9c87616..c06a1ba80d73 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
3897 status = nfs4_setlease(dp); 3897 status = nfs4_setlease(dp);
3898 goto out; 3898 goto out;
3899 } 3899 }
3900 atomic_inc(&fp->fi_delegees);
3901 if (fp->fi_had_conflict) { 3900 if (fp->fi_had_conflict) {
3902 status = -EAGAIN; 3901 status = -EAGAIN;
3903 goto out_unlock; 3902 goto out_unlock;
3904 } 3903 }
3904 atomic_inc(&fp->fi_delegees);
3905 hash_delegation_locked(dp, fp); 3905 hash_delegation_locked(dp, fp);
3906 status = 0; 3906 status = 0;
3907out_unlock: 3907out_unlock:
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index c991616acca9..bff8567aa42d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
259 struct fsnotify_event *kevent; 259 struct fsnotify_event *kevent;
260 char __user *start; 260 char __user *start;
261 int ret; 261 int ret;
262 DEFINE_WAIT(wait); 262 DEFINE_WAIT_FUNC(wait, woken_wake_function);
263 263
264 start = buf; 264 start = buf;
265 group = file->private_data; 265 group = file->private_data;
266 266
267 pr_debug("%s: group=%p\n", __func__, group); 267 pr_debug("%s: group=%p\n", __func__, group);
268 268
269 add_wait_queue(&group->notification_waitq, &wait);
269 while (1) { 270 while (1) {
270 prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
271
272 mutex_lock(&group->notification_mutex); 271 mutex_lock(&group->notification_mutex);
273 kevent = get_one_event(group, count); 272 kevent = get_one_event(group, count);
274 mutex_unlock(&group->notification_mutex); 273 mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
289 288
290 if (start != buf) 289 if (start != buf)
291 break; 290 break;
292 schedule(); 291
292 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
293 continue; 293 continue;
294 } 294 }
295 295
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
318 buf += ret; 318 buf += ret;
319 count -= ret; 319 count -= ret;
320 } 320 }
321 remove_wait_queue(&group->notification_waitq, &wait);
321 322
322 finish_wait(&group->notification_waitq, &wait);
323 if (start != buf && ret != -EFAULT) 323 if (start != buf && ret != -EFAULT)
324 ret = buf - start; 324 ret = buf - start;
325 return ret; 325 return ret;
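
Switching fanotify_read() to DEFINE_WAIT_FUNC()/wait_woken() closes the window where a wakeup could arrive between checking the queue and going to sleep. The closest userspace analogue is the classic condition-variable loop that re-checks its predicate under the lock; the event queue here is just a flag and every name is invented:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending;                     /* "events" waiting to be read */

static void *producer(void *arg)
{
	(void)arg;
	sleep(1);
	pthread_mutex_lock(&lock);
	pending = 1;
	pthread_cond_signal(&cond);     /* wake the reader */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);

	pthread_mutex_lock(&lock);
	while (!pending)                /* predicate re-checked after each wakeup */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("got event\n");
	pthread_join(t, NULL);
	return 0;
}
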
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 79b5af5e6a7b..cecd875653e4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2023,11 +2023,8 @@ leave:
2023 dlm_lockres_drop_inflight_ref(dlm, res); 2023 dlm_lockres_drop_inflight_ref(dlm, res);
2024 spin_unlock(&res->spinlock); 2024 spin_unlock(&res->spinlock);
2025 2025
2026 if (ret < 0) { 2026 if (ret < 0)
2027 mlog_errno(ret); 2027 mlog_errno(ret);
2028 if (newlock)
2029 dlm_lock_put(newlock);
2030 }
2031 2028
2032 return ret; 2029 return ret;
2033} 2030}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index b931e04e3388..914c121ec890 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
94 struct inode *inode, 94 struct inode *inode,
95 const char *symname); 95 const char *symname);
96 96
97static int ocfs2_double_lock(struct ocfs2_super *osb,
98 struct buffer_head **bh1,
99 struct inode *inode1,
100 struct buffer_head **bh2,
101 struct inode *inode2,
102 int rename);
103
104static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
97/* An orphan dir name is an 8 byte value, printed as a hex string */ 105/* An orphan dir name is an 8 byte value, printed as a hex string */
98#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) 106#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
99 107
@@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry,
678{ 686{
679 handle_t *handle; 687 handle_t *handle;
680 struct inode *inode = old_dentry->d_inode; 688 struct inode *inode = old_dentry->d_inode;
689 struct inode *old_dir = old_dentry->d_parent->d_inode;
681 int err; 690 int err;
682 struct buffer_head *fe_bh = NULL; 691 struct buffer_head *fe_bh = NULL;
692 struct buffer_head *old_dir_bh = NULL;
683 struct buffer_head *parent_fe_bh = NULL; 693 struct buffer_head *parent_fe_bh = NULL;
684 struct ocfs2_dinode *fe = NULL; 694 struct ocfs2_dinode *fe = NULL;
685 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); 695 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry,
696 706
697 dquot_initialize(dir); 707 dquot_initialize(dir);
698 708
699 err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); 709 err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
710 &parent_fe_bh, dir, 0);
700 if (err < 0) { 711 if (err < 0) {
701 if (err != -ENOENT) 712 if (err != -ENOENT)
702 mlog_errno(err); 713 mlog_errno(err);
703 return err; 714 return err;
704 } 715 }
705 716
717 /* make sure both dirs have bhs
718 * get an extra ref on old_dir_bh if old==new */
719 if (!parent_fe_bh) {
720 if (old_dir_bh) {
721 parent_fe_bh = old_dir_bh;
722 get_bh(parent_fe_bh);
723 } else {
724 mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
725 err = -EIO;
726 goto out;
727 }
728 }
729
706 if (!dir->i_nlink) { 730 if (!dir->i_nlink) {
707 err = -ENOENT; 731 err = -ENOENT;
708 goto out; 732 goto out;
709 } 733 }
710 734
711 err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name, 735 err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
712 old_dentry->d_name.len, &old_de_ino); 736 old_dentry->d_name.len, &old_de_ino);
713 if (err) { 737 if (err) {
714 err = -ENOENT; 738 err = -ENOENT;
@@ -801,10 +825,11 @@ out_unlock_inode:
801 ocfs2_inode_unlock(inode, 1); 825 ocfs2_inode_unlock(inode, 1);
802 826
803out: 827out:
804 ocfs2_inode_unlock(dir, 1); 828 ocfs2_double_unlock(old_dir, dir);
805 829
806 brelse(fe_bh); 830 brelse(fe_bh);
807 brelse(parent_fe_bh); 831 brelse(parent_fe_bh);
832 brelse(old_dir_bh);
808 833
809 ocfs2_free_dir_lookup_result(&lookup); 834 ocfs2_free_dir_lookup_result(&lookup);
810 835
@@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
1072} 1097}
1073 1098
1074/* 1099/*
1075 * The only place this should be used is rename! 1100 * The only place this should be used is rename and link!
1076 * if they have the same id, then the 1st one is the only one locked. 1101 * if they have the same id, then the 1st one is the only one locked.
1077 */ 1102 */
1078static int ocfs2_double_lock(struct ocfs2_super *osb, 1103static int ocfs2_double_lock(struct ocfs2_super *osb,
1079 struct buffer_head **bh1, 1104 struct buffer_head **bh1,
1080 struct inode *inode1, 1105 struct inode *inode1,
1081 struct buffer_head **bh2, 1106 struct buffer_head **bh2,
1082 struct inode *inode2) 1107 struct inode *inode2,
1108 int rename)
1083{ 1109{
1084 int status; 1110 int status;
1085 int inode1_is_ancestor, inode2_is_ancestor; 1111 int inode1_is_ancestor, inode2_is_ancestor;
@@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
1127 } 1153 }
1128 /* lock id2 */ 1154 /* lock id2 */
1129 status = ocfs2_inode_lock_nested(inode2, bh2, 1, 1155 status = ocfs2_inode_lock_nested(inode2, bh2, 1,
1130 OI_LS_RENAME1); 1156 rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
1131 if (status < 0) { 1157 if (status < 0) {
1132 if (status != -ENOENT) 1158 if (status != -ENOENT)
1133 mlog_errno(status); 1159 mlog_errno(status);
@@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
1136 } 1162 }
1137 1163
1138 /* lock id1 */ 1164 /* lock id1 */
1139 status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2); 1165 status = ocfs2_inode_lock_nested(inode1, bh1, 1,
1166 rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT);
1140 if (status < 0) { 1167 if (status < 0) {
1141 /* 1168 /*
1142 * An error return must mean that no cluster locks 1169 * An error return must mean that no cluster locks
@@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir,
1252 1279
1253 /* if old and new are the same, this'll just do one lock. */ 1280 /* if old and new are the same, this'll just do one lock. */
1254 status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, 1281 status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
1255 &new_dir_bh, new_dir); 1282 &new_dir_bh, new_dir, 1);
1256 if (status < 0) { 1283 if (status < 0) {
1257 mlog_errno(status); 1284 mlog_errno(status);
1258 goto bail; 1285 goto bail;
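
ocfs2_double_lock(), now shared by link and rename, takes the two inode locks in one fixed order and only once when both inodes are the same, which is what prevents ABBA deadlocks between concurrent callers. A userspace sketch of that shape with pthread mutexes ordered by address; the helpers are stand-ins, not the ocfs2 API:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {                        /* same object: lock it once */
		pthread_mutex_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {   /* impose one global lock order */
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

int main(void)
{
	pthread_mutex_t dir1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t dir2 = PTHREAD_MUTEX_INITIALIZER;

	double_lock(&dir1, &dir2);
	printf("both directories locked\n");
	double_unlock(&dir1, &dir2);

	double_lock(&dir1, &dir1);           /* old dir == new dir: single lock */
	printf("single directory locked\n");
	double_unlock(&dir1, &dir1);
	return 0;
}
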
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index a012c51caffd..05e90edd1992 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -57,6 +57,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
57 sector_t offset; 57 sector_t offset;
58 int i, num, ret = 0; 58 int i, num, ret = 0;
59 struct extent_position epos = { NULL, 0, {0, 0} }; 59 struct extent_position epos = { NULL, 0, {0, 0} };
60 struct super_block *sb = dir->i_sb;
60 61
61 if (ctx->pos == 0) { 62 if (ctx->pos == 0) {
62 if (!dir_emit_dot(file, ctx)) 63 if (!dir_emit_dot(file, ctx))
@@ -76,16 +77,16 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
76 if (nf_pos == 0) 77 if (nf_pos == 0)
77 nf_pos = udf_ext0_offset(dir); 78 nf_pos = udf_ext0_offset(dir);
78 79
79 fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1); 80 fibh.soffset = fibh.eoffset = nf_pos & (sb->s_blocksize - 1);
80 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 81 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
81 if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits, 82 if (inode_bmap(dir, nf_pos >> sb->s_blocksize_bits,
82 &epos, &eloc, &elen, &offset) 83 &epos, &eloc, &elen, &offset)
83 != (EXT_RECORDED_ALLOCATED >> 30)) { 84 != (EXT_RECORDED_ALLOCATED >> 30)) {
84 ret = -ENOENT; 85 ret = -ENOENT;
85 goto out; 86 goto out;
86 } 87 }
87 block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); 88 block = udf_get_lb_pblock(sb, &eloc, offset);
88 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 89 if ((++offset << sb->s_blocksize_bits) < elen) {
89 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) 90 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
90 epos.offset -= sizeof(struct short_ad); 91 epos.offset -= sizeof(struct short_ad);
91 else if (iinfo->i_alloc_type == 92 else if (iinfo->i_alloc_type ==
@@ -95,18 +96,18 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
95 offset = 0; 96 offset = 0;
96 } 97 }
97 98
98 if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) { 99 if (!(fibh.sbh = fibh.ebh = udf_tread(sb, block))) {
99 ret = -EIO; 100 ret = -EIO;
100 goto out; 101 goto out;
101 } 102 }
102 103
103 if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) { 104 if (!(offset & ((16 >> (sb->s_blocksize_bits - 9)) - 1))) {
104 i = 16 >> (dir->i_sb->s_blocksize_bits - 9); 105 i = 16 >> (sb->s_blocksize_bits - 9);
105 if (i + offset > (elen >> dir->i_sb->s_blocksize_bits)) 106 if (i + offset > (elen >> sb->s_blocksize_bits))
106 i = (elen >> dir->i_sb->s_blocksize_bits) - offset; 107 i = (elen >> sb->s_blocksize_bits) - offset;
107 for (num = 0; i > 0; i--) { 108 for (num = 0; i > 0; i--) {
108 block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i); 109 block = udf_get_lb_pblock(sb, &eloc, offset + i);
109 tmp = udf_tgetblk(dir->i_sb, block); 110 tmp = udf_tgetblk(sb, block);
110 if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp)) 111 if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
111 bha[num++] = tmp; 112 bha[num++] = tmp;
112 else 113 else
@@ -152,12 +153,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
152 } 153 }
153 154
154 if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { 155 if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
155 if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE)) 156 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
156 continue; 157 continue;
157 } 158 }
158 159
159 if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) { 160 if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
160 if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE)) 161 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
161 continue; 162 continue;
162 } 163 }
163 164
@@ -167,12 +168,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
167 continue; 168 continue;
168 } 169 }
169 170
170 flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); 171 flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
171 if (!flen) 172 if (!flen)
172 continue; 173 continue;
173 174
174 tloc = lelb_to_cpu(cfi.icb.extLocation); 175 tloc = lelb_to_cpu(cfi.icb.extLocation);
175 iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0); 176 iblock = udf_get_lb_pblock(sb, &tloc, 0);
176 if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN)) 177 if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
177 goto out; 178 goto out;
178 } /* end while */ 179 } /* end while */
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c9b4df5810d5..5bc71d9a674a 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1489,6 +1489,20 @@ reread:
1489 } 1489 }
1490 inode->i_generation = iinfo->i_unique; 1490 inode->i_generation = iinfo->i_unique;
1491 1491
1492 /* Sanity checks for files in ICB so that we don't get confused later */
1493 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1494 /*
1495 * For file in ICB data is stored in allocation descriptor
1496 * so sizes should match
1497 */
1498 if (iinfo->i_lenAlloc != inode->i_size)
1499 goto out;
1500 /* File in ICB has to fit in there... */
1501 if (inode->i_size > inode->i_sb->s_blocksize -
1502 udf_file_entry_alloc_offset(inode))
1503 goto out;
1504 }
1505
1492 switch (fe->icbTag.fileType) { 1506 switch (fe->icbTag.fileType) {
1493 case ICBTAG_FILE_TYPE_DIRECTORY: 1507 case ICBTAG_FILE_TYPE_DIRECTORY:
1494 inode->i_op = &udf_dir_inode_operations; 1508 inode->i_op = &udf_dir_inode_operations;
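
The new checks in fs/udf/inode.c reject inodes whose data is embedded in the ICB but whose recorded sizes are inconsistent. Purely as an illustration of the invariant being enforced (not part of the patch; field and parameter names are stand-ins, not the kernel structures), a standalone checker might look like this:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative sketch only: mirrors the invariant added for
 * ICBTAG_FLAG_AD_IN_ICB inodes.  Names are hypothetical stand-ins.
 */
static int icb_file_is_sane(uint64_t i_size, uint32_t len_alloc,
			    uint32_t block_size, uint32_t alloc_offset)
{
	/* Data lives in the allocation descriptor area, so sizes must match. */
	if (len_alloc != i_size)
		return 0;
	/* A file stored in the ICB has to fit in the space left in one block. */
	if (i_size > (uint64_t)block_size - alloc_offset)
		return 0;
	return 1;
}

int main(void)
{
	/* Example values: 2048-byte block, 176 bytes used by the file entry. */
	printf("%d\n", icb_file_is_sane(100, 100, 2048, 176));   /* 1: fits */
	printf("%d\n", icb_file_is_sane(100, 90, 2048, 176));    /* 0: size mismatch */
	printf("%d\n", icb_file_is_sane(4000, 4000, 2048, 176)); /* 0: too big */
	return 0;
}
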
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c12e260fd6c4..33b246b82c98 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -159,18 +159,19 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
159 struct udf_inode_info *dinfo = UDF_I(dir); 159 struct udf_inode_info *dinfo = UDF_I(dir);
160 int isdotdot = child->len == 2 && 160 int isdotdot = child->len == 2 &&
161 child->name[0] == '.' && child->name[1] == '.'; 161 child->name[0] == '.' && child->name[1] == '.';
162 struct super_block *sb = dir->i_sb;
162 163
163 size = udf_ext0_offset(dir) + dir->i_size; 164 size = udf_ext0_offset(dir) + dir->i_size;
164 f_pos = udf_ext0_offset(dir); 165 f_pos = udf_ext0_offset(dir);
165 166
166 fibh->sbh = fibh->ebh = NULL; 167 fibh->sbh = fibh->ebh = NULL;
167 fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); 168 fibh->soffset = fibh->eoffset = f_pos & (sb->s_blocksize - 1);
168 if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 169 if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
169 if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, 170 if (inode_bmap(dir, f_pos >> sb->s_blocksize_bits, &epos,
170 &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) 171 &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
171 goto out_err; 172 goto out_err;
172 block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); 173 block = udf_get_lb_pblock(sb, &eloc, offset);
173 if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 174 if ((++offset << sb->s_blocksize_bits) < elen) {
174 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) 175 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
175 epos.offset -= sizeof(struct short_ad); 176 epos.offset -= sizeof(struct short_ad);
176 else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) 177 else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
@@ -178,7 +179,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
178 } else 179 } else
179 offset = 0; 180 offset = 0;
180 181
181 fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); 182 fibh->sbh = fibh->ebh = udf_tread(sb, block);
182 if (!fibh->sbh) 183 if (!fibh->sbh)
183 goto out_err; 184 goto out_err;
184 } 185 }
@@ -217,12 +218,12 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
217 } 218 }
218 219
219 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { 220 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
220 if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE)) 221 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
221 continue; 222 continue;
222 } 223 }
223 224
224 if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) { 225 if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
225 if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE)) 226 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
226 continue; 227 continue;
227 } 228 }
228 229
@@ -233,7 +234,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
233 if (!lfi) 234 if (!lfi)
234 continue; 235 continue;
235 236
236 flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); 237 flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
237 if (flen && udf_match(flen, fname, child->len, child->name)) 238 if (flen && udf_match(flen, fname, child->len, child->name))
238 goto out_ok; 239 goto out_ok;
239 } 240 }
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 6fb7945c1e6e..ac10ca939f26 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -30,49 +30,73 @@
30#include <linux/buffer_head.h> 30#include <linux/buffer_head.h>
31#include "udf_i.h" 31#include "udf_i.h"
32 32
33static void udf_pc_to_char(struct super_block *sb, unsigned char *from, 33static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
34 int fromlen, unsigned char *to) 34 int fromlen, unsigned char *to, int tolen)
35{ 35{
36 struct pathComponent *pc; 36 struct pathComponent *pc;
37 int elen = 0; 37 int elen = 0;
38 int comp_len;
38 unsigned char *p = to; 39 unsigned char *p = to;
39 40
41 /* Reserve one byte for terminating \0 */
42 tolen--;
40 while (elen < fromlen) { 43 while (elen < fromlen) {
41 pc = (struct pathComponent *)(from + elen); 44 pc = (struct pathComponent *)(from + elen);
45 elen += sizeof(struct pathComponent);
42 switch (pc->componentType) { 46 switch (pc->componentType) {
43 case 1: 47 case 1:
44 /* 48 /*
45 * Symlink points to some place which should be agreed 49 * Symlink points to some place which should be agreed
46 * upon between originator and receiver of the media. Ignore. 50 * upon between originator and receiver of the media. Ignore.
47 */ 51 */
48 if (pc->lengthComponentIdent > 0) 52 if (pc->lengthComponentIdent > 0) {
53 elen += pc->lengthComponentIdent;
49 break; 54 break;
55 }
50 /* Fall through */ 56 /* Fall through */
51 case 2: 57 case 2:
58 if (tolen == 0)
59 return -ENAMETOOLONG;
52 p = to; 60 p = to;
53 *p++ = '/'; 61 *p++ = '/';
62 tolen--;
54 break; 63 break;
55 case 3: 64 case 3:
65 if (tolen < 3)
66 return -ENAMETOOLONG;
56 memcpy(p, "../", 3); 67 memcpy(p, "../", 3);
57 p += 3; 68 p += 3;
69 tolen -= 3;
58 break; 70 break;
59 case 4: 71 case 4:
72 if (tolen < 2)
73 return -ENAMETOOLONG;
60 memcpy(p, "./", 2); 74 memcpy(p, "./", 2);
61 p += 2; 75 p += 2;
76 tolen -= 2;
62 /* that would be . - just ignore */ 77 /* that would be . - just ignore */
63 break; 78 break;
64 case 5: 79 case 5:
65 p += udf_get_filename(sb, pc->componentIdent, p, 80 elen += pc->lengthComponentIdent;
66 pc->lengthComponentIdent); 81 if (elen > fromlen)
82 return -EIO;
83 comp_len = udf_get_filename(sb, pc->componentIdent,
84 pc->lengthComponentIdent,
85 p, tolen);
86 p += comp_len;
87 tolen -= comp_len;
88 if (tolen == 0)
89 return -ENAMETOOLONG;
67 *p++ = '/'; 90 *p++ = '/';
91 tolen--;
68 break; 92 break;
69 } 93 }
70 elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
71 } 94 }
72 if (p > to + 1) 95 if (p > to + 1)
73 p[-1] = '\0'; 96 p[-1] = '\0';
74 else 97 else
75 p[0] = '\0'; 98 p[0] = '\0';
99 return 0;
76} 100}
77 101
78static int udf_symlink_filler(struct file *file, struct page *page) 102static int udf_symlink_filler(struct file *file, struct page *page)
@@ -80,11 +104,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
80 struct inode *inode = page->mapping->host; 104 struct inode *inode = page->mapping->host;
81 struct buffer_head *bh = NULL; 105 struct buffer_head *bh = NULL;
82 unsigned char *symlink; 106 unsigned char *symlink;
83 int err = -EIO; 107 int err;
84 unsigned char *p = kmap(page); 108 unsigned char *p = kmap(page);
85 struct udf_inode_info *iinfo; 109 struct udf_inode_info *iinfo;
86 uint32_t pos; 110 uint32_t pos;
87 111
112 /* We don't support symlinks longer than one block */
113 if (inode->i_size > inode->i_sb->s_blocksize) {
114 err = -ENAMETOOLONG;
115 goto out_unmap;
116 }
117
88 iinfo = UDF_I(inode); 118 iinfo = UDF_I(inode);
89 pos = udf_block_map(inode, 0); 119 pos = udf_block_map(inode, 0);
90 120
@@ -94,14 +124,18 @@ static int udf_symlink_filler(struct file *file, struct page *page)
94 } else { 124 } else {
95 bh = sb_bread(inode->i_sb, pos); 125 bh = sb_bread(inode->i_sb, pos);
96 126
97 if (!bh) 127 if (!bh) {
98 goto out; 128 err = -EIO;
129 goto out_unlock_inode;
130 }
99 131
100 symlink = bh->b_data; 132 symlink = bh->b_data;
101 } 133 }
102 134
103 udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p); 135 err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
104 brelse(bh); 136 brelse(bh);
137 if (err)
138 goto out_unlock_inode;
105 139
106 up_read(&iinfo->i_data_sem); 140 up_read(&iinfo->i_data_sem);
107 SetPageUptodate(page); 141 SetPageUptodate(page);
@@ -109,9 +143,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
109 unlock_page(page); 143 unlock_page(page);
110 return 0; 144 return 0;
111 145
112out: 146out_unlock_inode:
113 up_read(&iinfo->i_data_sem); 147 up_read(&iinfo->i_data_sem);
114 SetPageError(page); 148 SetPageError(page);
149out_unmap:
115 kunmap(page); 150 kunmap(page);
116 unlock_page(page); 151 unlock_page(page);
117 return err; 152 return err;
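
The reworked udf_pc_to_char() tracks the space remaining in the destination and fails with -ENAMETOOLONG instead of writing past the end of the page. A minimal userspace sketch of the same bounded-append pattern (helper names invented for the demo, not the kernel code):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch of the bounded-append pattern: every write first checks the
 * space that is left, and one byte is reserved up front for the NUL.
 */
static int append_component(char *to, int tolen, int *used,
			    const char *comp, int comp_len)
{
	if (comp_len + 1 > tolen - *used)	/* +1 for the '/' separator */
		return -ENAMETOOLONG;
	memcpy(to + *used, comp, comp_len);
	*used += comp_len;
	to[(*used)++] = '/';
	return 0;
}

int main(void)
{
	char buf[16];
	int used = 0, err;
	int tolen = sizeof(buf) - 1;		/* reserve the terminating NUL */

	err = append_component(buf, tolen, &used, "usr", 3);
	if (!err)
		err = append_component(buf, tolen, &used, "local", 5);
	if (!err)
		err = append_component(buf, tolen, &used, "muchtoolongname", 15);
	buf[used] = '\0';
	/* prints err=-ENAMETOOLONG and keeps the partial path "usr/local/" */
	printf("err=%d path=%s\n", err, buf);
	return 0;
}
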
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 1cc3c993ebd0..47bb3f5ca360 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -211,7 +211,8 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
211} 211}
212 212
213/* unicode.c */ 213/* unicode.c */
214extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int); 214extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
215 int);
215extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *, 216extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
216 int); 217 int);
217extern int udf_build_ustr(struct ustr *, dstring *, int); 218extern int udf_build_ustr(struct ustr *, dstring *, int);
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index afd470e588ff..b84fee372734 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -28,7 +28,8 @@
28 28
29#include "udf_sb.h" 29#include "udf_sb.h"
30 30
31static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int); 31static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
32 int);
32 33
33static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen) 34static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
34{ 35{
@@ -333,8 +334,8 @@ try_again:
333 return u_len + 1; 334 return u_len + 1;
334} 335}
335 336
336int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname, 337int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
337 int flen) 338 uint8_t *dname, int dlen)
338{ 339{
339 struct ustr *filename, *unifilename; 340 struct ustr *filename, *unifilename;
340 int len = 0; 341 int len = 0;
@@ -347,7 +348,7 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
347 if (!unifilename) 348 if (!unifilename)
348 goto out1; 349 goto out1;
349 350
350 if (udf_build_ustr_exact(unifilename, sname, flen)) 351 if (udf_build_ustr_exact(unifilename, sname, slen))
351 goto out2; 352 goto out2;
352 353
353 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { 354 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
@@ -366,7 +367,8 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
366 } else 367 } else
367 goto out2; 368 goto out2;
368 369
369 len = udf_translate_to_linux(dname, filename->u_name, filename->u_len, 370 len = udf_translate_to_linux(dname, dlen,
371 filename->u_name, filename->u_len,
370 unifilename->u_name, unifilename->u_len); 372 unifilename->u_name, unifilename->u_len);
371out2: 373out2:
372 kfree(unifilename); 374 kfree(unifilename);
@@ -403,10 +405,12 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
403#define EXT_MARK '.' 405#define EXT_MARK '.'
404#define CRC_MARK '#' 406#define CRC_MARK '#'
405#define EXT_SIZE 5 407#define EXT_SIZE 5
408/* Number of chars we need to store generated CRC to make filename unique */
409#define CRC_LEN 5
406 410
407static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, 411static int udf_translate_to_linux(uint8_t *newName, int newLen,
408 int udfLen, uint8_t *fidName, 412 uint8_t *udfName, int udfLen,
409 int fidNameLen) 413 uint8_t *fidName, int fidNameLen)
410{ 414{
411 int index, newIndex = 0, needsCRC = 0; 415 int index, newIndex = 0, needsCRC = 0;
412 int extIndex = 0, newExtIndex = 0, hasExt = 0; 416 int extIndex = 0, newExtIndex = 0, hasExt = 0;
@@ -439,7 +443,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
439 newExtIndex = newIndex; 443 newExtIndex = newIndex;
440 } 444 }
441 } 445 }
442 if (newIndex < 256) 446 if (newIndex < newLen)
443 newName[newIndex++] = curr; 447 newName[newIndex++] = curr;
444 else 448 else
445 needsCRC = 1; 449 needsCRC = 1;
@@ -467,13 +471,13 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
467 } 471 }
468 ext[localExtIndex++] = curr; 472 ext[localExtIndex++] = curr;
469 } 473 }
470 maxFilenameLen = 250 - localExtIndex; 474 maxFilenameLen = newLen - CRC_LEN - localExtIndex;
471 if (newIndex > maxFilenameLen) 475 if (newIndex > maxFilenameLen)
472 newIndex = maxFilenameLen; 476 newIndex = maxFilenameLen;
473 else 477 else
474 newIndex = newExtIndex; 478 newIndex = newExtIndex;
475 } else if (newIndex > 250) 479 } else if (newIndex > newLen - CRC_LEN)
476 newIndex = 250; 480 newIndex = newLen - CRC_LEN;
477 newName[newIndex++] = CRC_MARK; 481 newName[newIndex++] = CRC_MARK;
478 valueCRC = crc_itu_t(0, fidName, fidNameLen); 482 valueCRC = crc_itu_t(0, fidName, fidNameLen);
479 newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8); 483 newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
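
udf_translate_to_linux() now derives its limits from the caller-supplied buffer length and reserves CRC_LEN characters for the "#XXXX" uniquifier instead of hard-coding 250/256. A rough standalone sketch of that reservation arithmetic; the checksum below is a toy stand-in, not the kernel's crc_itu_t():

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CRC_LEN 5	/* '#' plus four hex digits, as in the patch */

/* Toy 16-bit checksum standing in for crc_itu_t(); illustration only. */
static uint16_t toy_crc(const char *s, int len)
{
	uint16_t v = 0;
	while (len--)
		v = (uint16_t)(v * 31 + (uint8_t)*s++);
	return v;
}

/* Truncate 'name' into 'out' (capacity outlen), appending a uniquifier. */
static int mangle_name(char *out, int outlen, const char *name, int namelen)
{
	int keep = namelen;

	if (keep > outlen - CRC_LEN)
		keep = outlen - CRC_LEN;	/* leave room for "#XXXX" */
	memcpy(out, name, keep);
	keep += snprintf(out + keep, CRC_LEN + 1, "#%04X",
			 (unsigned int)toy_crc(name, namelen));
	return keep;
}

int main(void)
{
	char out[16];
	int n = mangle_name(out, sizeof(out) - 1, "averylongudffilename", 20);

	printf("%.*s (%d chars)\n", n, out, n);
	return 0;
}
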
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 3ca9b751f122..b95dc32a6e6b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,8 +196,8 @@ struct acpi_processor_flags {
196struct acpi_processor { 196struct acpi_processor {
197 acpi_handle handle; 197 acpi_handle handle;
198 u32 acpi_id; 198 u32 acpi_id;
199 u32 apic_id; 199 u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */
200 u32 id; 200 u32 id; /* CPU logical ID allocated by OS */
201 u32 pblk; 201 u32 pblk;
202 int performance_platform_limit; 202 int performance_platform_limit;
203 int throttling_platform_limit; 203 int throttling_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
310#endif /* CONFIG_CPU_FREQ */ 310#endif /* CONFIG_CPU_FREQ */
311 311
312/* in processor_core.c */ 312/* in processor_core.c */
313int acpi_get_apicid(acpi_handle, int type, u32 acpi_id); 313int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
314int acpi_map_cpuid(int apic_id, u32 acpi_id); 314int acpi_map_cpuid(int phys_id, u32 acpi_id);
315int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); 315int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
316 316
317/* in processor_pdc.c */ 317/* in processor_pdc.c */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 08848050922e..db284bff29dc 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
136 136
137static inline void __tlb_reset_range(struct mmu_gather *tlb) 137static inline void __tlb_reset_range(struct mmu_gather *tlb)
138{ 138{
139 tlb->start = TASK_SIZE; 139 if (tlb->fullmm) {
140 tlb->end = 0; 140 tlb->start = tlb->end = ~0;
141 } else {
142 tlb->start = TASK_SIZE;
143 tlb->end = 0;
144 }
141} 145}
142 146
143/* 147/*
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 8ba35c622e22..e1b2e8b98af7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -901,11 +901,15 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
901extern int drm_wait_vblank(struct drm_device *dev, void *data, 901extern int drm_wait_vblank(struct drm_device *dev, void *data,
902 struct drm_file *filp); 902 struct drm_file *filp);
903extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 903extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
904extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
904extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 905extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
905 struct timeval *vblanktime); 906 struct timeval *vblanktime);
906extern void drm_send_vblank_event(struct drm_device *dev, int crtc, 907extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
907 struct drm_pending_vblank_event *e); 908 struct drm_pending_vblank_event *e);
909extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
910 struct drm_pending_vblank_event *e);
908extern bool drm_handle_vblank(struct drm_device *dev, int crtc); 911extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
912extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
909extern int drm_vblank_get(struct drm_device *dev, int crtc); 913extern int drm_vblank_get(struct drm_device *dev, int crtc);
910extern void drm_vblank_put(struct drm_device *dev, int crtc); 914extern void drm_vblank_put(struct drm_device *dev, int crtc);
911extern int drm_crtc_vblank_get(struct drm_crtc *crtc); 915extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 780511a459c0..1e6ae1458f7a 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -119,13 +119,6 @@ struct drm_gem_object {
119 * simply leave it as NULL. 119 * simply leave it as NULL.
120 */ 120 */
121 struct dma_buf_attachment *import_attach; 121 struct dma_buf_attachment *import_attach;
122
123 /**
124 * dumb - created as dumb buffer
125 * Whether the gem object was created using the dumb buffer interface
126 * as such it may not be used for GPU rendering.
127 */
128 bool dumb;
129}; 122};
130 123
131void drm_gem_object_release(struct drm_gem_object *obj); 124void drm_gem_object_release(struct drm_gem_object *obj);
diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h
index 59822a995858..b5e6b0069ac7 100644
--- a/include/dt-bindings/thermal/thermal.h
+++ b/include/dt-bindings/thermal/thermal.h
@@ -11,7 +11,7 @@
11#define _DT_BINDINGS_THERMAL_THERMAL_H 11#define _DT_BINDINGS_THERMAL_THERMAL_H
12 12
13/* On cooling devices upper and lower limits */ 13/* On cooling devices upper and lower limits */
14#define THERMAL_NO_LIMIT (-1UL) 14#define THERMAL_NO_LIMIT (~0)
15 15
16#endif 16#endif
17 17
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 856d381b1d5b..d459cd17b477 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void);
147 147
148#ifdef CONFIG_ACPI_HOTPLUG_CPU 148#ifdef CONFIG_ACPI_HOTPLUG_CPU
149/* Arch dependent functions for cpu hotplug support */ 149/* Arch dependent functions for cpu hotplug support */
150int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); 150int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
151int acpi_unmap_lsapic(int cpu); 151int acpi_unmap_cpu(int cpu);
152#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 152#endif /* CONFIG_ACPI_HOTPLUG_CPU */
153 153
154int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); 154int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 0c04917c2f12..af84234e1f6e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -47,6 +47,7 @@ struct sk_buff;
47 47
48struct audit_krule { 48struct audit_krule {
49 int vers_ops; 49 int vers_ops;
50 u32 pflags;
50 u32 flags; 51 u32 flags;
51 u32 listnr; 52 u32 listnr;
52 u32 action; 53 u32 action;
@@ -64,6 +65,9 @@ struct audit_krule {
64 u64 prio; 65 u64 prio;
65}; 66};
66 67
68/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
69#define AUDIT_LOGINUID_LEGACY 0x1
70
67struct audit_field { 71struct audit_field {
68 u32 type; 72 u32 type;
69 union { 73 union {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..5735e7130d63 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
34 unsigned long flags; /* BLK_MQ_F_* flags */ 34 unsigned long flags; /* BLK_MQ_F_* flags */
35 35
36 struct request_queue *queue; 36 struct request_queue *queue;
37 unsigned int queue_num;
38 struct blk_flush_queue *fq; 37 struct blk_flush_queue *fq;
39 38
40 void *driver_data; 39 void *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
54 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; 53 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
55 54
56 unsigned int numa_node; 55 unsigned int numa_node;
57 unsigned int cmd_size; /* per-request extra data */ 56 unsigned int queue_num;
58 57
59 atomic_t nr_active; 58 atomic_t nr_active;
60 59
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
195struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); 194struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
196struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); 195struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
197 196
197int blk_mq_request_started(struct request *rq);
198void blk_mq_start_request(struct request *rq); 198void blk_mq_start_request(struct request *rq);
199void blk_mq_end_request(struct request *rq, int error); 199void blk_mq_end_request(struct request *rq, int error);
200void __blk_mq_end_request(struct request *rq, int error); 200void __blk_mq_end_request(struct request *rq, int error);
201 201
202void blk_mq_requeue_request(struct request *rq); 202void blk_mq_requeue_request(struct request *rq);
203void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); 203void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
204void blk_mq_cancel_requeue_work(struct request_queue *q);
204void blk_mq_kick_requeue_list(struct request_queue *q); 205void blk_mq_kick_requeue_list(struct request_queue *q);
206void blk_mq_abort_requeue_list(struct request_queue *q);
205void blk_mq_complete_request(struct request *rq); 207void blk_mq_complete_request(struct request *rq);
206 208
207void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); 209void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
212void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 214void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
213void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 215void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
214 void *priv); 216 void *priv);
217void blk_mq_unfreeze_queue(struct request_queue *q);
218void blk_mq_freeze_queue_start(struct request_queue *q);
215 219
216/* 220/*
217 * Driver command data is immediately after the request. So subtract request 221 * Driver command data is immediately after the request. So subtract request
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 445d59231bc4..c294e3e25e37 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
190 __REQ_PM, /* runtime pm request */ 190 __REQ_PM, /* runtime pm request */
191 __REQ_HASHED, /* on IO scheduler merge hash */ 191 __REQ_HASHED, /* on IO scheduler merge hash */
192 __REQ_MQ_INFLIGHT, /* track inflight for MQ */ 192 __REQ_MQ_INFLIGHT, /* track inflight for MQ */
193 __REQ_NO_TIMEOUT, /* requests may never expire */
193 __REQ_NR_BITS, /* stops here */ 194 __REQ_NR_BITS, /* stops here */
194}; 195};
195 196
@@ -243,5 +244,6 @@ enum rq_flag_bits {
243#define REQ_PM (1ULL << __REQ_PM) 244#define REQ_PM (1ULL << __REQ_PM)
244#define REQ_HASHED (1ULL << __REQ_HASHED) 245#define REQ_HASHED (1ULL << __REQ_HASHED)
245#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) 246#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
247#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
246 248
247#endif /* __LINUX_BLK_TYPES_H */ 249#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 5d86416d35f2..61b19c46bdb3 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
87 struct ceph_osd_data osd_data; 87 struct ceph_osd_data osd_data;
88 } extent; 88 } extent;
89 struct { 89 struct {
90 __le32 name_len; 90 u32 name_len;
91 __le32 value_len; 91 u32 value_len;
92 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ 92 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
93 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ 93 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
94 struct ceph_osd_data osd_data; 94 struct ceph_osd_data osd_data;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a1c81f80978e..33063f872ee3 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
215 } 215 }
216} 216}
217 217
218static __always_inline void __assign_once_size(volatile void *p, void *res, int size) 218static __always_inline void __write_once_size(volatile void *p, void *res, int size)
219{ 219{
220 switch (size) { 220 switch (size) {
221 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; 221 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
235/* 235/*
236 * Prevent the compiler from merging or refetching reads or writes. The 236 * Prevent the compiler from merging or refetching reads or writes. The
237 * compiler is also forbidden from reordering successive instances of 237 * compiler is also forbidden from reordering successive instances of
238 * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the 238 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
239 * compiler is aware of some particular ordering. One way to make the 239 * compiler is aware of some particular ordering. One way to make the
240 * compiler aware of ordering is to put the two invocations of READ_ONCE, 240 * compiler aware of ordering is to put the two invocations of READ_ONCE,
241 * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. 241 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
242 * 242 *
243 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 243 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
244 * data types like structs or unions. If the size of the accessed data 244 * data types like structs or unions. If the size of the accessed data
245 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 245 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
246 * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a 246 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
247 * compile-time warning. 247 * compile-time warning.
248 * 248 *
249 * Their two major use cases are: (1) Mediating communication between 249 * Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
257#define READ_ONCE(x) \ 257#define READ_ONCE(x) \
258 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) 258 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
259 259
260#define ASSIGN_ONCE(val, x) \ 260#define WRITE_ONCE(x, val) \
261 ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) 261 ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
262 262
263#endif /* __KERNEL__ */ 263#endif /* __KERNEL__ */
264 264
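
The rename from ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val) also swaps the argument order so it mirrors READ_ONCE(). Outside the kernel the same "exactly one access, no compiler merging" effect is usually spelled with a volatile cast; a small sketch follows (these macros are simplified stand-ins for the scalar fast path only, not the struct-capable versions in the hunk above):

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() scalar
 * case: the volatile access stops the compiler from merging, refetching
 * or reordering these particular loads and stores.
 */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int shared_flag;

int main(void)
{
	WRITE_ONCE(shared_flag, 1);		/* note the (x, val) order */
	if (READ_ONCE(shared_flag))
		printf("flag observed as %d\n", READ_ONCE(shared_flag));
	return 0;
}
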
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c303d383def1..bd955270d5aa 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -50,7 +50,7 @@ static inline struct thermal_cooling_device *
50of_cpufreq_cooling_register(struct device_node *np, 50of_cpufreq_cooling_register(struct device_node *np,
51 const struct cpumask *clip_cpus) 51 const struct cpumask *clip_cpus)
52{ 52{
53 return NULL; 53 return ERR_PTR(-ENOSYS);
54} 54}
55#endif 55#endif
56 56
@@ -65,13 +65,13 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
65static inline struct thermal_cooling_device * 65static inline struct thermal_cooling_device *
66cpufreq_cooling_register(const struct cpumask *clip_cpus) 66cpufreq_cooling_register(const struct cpumask *clip_cpus)
67{ 67{
68 return NULL; 68 return ERR_PTR(-ENOSYS);
69} 69}
70static inline struct thermal_cooling_device * 70static inline struct thermal_cooling_device *
71of_cpufreq_cooling_register(struct device_node *np, 71of_cpufreq_cooling_register(struct device_node *np,
72 const struct cpumask *clip_cpus) 72 const struct cpumask *clip_cpus)
73{ 73{
74 return NULL; 74 return ERR_PTR(-ENOSYS);
75} 75}
76static inline 76static inline
77void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 77void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
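
Switching the !CONFIG_CPU_THERMAL stubs from NULL to ERR_PTR(-ENOSYS) lets callers use the usual IS_ERR()/PTR_ERR() handling instead of a separate NULL check. A minimal userspace rendition of that pattern; ERR_PTR/IS_ERR are re-implemented here only for the demo (in the kernel they come from <linux/err.h>):

#include <errno.h>
#include <stdio.h>

/* Minimal userspace copies of the <linux/err.h> helpers, demo only. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct thermal_cooling_device;		/* opaque for the purposes of the sketch */

/* Stand-in for the stub's behaviour after the change. */
static struct thermal_cooling_device *cpufreq_cooling_register_stub(void)
{
	return ERR_PTR(-ENOSYS);
}

int main(void)
{
	struct thermal_cooling_device *cdev = cpufreq_cooling_register_stub();

	if (IS_ERR(cdev))
		printf("register failed: %ld\n", PTR_ERR(cdev));
	else
		printf("registered %p\n", (void *)cdev);
	return 0;
}
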
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index a07e087f54b2..ab70f3bc44ad 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,7 +53,6 @@ struct cpuidle_state {
53}; 53};
54 54
55/* Idle State Flags */ 55/* Idle State Flags */
56#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */
57#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ 56#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
58#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ 57#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
59 58
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
89/** 88/**
90 * cpuidle_get_last_residency - retrieves the last state's residency time 89 * cpuidle_get_last_residency - retrieves the last state's residency time
91 * @dev: the target CPU 90 * @dev: the target CPU
92 *
93 * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
94 */ 91 */
95static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) 92static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
96{ 93{
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f90c0282c114..42efe13077b6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
135#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) 135#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
136 136
137/* File was opened by fanotify and shouldn't generate fanotify events */ 137/* File was opened by fanotify and shouldn't generate fanotify events */
138#define FMODE_NONOTIFY ((__force fmode_t)0x1000000) 138#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
139 139
140/* 140/*
141 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector 141 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 55b685719d52..09460d6d6682 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -11,6 +11,10 @@ extern void genl_unlock(void);
11extern int lockdep_genl_is_held(void); 11extern int lockdep_genl_is_held(void);
12#endif 12#endif
13 13
14/* for synchronisation between af_netlink and genetlink */
15extern atomic_t genl_sk_destructing_cnt;
16extern wait_queue_head_t genl_sk_destructing_waitq;
17
14/** 18/**
15 * rcu_dereference_genl - rcu_dereference with debug checking 19 * rcu_dereference_genl - rcu_dereference with debug checking
16 * @p: The pointer to read, prior to dereferencing 20 * @p: The pointer to read, prior to dereferencing
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 290db1269c4c..75ae2e2631fc 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,11 +13,54 @@
13 * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> 13 * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
14 */ 14 */
15 15
16/* Shifted versions of the command enable bits are be used if the command
17 * has no arguments (see kdb_check_flags). This allows commands, such as
18 * go, to have different permissions depending upon whether it is called
19 * with an argument.
20 */
21#define KDB_ENABLE_NO_ARGS_SHIFT 10
22
16typedef enum { 23typedef enum {
17 KDB_REPEAT_NONE = 0, /* Do not repeat this command */ 24 KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
18 KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ 25 KDB_ENABLE_MEM_READ = (1 << 1),
19 KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ 26 KDB_ENABLE_MEM_WRITE = (1 << 2),
20} kdb_repeat_t; 27 KDB_ENABLE_REG_READ = (1 << 3),
28 KDB_ENABLE_REG_WRITE = (1 << 4),
29 KDB_ENABLE_INSPECT = (1 << 5),
30 KDB_ENABLE_FLOW_CTRL = (1 << 6),
31 KDB_ENABLE_SIGNAL = (1 << 7),
32 KDB_ENABLE_REBOOT = (1 << 8),
33 /* User exposed values stop here, all remaining flags are
34 * exclusively used to describe a commands behaviour.
35 */
36
37 KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
38 KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
39
40 KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
41 KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
42 << KDB_ENABLE_NO_ARGS_SHIFT,
43 KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
44 << KDB_ENABLE_NO_ARGS_SHIFT,
45 KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
46 << KDB_ENABLE_NO_ARGS_SHIFT,
47 KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
48 << KDB_ENABLE_NO_ARGS_SHIFT,
49 KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
50 << KDB_ENABLE_NO_ARGS_SHIFT,
51 KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
52 << KDB_ENABLE_NO_ARGS_SHIFT,
53 KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
54 << KDB_ENABLE_NO_ARGS_SHIFT,
55 KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
56 << KDB_ENABLE_NO_ARGS_SHIFT,
57 KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
58 << KDB_ENABLE_NO_ARGS_SHIFT,
59 KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
60
61 KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
62 KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
63} kdb_cmdflags_t;
21 64
22typedef int (*kdb_func_t)(int, const char **); 65typedef int (*kdb_func_t)(int, const char **);
23 66
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
62#define KDB_BADLENGTH (-19) 105#define KDB_BADLENGTH (-19)
63#define KDB_NOBP (-20) 106#define KDB_NOBP (-20)
64#define KDB_BADADDR (-21) 107#define KDB_BADADDR (-21)
108#define KDB_NOPERM (-22)
65 109
66/* 110/*
67 * kdb_diemsg 111 * kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
146 190
147/* Dynamic kdb shell command registration */ 191/* Dynamic kdb shell command registration */
148extern int kdb_register(char *, kdb_func_t, char *, char *, short); 192extern int kdb_register(char *, kdb_func_t, char *, char *, short);
149extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, 193extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
150 short, kdb_repeat_t); 194 short, kdb_cmdflags_t);
151extern int kdb_unregister(char *); 195extern int kdb_unregister(char *);
152#else /* ! CONFIG_KGDB_KDB */ 196#else /* ! CONFIG_KGDB_KDB */
153static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } 197static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
154static inline void kdb_init(int level) {} 198static inline void kdb_init(int level) {}
155static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, 199static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
156 char *help, short minlen) { return 0; } 200 char *help, short minlen) { return 0; }
157static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, 201static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
158 char *help, short minlen, 202 char *help, short minlen,
159 kdb_repeat_t repeat) { return 0; } 203 kdb_cmdflags_t flags) { return 0; }
160static inline int kdb_unregister(char *cmd) { return 0; } 204static inline int kdb_unregister(char *cmd) { return 0; }
161#endif /* CONFIG_KGDB_KDB */ 205#endif /* CONFIG_KGDB_KDB */
162enum { 206enum {
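
The kdb_cmdflags_t rework folds the old repeat enum and the new permission bits into one flag word; the shifted _NO_ARGS variants let a command such as "go" be permitted without arguments while still being restricted with them. Purely as a hypothetical reading of the comment above (kdb_check_flags itself is not shown in this diff), the check might look roughly like this:

#include <stdbool.h>
#include <stdio.h>

#define KDB_ENABLE_NO_ARGS_SHIFT	10
#define KDB_ENABLE_FLOW_CTRL		(1 << 6)
#define KDB_ENABLE_FLOW_CTRL_NO_ARGS	(KDB_ENABLE_FLOW_CTRL << KDB_ENABLE_NO_ARGS_SHIFT)

/*
 * Hypothetical sketch of the kind of test the comment describes: when a
 * command is invoked without arguments, the shifted NO_ARGS permission
 * bits are considered as well.  Not the actual kdb_check_flags().
 */
static bool cmd_permitted(unsigned int cmd_flags, unsigned int permissions,
			  bool no_args)
{
	if (no_args)
		permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
	return (cmd_flags & permissions) != 0;
}

int main(void)
{
	/* "go" carries only the NO_ARGS form of the flow-control permission. */
	unsigned int go_flags = KDB_ENABLE_FLOW_CTRL_NO_ARGS;
	unsigned int granted = KDB_ENABLE_FLOW_CTRL;

	printf("go (no args): %d\n", cmd_permitted(go_flags, granted, true));
	printf("go <addr>:    %d\n", cmd_permitted(go_flags, granted, false));
	return 0;
}
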
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 575a86c7fcbd..f742b6717d52 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -50,6 +50,8 @@ enum {
50 STMPE_IDX_GPEDR_MSB, 50 STMPE_IDX_GPEDR_MSB,
51 STMPE_IDX_GPRER_LSB, 51 STMPE_IDX_GPRER_LSB,
52 STMPE_IDX_GPFER_LSB, 52 STMPE_IDX_GPFER_LSB,
53 STMPE_IDX_GPPUR_LSB,
54 STMPE_IDX_GPPDR_LSB,
53 STMPE_IDX_GPAFR_U_MSB, 55 STMPE_IDX_GPAFR_U_MSB,
54 STMPE_IDX_IEGPIOR_LSB, 56 STMPE_IDX_IEGPIOR_LSB,
55 STMPE_IDX_ISGPIOR_LSB, 57 STMPE_IDX_ISGPIOR_LSB,
@@ -113,24 +115,6 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins,
113extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); 115extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
114extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); 116extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
115 117
116struct matrix_keymap_data;
117
118/**
119 * struct stmpe_keypad_platform_data - STMPE keypad platform data
120 * @keymap_data: key map table and size
121 * @debounce_ms: debounce interval, in ms. Maximum is
122 * %STMPE_KEYPAD_MAX_DEBOUNCE.
123 * @scan_count: number of key scanning cycles to confirm key data.
124 * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
125 * @no_autorepeat: disable key autorepeat
126 */
127struct stmpe_keypad_platform_data {
128 const struct matrix_keymap_data *keymap_data;
129 unsigned int debounce_ms;
130 unsigned int scan_count;
131 bool no_autorepeat;
132};
133
134#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) 118#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
135 119
136/** 120/**
@@ -199,7 +183,6 @@ struct stmpe_ts_platform_data {
199 * @irq_gpio: gpio number over which irq will be requested (significant only if 183 * @irq_gpio: gpio number over which irq will be requested (significant only if
200 * irq_over_gpio is true) 184 * irq_over_gpio is true)
201 * @gpio: GPIO-specific platform data 185 * @gpio: GPIO-specific platform data
202 * @keypad: keypad-specific platform data
203 * @ts: touchscreen-specific platform data 186 * @ts: touchscreen-specific platform data
204 */ 187 */
205struct stmpe_platform_data { 188struct stmpe_platform_data {
@@ -212,7 +195,6 @@ struct stmpe_platform_data {
212 int autosleep_timeout; 195 int autosleep_timeout;
213 196
214 struct stmpe_gpio_platform_data *gpio; 197 struct stmpe_gpio_platform_data *gpio;
215 struct stmpe_keypad_platform_data *keypad;
216 struct stmpe_ts_platform_data *ts; 198 struct stmpe_ts_platform_data *ts;
217}; 199};
218 200
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f80d0194c9bc..80fc92a49649 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
1952#if VM_GROWSUP 1952#if VM_GROWSUP
1953extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1953extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1954#else 1954#else
1955 #define expand_upwards(vma, address) do { } while (0) 1955 #define expand_upwards(vma, address) (0)
1956#endif 1956#endif
1957 1957
1958/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 1958/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 375af80bde7d..f767a0de611f 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -137,6 +137,7 @@ struct sdhci_host {
137#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ 137#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
138#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ 138#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
139#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ 139#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
140#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
140 141
141 unsigned int version; /* SDHCI spec. version */ 142 unsigned int version; /* SDHCI spec. version */
142 143
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c31f74d76ebd..52fd8e8694cf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
852 * 3. Update dev->stats asynchronously and atomically, and define 852 * 3. Update dev->stats asynchronously and atomically, and define
853 * neither operation. 853 * neither operation.
854 * 854 *
855 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); 855 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
856 * If device support VLAN filtering this function is called when a 856 * If device support VLAN filtering this function is called when a
857 * VLAN id is registered. 857 * VLAN id is registered.
858 * 858 *
859 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); 859 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
860 * If device support VLAN filtering this function is called when a 860 * If device support VLAN filtering this function is called when a
861 * VLAN id is unregistered. 861 * VLAN id is unregistered.
862 * 862 *
@@ -1012,12 +1012,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1012 * Callback to use for xmit over the accelerated station. This 1012 * Callback to use for xmit over the accelerated station. This
1013 * is used in place of ndo_start_xmit on accelerated net 1013 * is used in place of ndo_start_xmit on accelerated net
1014 * devices. 1014 * devices.
1015 * bool (*ndo_gso_check) (struct sk_buff *skb, 1015 * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
1016 * struct net_device *dev); 1016 * struct net_device *dev
1017 * netdev_features_t features);
1017 * Called by core transmit path to determine if device is capable of 1018 * Called by core transmit path to determine if device is capable of
1018 * performing GSO on a packet. The device returns true if it is 1019 * performing offload operations on a given packet. This is to give
1019 * able to GSO the packet, false otherwise. If the return value is 1020 * the device an opportunity to implement any restrictions that cannot
1020 * false the stack will do software GSO. 1021 * be otherwise expressed by feature flags. The check is called with
1022 * the set of features that the stack has calculated and it returns
1023 * those the driver believes to be appropriate.
1021 * 1024 *
1022 * int (*ndo_switch_parent_id_get)(struct net_device *dev, 1025 * int (*ndo_switch_parent_id_get)(struct net_device *dev,
1023 * struct netdev_phys_item_id *psid); 1026 * struct netdev_phys_item_id *psid);
@@ -1178,8 +1181,9 @@ struct net_device_ops {
1178 struct net_device *dev, 1181 struct net_device *dev,
1179 void *priv); 1182 void *priv);
1180 int (*ndo_get_lock_subclass)(struct net_device *dev); 1183 int (*ndo_get_lock_subclass)(struct net_device *dev);
1181 bool (*ndo_gso_check) (struct sk_buff *skb, 1184 netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
1182 struct net_device *dev); 1185 struct net_device *dev,
1186 netdev_features_t features);
1183#ifdef CONFIG_NET_SWITCHDEV 1187#ifdef CONFIG_NET_SWITCHDEV
1184 int (*ndo_switch_parent_id_get)(struct net_device *dev, 1188 int (*ndo_switch_parent_id_get)(struct net_device *dev,
1185 struct netdev_phys_item_id *psid); 1189 struct netdev_phys_item_id *psid);
@@ -2081,7 +2085,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */
2081 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 2085 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2082#define for_each_netdev_in_bond_rcu(bond, slave) \ 2086#define for_each_netdev_in_bond_rcu(bond, slave) \
2083 for_each_netdev_rcu(&init_net, slave) \ 2087 for_each_netdev_rcu(&init_net, slave) \
2084 if (netdev_master_upper_dev_get_rcu(slave) == bond) 2088 if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2085#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 2089#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2086 2090
2087static inline struct net_device *next_net_device(struct net_device *dev) 2091static inline struct net_device *next_net_device(struct net_device *dev)
@@ -3611,8 +3615,6 @@ static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
3611 netdev_features_t features) 3615 netdev_features_t features)
3612{ 3616{
3613 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 3617 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
3614 (dev->netdev_ops->ndo_gso_check &&
3615 !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
3616 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 3618 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
3617 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 3619 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
3618} 3620}
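
ndo_features_check replaces the boolean ndo_gso_check: instead of a yes/no answer, the driver now returns the subset of the stack-computed feature bits it is actually willing to apply to this packet. A rough, non-kernel analogue of that masking idea (feature bits and the packet structure are invented for the demo; the kernel uses netdev_features_t and struct sk_buff):

#include <stdbool.h>
#include <stdio.h>

/* Invented feature bits for the demo. */
#define F_TSO	(1u << 0)	/* TCP segmentation offload */
#define F_CSUM	(1u << 1)	/* checksum offload */

struct fake_pkt {
	bool encapsulated;
	unsigned int header_len;
};

/*
 * Analogue of an ndo_features_check() implementation: start from the
 * features the stack computed and clear the ones this packet cannot use,
 * rather than answering a single GSO yes/no question.
 */
static unsigned int demo_features_check(const struct fake_pkt *pkt,
					unsigned int features)
{
	if (pkt->encapsulated)
		features &= ~F_TSO;		/* pretend the hw can't segment tunnels */
	if (pkt->header_len > 128)
		features &= ~(F_TSO | F_CSUM);	/* headers too deep for the hw */
	return features;
}

int main(void)
{
	struct fake_pkt plain  = { .encapsulated = false, .header_len = 54 };
	struct fake_pkt tunnel = { .encapsulated = true,  .header_len = 90 };

	printf("plain:  0x%x\n", demo_features_check(&plain,  F_TSO | F_CSUM));
	printf("tunnel: 0x%x\n", demo_features_check(&tunnel, F_TSO | F_CSUM));
	return 0;
}
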
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9e572daa15d5..02fc86d2348e 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -46,8 +46,8 @@ struct netlink_kernel_cfg {
46 unsigned int flags; 46 unsigned int flags;
47 void (*input)(struct sk_buff *skb); 47 void (*input)(struct sk_buff *skb);
48 struct mutex *cb_mutex; 48 struct mutex *cb_mutex;
49 int (*bind)(int group); 49 int (*bind)(struct net *net, int group);
50 void (*unbind)(int group); 50 void (*unbind)(struct net *net, int group);
51 bool (*compare)(struct net *net, struct sock *sk); 51 bool (*compare)(struct net *net, struct sock *sk);
52}; 52};
53 53
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1e37fbb78f7a..ddea982355f3 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -74,6 +74,9 @@ struct nfs_client {
74 /* idmapper */ 74 /* idmapper */
75 struct idmap * cl_idmap; 75 struct idmap * cl_idmap;
76 76
77 /* Client owner identifier */
78 const char * cl_owner_id;
79
77 /* Our own IP address, as a null-terminated string. 80 /* Our own IP address, as a null-terminated string.
78 * This is used to generate the mv0 callback address. 81 * This is used to generate the mv0 callback address.
79 */ 82 */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7ea069cd3257..4b3736f7065c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -251,7 +251,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
251#define FGP_NOWAIT 0x00000020 251#define FGP_NOWAIT 0x00000020
252 252
253struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, 253struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
254 int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask); 254 int fgp_flags, gfp_t cache_gfp_mask);
255 255
256/** 256/**
257 * find_get_page - find and get a page reference 257 * find_get_page - find and get a page reference
@@ -266,13 +266,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
266static inline struct page *find_get_page(struct address_space *mapping, 266static inline struct page *find_get_page(struct address_space *mapping,
267 pgoff_t offset) 267 pgoff_t offset)
268{ 268{
269 return pagecache_get_page(mapping, offset, 0, 0, 0); 269 return pagecache_get_page(mapping, offset, 0, 0);
270} 270}
271 271
272static inline struct page *find_get_page_flags(struct address_space *mapping, 272static inline struct page *find_get_page_flags(struct address_space *mapping,
273 pgoff_t offset, int fgp_flags) 273 pgoff_t offset, int fgp_flags)
274{ 274{
275 return pagecache_get_page(mapping, offset, fgp_flags, 0, 0); 275 return pagecache_get_page(mapping, offset, fgp_flags, 0);
276} 276}
277 277
278/** 278/**
@@ -292,7 +292,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
292static inline struct page *find_lock_page(struct address_space *mapping, 292static inline struct page *find_lock_page(struct address_space *mapping,
293 pgoff_t offset) 293 pgoff_t offset)
294{ 294{
295 return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0); 295 return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
296} 296}
297 297
298/** 298/**
@@ -319,7 +319,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
319{ 319{
320 return pagecache_get_page(mapping, offset, 320 return pagecache_get_page(mapping, offset,
321 FGP_LOCK|FGP_ACCESSED|FGP_CREAT, 321 FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
322 gfp_mask, gfp_mask & GFP_RECLAIM_MASK); 322 gfp_mask);
323} 323}
324 324
325/** 325/**
@@ -340,8 +340,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
340{ 340{
341 return pagecache_get_page(mapping, index, 341 return pagecache_get_page(mapping, index,
342 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, 342 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
343 mapping_gfp_mask(mapping), 343 mapping_gfp_mask(mapping));
344 GFP_NOFS);
345} 344}
346 345
347struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); 346struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 486e84ccb1f9..4f7a61ca4b39 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -79,11 +79,6 @@ struct perf_branch_stack {
79 struct perf_branch_entry entries[0]; 79 struct perf_branch_entry entries[0];
80}; 80};
81 81
82struct perf_regs {
83 __u64 abi;
84 struct pt_regs *regs;
85};
86
87struct task_struct; 82struct task_struct;
88 83
89/* 84/*
@@ -610,7 +605,14 @@ struct perf_sample_data {
610 u32 reserved; 605 u32 reserved;
611 } cpu_entry; 606 } cpu_entry;
612 struct perf_callchain_entry *callchain; 607 struct perf_callchain_entry *callchain;
608
609 /*
610 * regs_user may point to task_pt_regs or to regs_user_copy, depending
611 * on arch details.
612 */
613 struct perf_regs regs_user; 613 struct perf_regs regs_user;
614 struct pt_regs regs_user_copy;
615
614 struct perf_regs regs_intr; 616 struct perf_regs regs_intr;
615 u64 stack_user_size; 617 u64 stack_user_size;
616} ____cacheline_aligned; 618} ____cacheline_aligned;
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 3c73d5fe18be..a5f98d53d732 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -1,11 +1,19 @@
1#ifndef _LINUX_PERF_REGS_H 1#ifndef _LINUX_PERF_REGS_H
2#define _LINUX_PERF_REGS_H 2#define _LINUX_PERF_REGS_H
3 3
4struct perf_regs {
5 __u64 abi;
6 struct pt_regs *regs;
7};
8
4#ifdef CONFIG_HAVE_PERF_REGS 9#ifdef CONFIG_HAVE_PERF_REGS
5#include <asm/perf_regs.h> 10#include <asm/perf_regs.h>
6u64 perf_reg_value(struct pt_regs *regs, int idx); 11u64 perf_reg_value(struct pt_regs *regs, int idx);
7int perf_reg_validate(u64 mask); 12int perf_reg_validate(u64 mask);
8u64 perf_reg_abi(struct task_struct *task); 13u64 perf_reg_abi(struct task_struct *task);
14void perf_get_regs_user(struct perf_regs *regs_user,
15 struct pt_regs *regs,
16 struct pt_regs *regs_user_copy);
9#else 17#else
10static inline u64 perf_reg_value(struct pt_regs *regs, int idx) 18static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
11{ 19{
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
21{ 29{
22 return PERF_SAMPLE_REGS_ABI_NONE; 30 return PERF_SAMPLE_REGS_ABI_NONE;
23} 31}
32
33static inline void perf_get_regs_user(struct perf_regs *regs_user,
34 struct pt_regs *regs,
35 struct pt_regs *regs_user_copy)
36{
37 regs_user->regs = task_pt_regs(current);
38 regs_user->abi = perf_reg_abi(current);
39}
24#endif /* CONFIG_HAVE_PERF_REGS */ 40#endif /* CONFIG_HAVE_PERF_REGS */
25#endif /* _LINUX_PERF_REGS_H */ 41#endif /* _LINUX_PERF_REGS_H */
diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h
index e9e6cfbfbb58..eb7d4a135a9e 100644
--- a/include/linux/phy/omap_control_phy.h
+++ b/include/linux/phy/omap_control_phy.h
@@ -66,7 +66,7 @@ enum omap_control_usb_mode {
66#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0 66#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0
67 67
68#define OMAP_CTRL_PCIE_PCS_MASK 0xff 68#define OMAP_CTRL_PCIE_PCS_MASK 0xff
69#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 0x8 69#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16
70 70
71#define OMAP_CTRL_USB2_PHY_PD BIT(28) 71#define OMAP_CTRL_USB2_PHY_PD BIT(28)
72 72
@@ -79,7 +79,7 @@ enum omap_control_usb_mode {
79void omap_control_phy_power(struct device *dev, int on); 79void omap_control_phy_power(struct device *dev, int on);
80void omap_control_usb_set_mode(struct device *dev, 80void omap_control_usb_set_mode(struct device *dev,
81 enum omap_control_usb_mode mode); 81 enum omap_control_usb_mode mode);
82void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay); 82void omap_control_pcie_pcs(struct device *dev, u8 delay);
83#else 83#else
84 84
85static inline void omap_control_phy_power(struct device *dev, int on) 85static inline void omap_control_phy_power(struct device *dev, int on)
@@ -91,7 +91,7 @@ static inline void omap_control_usb_set_mode(struct device *dev,
91{ 91{
92} 92}
93 93
94static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay) 94static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
95{ 95{
96} 96}
97#endif 97#endif
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 6cd20d5e651b..a9edab2c787a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -271,6 +271,8 @@ typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
271int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 271int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
272 void *data); 272 void *data);
273void of_genpd_del_provider(struct device_node *np); 273void of_genpd_del_provider(struct device_node *np);
274struct generic_pm_domain *of_genpd_get_from_provider(
275 struct of_phandle_args *genpdspec);
274 276
275struct generic_pm_domain *__of_genpd_xlate_simple( 277struct generic_pm_domain *__of_genpd_xlate_simple(
276 struct of_phandle_args *genpdspec, 278 struct of_phandle_args *genpdspec,
@@ -288,6 +290,12 @@ static inline int __of_genpd_add_provider(struct device_node *np,
288} 290}
289static inline void of_genpd_del_provider(struct device_node *np) {} 291static inline void of_genpd_del_provider(struct device_node *np) {}
290 292
293static inline struct generic_pm_domain *of_genpd_get_from_provider(
294 struct of_phandle_args *genpdspec)
295{
296 return NULL;
297}
298
291#define __of_genpd_xlate_simple NULL 299#define __of_genpd_xlate_simple NULL
292#define __of_genpd_xlate_onecell NULL 300#define __of_genpd_xlate_onecell NULL
293 301
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c0c2bce6b0b7..d9d7e7e56352 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -37,6 +37,16 @@ struct anon_vma {
37 atomic_t refcount; 37 atomic_t refcount;
38 38
39 /* 39 /*
40 * Count of child anon_vmas and VMAs which points to this anon_vma.
41 *
42 * This counter is used for making decision about reusing anon_vma
43 * instead of forking new one. See comments in function anon_vma_clone.
44 */
45 unsigned degree;
46
47 struct anon_vma *parent; /* Parent of this anon_vma */
48
49 /*
40 * NOTE: the LSB of the rb_root.rb_node is set by 50 * NOTE: the LSB of the rb_root.rb_node is set by
41 * mm_take_all_locks() _after_ taking the above lock. So the 51 * mm_take_all_locks() _after_ taking the above lock. So the
42 * rb_root must only be read/written after taking the above lock 52 * rb_root must only be read/written after taking the above lock
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index c611a02fbc51..fc52e307efab 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -38,7 +38,7 @@
38#define THERMAL_CSTATE_INVALID -1UL 38#define THERMAL_CSTATE_INVALID -1UL
39 39
40/* No upper/lower limit requirement */ 40/* No upper/lower limit requirement */
41#define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID 41#define THERMAL_NO_LIMIT ((u32)~0)
42 42
43/* Unit conversion macros */ 43/* Unit conversion macros */
44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ 44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
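
Editor's note: THERMAL_NO_LIMIT is now a plain ((u32)~0) so it matches the u32 cooling-state arguments it is passed as. A hedged usage sketch follows; the exact thermal_zone_bind_cooling_device() signature varies between kernel versions, so treat the call as illustrative.

#include <linux/thermal.h>

/* Sketch: bind a cooling device to trip 0 with no state clamping.
 * Five-argument form as in kernels of this era; later kernels add weight.
 */
static int example_bind(struct thermal_zone_device *tz,
                        struct thermal_cooling_device *cdev)
{
        return thermal_zone_bind_cooling_device(tz, 0, cdev,
                                                THERMAL_NO_LIMIT,  /* upper */
                                                THERMAL_NO_LIMIT); /* lower */
}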
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a219be961c0a..00048339c23e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
177 struct writeback_control *wbc, writepage_t writepage, 177 struct writeback_control *wbc, writepage_t writepage,
178 void *data); 178 void *data);
179int do_writepages(struct address_space *mapping, struct writeback_control *wbc); 179int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
180void set_page_dirty_balance(struct page *page);
181void writeback_set_ratelimit(void); 180void writeback_set_ratelimit(void);
182void tag_pages_for_writeback(struct address_space *mapping, 181void tag_pages_for_writeback(struct address_space *mapping,
183 pgoff_t start, pgoff_t end); 182 pgoff_t start, pgoff_t end);
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index af10c2cf8a1d..6c92415311ca 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -27,10 +27,18 @@ struct genl_info;
27 * @maxattr: maximum number of attributes supported 27 * @maxattr: maximum number of attributes supported
28 * @netnsok: set to true if the family can handle network 28 * @netnsok: set to true if the family can handle network
29 * namespaces and should be presented in all of them 29 * namespaces and should be presented in all of them
30 * @parallel_ops: operations can be called in parallel and aren't
31 * synchronized by the core genetlink code
30 * @pre_doit: called before an operation's doit callback, it may 32 * @pre_doit: called before an operation's doit callback, it may
31 * do additional, common, filtering and return an error 33 * do additional, common, filtering and return an error
32 * @post_doit: called after an operation's doit callback, it may 34 * @post_doit: called after an operation's doit callback, it may
33 * undo operations done by pre_doit, for example release locks 35 * undo operations done by pre_doit, for example release locks
36 * @mcast_bind: a socket bound to the given multicast group (which
37 * is given as the offset into the groups array)
38 * @mcast_unbind: a socket was unbound from the given multicast group.
39 * Note that unbind() will not be called symmetrically if the
40 * generic netlink family is removed while there are still open
41 * sockets.
34 * @attrbuf: buffer to store parsed attributes 42 * @attrbuf: buffer to store parsed attributes
35 * @family_list: family list 43 * @family_list: family list
36 * @mcgrps: multicast groups used by this family (private) 44 * @mcgrps: multicast groups used by this family (private)
@@ -53,6 +61,8 @@ struct genl_family {
53 void (*post_doit)(const struct genl_ops *ops, 61 void (*post_doit)(const struct genl_ops *ops,
54 struct sk_buff *skb, 62 struct sk_buff *skb,
55 struct genl_info *info); 63 struct genl_info *info);
64 int (*mcast_bind)(struct net *net, int group);
65 void (*mcast_unbind)(struct net *net, int group);
56 struct nlattr ** attrbuf; /* private */ 66 struct nlattr ** attrbuf; /* private */
57 const struct genl_ops * ops; /* private */ 67 const struct genl_ops * ops; /* private */
58 const struct genl_multicast_group *mcgrps; /* private */ 68 const struct genl_multicast_group *mcgrps; /* private */
@@ -395,11 +405,11 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
395} 405}
396 406
397static inline int genl_has_listeners(struct genl_family *family, 407static inline int genl_has_listeners(struct genl_family *family,
398 struct sock *sk, unsigned int group) 408 struct net *net, unsigned int group)
399{ 409{
400 if (WARN_ON_ONCE(group >= family->n_mcgrps)) 410 if (WARN_ON_ONCE(group >= family->n_mcgrps))
401 return -EINVAL; 411 return -EINVAL;
402 group = family->mcgrp_offset + group; 412 group = family->mcgrp_offset + group;
403 return netlink_has_listeners(sk, group); 413 return netlink_has_listeners(net->genl_sock, group);
404} 414}
405#endif /* __NET_GENERIC_NETLINK_H */ 415#endif /* __NET_GENERIC_NETLINK_H */
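
Editor's note: the new mcast_bind()/mcast_unbind() hooks let a generic netlink family learn when userspace joins or leaves one of its multicast groups, and genl_has_listeners() now takes the struct net directly. A minimal, hypothetical family wiring up the hooks might look like this (all names are illustrative):

#include <net/genetlink.h>

/* Hypothetical callbacks: the group index is an offset into mcgrps[]. */
static int example_mcast_bind(struct net *net, int group)
{
        return 0;       /* accept the subscription */
}

static void example_mcast_unbind(struct net *net, int group)
{
        /* note: not called if the family is removed with sockets still open */
}

static struct genl_family example_family = {
        .id           = GENL_ID_GENERATE,
        .name         = "example",
        .version      = 1,
        .maxattr      = 0,
        .mcast_bind   = example_mcast_bind,
        .mcast_unbind = example_mcast_unbind,
};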
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 58d719ddaa60..29c7be8808d5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
1270 * 1270 *
1271 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the 1271 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
1272 * driver to indicate that it requires IV generation for this 1272 * driver to indicate that it requires IV generation for this
1273 * particular key. Setting this flag does not necessarily mean that SKBs 1273 * particular key.
1274 * will have sufficient tailroom for ICV or MIC.
1275 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by 1274 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
1276 * the driver for a TKIP key if it requires Michael MIC 1275 * the driver for a TKIP key if it requires Michael MIC
1277 * generation in software. 1276 * generation in software.
@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
1283 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver 1282 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
1284 * if space should be prepared for the IV, but the IV 1283 * if space should be prepared for the IV, but the IV
1285 * itself should not be generated. Do not set together with 1284 * itself should not be generated. Do not set together with
1286 * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does 1285 * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
1287 * not necessarily mean that SKBs will have sufficient tailroom for ICV or
1288 * MIC.
1289 * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received 1286 * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
1290 * management frames. The flag can help drivers that have a hardware 1287 * management frames. The flag can help drivers that have a hardware
1291 * crypto implementation that doesn't deal with management frames 1288 * crypto implementation that doesn't deal with management frames
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index eb070b3674a1..76f708486aae 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -190,7 +190,6 @@ struct neigh_hash_table {
190 190
191 191
192struct neigh_table { 192struct neigh_table {
193 struct neigh_table *next;
194 int family; 193 int family;
195 int entry_size; 194 int entry_size;
196 int key_len; 195 int key_len;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 57cccd0052e5..903461aa5644 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -1,6 +1,9 @@
1#ifndef __NET_VXLAN_H 1#ifndef __NET_VXLAN_H
2#define __NET_VXLAN_H 1 2#define __NET_VXLAN_H 1
3 3
4#include <linux/ip.h>
5#include <linux/ipv6.h>
6#include <linux/if_vlan.h>
4#include <linux/skbuff.h> 7#include <linux/skbuff.h>
5#include <linux/netdevice.h> 8#include <linux/netdevice.h>
6#include <linux/udp.h> 9#include <linux/udp.h>
@@ -51,16 +54,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
51 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 54 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
52 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet); 55 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
53 56
54static inline bool vxlan_gso_check(struct sk_buff *skb) 57static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
58 netdev_features_t features)
55{ 59{
56 if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) && 60 u8 l4_hdr = 0;
61
62 if (!skb->encapsulation)
63 return features;
64
65 switch (vlan_get_protocol(skb)) {
66 case htons(ETH_P_IP):
67 l4_hdr = ip_hdr(skb)->protocol;
68 break;
69 case htons(ETH_P_IPV6):
70 l4_hdr = ipv6_hdr(skb)->nexthdr;
71 break;
72 default:
 73 return features;
74 }
75
76 if ((l4_hdr == IPPROTO_UDP) &&
57 (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 77 (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
58 skb->inner_protocol != htons(ETH_P_TEB) || 78 skb->inner_protocol != htons(ETH_P_TEB) ||
59 (skb_inner_mac_header(skb) - skb_transport_header(skb) != 79 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
60 sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) 80 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
61 return false; 81 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
62 82
63 return true; 83 return features;
64} 84}
65 85
66/* IP header + UDP + VXLAN + Ethernet header */ 86/* IP header + UDP + VXLAN + Ethernet header */
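
Editor's note: vxlan_gso_check() is replaced by vxlan_features_check(), which a NIC driver is expected to call from its .ndo_features_check() hook so that checksum/GSO offloads are dropped for encapsulated packets the hardware cannot parse. A hedged sketch of such a hook (driver names are made up):

#include <linux/netdevice.h>
#include <net/vxlan.h>

/* Sketch: let the helper strip NETIF_F_ALL_CSUM/GSO for unsupported
 * VXLAN-encapsulated skbs; everything else passes through unchanged.
 */
static netdev_features_t example_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
                                                netdev_features_t features)
{
        return vxlan_features_check(skb, features);
}

/* wired up as:  .ndo_features_check = example_features_check,  */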
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 1e7f74acc2ec..b429b73e875e 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -857,7 +857,7 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
857} 857}
858 858
859/** 859/**
860 * params_channels - Get the sample rate from the hw params 860 * params_rate - Get the sample rate from the hw params
861 * @p: hw params 861 * @p: hw params
862 */ 862 */
863static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) 863static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
@@ -866,7 +866,7 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
866} 866}
867 867
868/** 868/**
869 * params_channels - Get the period size (in frames) from the hw params 869 * params_period_size - Get the period size (in frames) from the hw params
870 * @p: hw params 870 * @p: hw params
871 */ 871 */
872static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) 872static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
@@ -875,7 +875,7 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
875} 875}
876 876
877/** 877/**
878 * params_channels - Get the number of periods from the hw params 878 * params_periods - Get the number of periods from the hw params
879 * @p: hw params 879 * @p: hw params
880 */ 880 */
881static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) 881static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
@@ -884,7 +884,7 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
884} 884}
885 885
886/** 886/**
887 * params_channels - Get the buffer size (in frames) from the hw params 887 * params_buffer_size - Get the buffer size (in frames) from the hw params
888 * @p: hw params 888 * @p: hw params
889 */ 889 */
890static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) 890static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
@@ -893,7 +893,7 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
893} 893}
894 894
895/** 895/**
896 * params_channels - Get the buffer size (in bytes) from the hw params 896 * params_buffer_bytes - Get the buffer size (in bytes) from the hw params
897 * @p: hw params 897 * @p: hw params
898 */ 898 */
899static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p) 899static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
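
Editor's note: these kerneldoc fixes only correct the copy-pasted params_channels names; the accessors themselves are unchanged. For reference, a typical hw_params callback reads the negotiated values like this (the driver function is hypothetical):

#include <sound/pcm.h>
#include <sound/pcm_params.h>

static int example_hw_params(struct snd_pcm_substream *substream,
                             struct snd_pcm_hw_params *params)
{
        unsigned int rate     = params_rate(params);
        unsigned int channels = params_channels(params);
        unsigned int periods  = params_periods(params);
        unsigned int bufbytes = params_buffer_bytes(params);

        /* ... program the hardware with rate/channels/periods/bufbytes ... */
        return 0;
}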
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 430cfaf92285..db81c65b8f48 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
135int se_dev_set_emulate_rest_reord(struct se_device *dev, int); 135int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
136int se_dev_set_queue_depth(struct se_device *, u32); 136int se_dev_set_queue_depth(struct se_device *, u32);
137int se_dev_set_max_sectors(struct se_device *, u32); 137int se_dev_set_max_sectors(struct se_device *, u32);
138int se_dev_set_fabric_max_sectors(struct se_device *, u32);
139int se_dev_set_optimal_sectors(struct se_device *, u32); 138int se_dev_set_optimal_sectors(struct se_device *, u32);
140int se_dev_set_block_size(struct se_device *, u32); 139int se_dev_set_block_size(struct se_device *, u32);
141 140
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
index 3247d7530107..186f7a923570 100644
--- a/include/target/target_core_backend_configfs.h
+++ b/include/target/target_core_backend_configfs.h
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
98 TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ 98 TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
99 DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ 99 DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
100 TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ 100 TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
101 DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
102 TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
103 DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ 101 DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
104 TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ 102 TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
105 DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \ 103 DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 397fb635766a..4a8795a87b9e 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -77,8 +77,6 @@
77#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 77#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
78/* Default max_write_same_len, disabled by default */ 78/* Default max_write_same_len, disabled by default */
79#define DA_MAX_WRITE_SAME_LEN 0 79#define DA_MAX_WRITE_SAME_LEN 0
80/* Default max transfer length */
81#define DA_FABRIC_MAX_SECTORS 8192
82/* Use a model alias based on the configfs backend device name */ 80/* Use a model alias based on the configfs backend device name */
83#define DA_EMULATE_MODEL_ALIAS 0 81#define DA_EMULATE_MODEL_ALIAS 0
84/* Emulation for Direct Page Out */ 82/* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
694 u32 hw_block_size; 692 u32 hw_block_size;
695 u32 block_size; 693 u32 block_size;
696 u32 hw_max_sectors; 694 u32 hw_max_sectors;
697 u32 fabric_max_sectors;
698 u32 optimal_sectors; 695 u32 optimal_sectors;
699 u32 hw_queue_depth; 696 u32 hw_queue_depth;
700 u32 queue_depth; 697 u32 queue_depth;
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 7543b3e51331..e063effe0cc1 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -5,7 +5,7 @@
5 5
6/* 6/*
7 * FMODE_EXEC is 0x20 7 * FMODE_EXEC is 0x20
8 * FMODE_NONOTIFY is 0x1000000 8 * FMODE_NONOTIFY is 0x4000000
9 * These cannot be used by userspace O_* until internal and external open 9 * These cannot be used by userspace O_* until internal and external open
10 * flags are split. 10 * flags are split.
11 * -Eric Paris 11 * -Eric Paris
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 3e4323a3918d..94ffe0c83ce7 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -98,6 +98,7 @@ struct can_ctrlmode {
98#define CAN_CTRLMODE_BERR_REPORTING 0x10 /* Bus-error reporting */ 98#define CAN_CTRLMODE_BERR_REPORTING 0x10 /* Bus-error reporting */
99#define CAN_CTRLMODE_FD 0x20 /* CAN FD mode */ 99#define CAN_CTRLMODE_FD 0x20 /* CAN FD mode */
100#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */ 100#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
101#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
101 102
102/* 103/*
103 * CAN device statistics 104 * CAN device statistics
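
Editor's note: CAN_CTRLMODE_FD_NON_ISO advertises the pre-ISO CAN FD frame format. From userspace the mode is toggled through the IFLA_CAN_CTRLMODE netlink attribute; a hedged sketch of the payload (the surrounding RTM_NEWLINK request is omitted):

#include <linux/can/netlink.h>

/* Sketch: request CAN FD in non-ISO mode; this struct is carried in an
 * IFLA_CAN_CTRLMODE attribute of a netlink link request (not shown).
 */
struct can_ctrlmode cm = {
        .mask  = CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO,
        .flags = CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO,
};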
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 74a2a1773494..79b12b004ade 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
149/* 149/*
150 * IPV6 socket options 150 * IPV6 socket options
151 */ 151 */
152 152#if __UAPI_DEF_IPV6_OPTIONS
153#define IPV6_ADDRFORM 1 153#define IPV6_ADDRFORM 1
154#define IPV6_2292PKTINFO 2 154#define IPV6_2292PKTINFO 2
155#define IPV6_2292HOPOPTS 3 155#define IPV6_2292HOPOPTS 3
@@ -196,6 +196,7 @@ struct in6_flowlabel_req {
196 196
197#define IPV6_IPSEC_POLICY 34 197#define IPV6_IPSEC_POLICY 34
198#define IPV6_XFRM_POLICY 35 198#define IPV6_XFRM_POLICY 35
199#endif
199 200
200/* 201/*
201 * Multicast: 202 * Multicast:
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 7acef41fc209..af94f31e33ac 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args {
128 uint32_t pad; 128 uint32_t pad;
129}; 129};
130 130
131#define KFD_IOC_MAGIC 'K' 131#define AMDKFD_IOCTL_BASE 'K'
132#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
133#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
134#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
135#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
132 136
133#define KFD_IOC_GET_VERSION \ 137#define AMDKFD_IOC_GET_VERSION \
134 _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args) 138 AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
135 139
136#define KFD_IOC_CREATE_QUEUE \ 140#define AMDKFD_IOC_CREATE_QUEUE \
137 _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args) 141 AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
138 142
139#define KFD_IOC_DESTROY_QUEUE \ 143#define AMDKFD_IOC_DESTROY_QUEUE \
140 _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args) 144 AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
141 145
142#define KFD_IOC_SET_MEMORY_POLICY \ 146#define AMDKFD_IOC_SET_MEMORY_POLICY \
143 _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args) 147 AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
144 148
145#define KFD_IOC_GET_CLOCK_COUNTERS \ 149#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
146 _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args) 150 AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
147 151
148#define KFD_IOC_GET_PROCESS_APERTURES \ 152#define AMDKFD_IOC_GET_PROCESS_APERTURES \
149 _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args) 153 AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
150 154
151#define KFD_IOC_UPDATE_QUEUE \ 155#define AMDKFD_IOC_UPDATE_QUEUE \
152 _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args) 156 AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
157
158#define AMDKFD_COMMAND_START 0x01
159#define AMDKFD_COMMAND_END 0x08
153 160
154#endif 161#endif
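
Editor's note: the KFD ioctls move to dedicated AMDKFD_IOC_* numbers built from AMDKFD_IOCTL_BASE, with AMDKFD_COMMAND_START/END bounding the range. Userspace invokes them in the usual way; a hedged example (the /dev/kfd path and version field names are assumptions):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int example_query_version(void)
{
        struct kfd_ioctl_get_version_args args = {0};
        int ret, fd = open("/dev/kfd", O_RDWR);   /* assumed device node */

        if (fd < 0)
                return -1;
        ret = ioctl(fd, AMDKFD_IOC_GET_VERSION, &args);
        close(fd);
        /* on success, args.major_version / args.minor_version hold the ABI */
        return ret;
}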
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index c140620dad92..e28807ad17fa 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -69,6 +69,7 @@
69#define __UAPI_DEF_SOCKADDR_IN6 0 69#define __UAPI_DEF_SOCKADDR_IN6 0
70#define __UAPI_DEF_IPV6_MREQ 0 70#define __UAPI_DEF_IPV6_MREQ 0
71#define __UAPI_DEF_IPPROTO_V6 0 71#define __UAPI_DEF_IPPROTO_V6 0
72#define __UAPI_DEF_IPV6_OPTIONS 0
72 73
73#else 74#else
74 75
@@ -82,6 +83,7 @@
82#define __UAPI_DEF_SOCKADDR_IN6 1 83#define __UAPI_DEF_SOCKADDR_IN6 1
83#define __UAPI_DEF_IPV6_MREQ 1 84#define __UAPI_DEF_IPV6_MREQ 1
84#define __UAPI_DEF_IPPROTO_V6 1 85#define __UAPI_DEF_IPPROTO_V6 1
86#define __UAPI_DEF_IPV6_OPTIONS 1
85 87
86#endif /* _NETINET_IN_H */ 88#endif /* _NETINET_IN_H */
87 89
@@ -103,6 +105,7 @@
103#define __UAPI_DEF_SOCKADDR_IN6 1 105#define __UAPI_DEF_SOCKADDR_IN6 1
104#define __UAPI_DEF_IPV6_MREQ 1 106#define __UAPI_DEF_IPV6_MREQ 1
105#define __UAPI_DEF_IPPROTO_V6 1 107#define __UAPI_DEF_IPPROTO_V6 1
108#define __UAPI_DEF_IPV6_OPTIONS 1
106 109
107/* Definitions for xattr.h */ 110/* Definitions for xattr.h */
108#define __UAPI_DEF_XATTR 1 111#define __UAPI_DEF_XATTR 1
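
Editor's note: __UAPI_DEF_IPV6_OPTIONS follows the existing libc-compat pattern: when glibc's netinet/in.h has already provided the IPV6_* socket options, the kernel header compiles out its copy and avoids redefinition warnings. A short illustration, where the include order is what matters:

/* Userspace translation unit mixing libc and kernel uapi headers. */
#include <netinet/in.h>   /* glibc provides IPV6_ADDRFORM ... IPV6_XFRM_POLICY */
#include <linux/in6.h>    /* __UAPI_DEF_IPV6_OPTIONS is 0 here, so the
                           * duplicate IPV6_* block is skipped */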
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 3a6dcaa359b7..f714e8633352 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
174 OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ 174 OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
175 OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* 175 OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_*
176 attributes. */ 176 attributes. */
177 OVS_PACKET_ATTR_UNUSED1,
178 OVS_PACKET_ATTR_UNUSED2,
179 OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
180 error logging should be suppressed. */
177 __OVS_PACKET_ATTR_MAX 181 __OVS_PACKET_ATTR_MAX
178}; 182};
179 183
diff --git a/include/uapi/linux/uinput.h b/include/uapi/linux/uinput.h
index baeab83deb64..013c9d8db372 100644
--- a/include/uapi/linux/uinput.h
+++ b/include/uapi/linux/uinput.h
@@ -82,7 +82,7 @@ struct uinput_ff_erase {
82 * The complete sysfs path is then /sys/devices/virtual/input/--NAME-- 82 * The complete sysfs path is then /sys/devices/virtual/input/--NAME--
83 * Usually, it is in the form "inputN" 83 * Usually, it is in the form "inputN"
84 */ 84 */
85#define UI_GET_SYSNAME(len) _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 300, len) 85#define UI_GET_SYSNAME(len) _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 44, len)
86 86
87/** 87/**
88 * UI_GET_VERSION - Return version of uinput protocol 88 * UI_GET_VERSION - Return version of uinput protocol
@@ -91,7 +91,7 @@ struct uinput_ff_erase {
91 * the integer pointed to by the ioctl argument. The protocol version 91 * the integer pointed to by the ioctl argument. The protocol version
92 * is hard-coded in the kernel and is independent of the uinput device. 92 * is hard-coded in the kernel and is independent of the uinput device.
93 */ 93 */
94#define UI_GET_VERSION _IOR(UINPUT_IOCTL_BASE, 301, unsigned int) 94#define UI_GET_VERSION _IOR(UINPUT_IOCTL_BASE, 45, unsigned int)
95 95
96/* 96/*
97 * To write a force-feedback-capable driver, the upload_effect 97 * To write a force-feedback-capable driver, the upload_effect
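
Editor's note: the uinput ioctl numbers drop from 300/301 to 44/45 because the nr field of _IOC() is only 8 bits wide, so the old values did not fit. Typical userspace usage is unchanged:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/uinput.h>

void example_query(int uinput_fd)
{
        char sysname[64];
        unsigned int version;

        if (ioctl(uinput_fd, UI_GET_SYSNAME(sizeof(sysname)), sysname) >= 0)
                printf("sysfs name: %s\n", sysname);
        if (ioctl(uinput_fd, UI_GET_VERSION, &version) >= 0)
                printf("uinput protocol: %u\n", version);
}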
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 61c818a7fe70..a3318f31e8e7 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -101,6 +101,13 @@ struct vring {
101 struct vring_used *used; 101 struct vring_used *used;
102}; 102};
103 103
104/* Alignment requirements for vring elements.
105 * When using pre-virtio 1.0 layout, these fall out naturally.
106 */
107#define VRING_AVAIL_ALIGN_SIZE 2
108#define VRING_USED_ALIGN_SIZE 4
109#define VRING_DESC_ALIGN_SIZE 16
110
104/* The standard layout for the ring is a continuous chunk of memory which looks 111/* The standard layout for the ring is a continuous chunk of memory which looks
105 * like this. We assume num is a power of 2. 112 * like this. We assume num is a power of 2.
106 * 113 *
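
Editor's note: the new VRING_*_ALIGN_SIZE constants spell out the minimum alignment of each vring component, which matters once virtio 1.0 lets the three parts be placed independently instead of in the single legacy-layout chunk. A hedged sanity-check sketch:

#include <linux/kernel.h>
#include <linux/virtio_ring.h>

/* Sketch: verify independently allocated ring parts meet the minimum
 * alignment a device may assume with a virtio 1.0 style layout.
 */
static bool example_ring_aligned(const struct vring *vr)
{
        return IS_ALIGNED((unsigned long)vr->desc,  VRING_DESC_ALIGN_SIZE) &&
               IS_ALIGNED((unsigned long)vr->avail, VRING_AVAIL_ALIGN_SIZE) &&
               IS_ALIGNED((unsigned long)vr->used,  VRING_USED_ALIGN_SIZE);
}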
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644
index 000000000000..b47d9d06fade
--- /dev/null
+++ b/include/xen/interface/nmi.h
@@ -0,0 +1,51 @@
1/******************************************************************************
2 * nmi.h
3 *
4 * NMI callback registration and reason codes.
5 *
6 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
7 */
8
9#ifndef __XEN_PUBLIC_NMI_H__
10#define __XEN_PUBLIC_NMI_H__
11
12#include <xen/interface/xen.h>
13
14/*
15 * NMI reason codes:
16 * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
17 */
18 /* I/O-check error reported via ISA port 0x61, bit 6. */
19#define _XEN_NMIREASON_io_error 0
20#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
21 /* PCI SERR reported via ISA port 0x61, bit 7. */
22#define _XEN_NMIREASON_pci_serr 1
23#define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr)
24 /* Unknown hardware-generated NMI. */
25#define _XEN_NMIREASON_unknown 2
26#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
27
28/*
29 * long nmi_op(unsigned int cmd, void *arg)
30 * NB. All ops return zero on success, else a negative error code.
31 */
32
33/*
34 * Register NMI callback for this (calling) VCPU. Currently this only makes
35 * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
36 * arg == pointer to xennmi_callback structure.
37 */
38#define XENNMI_register_callback 0
39struct xennmi_callback {
40 unsigned long handler_address;
41 unsigned long pad;
42};
43DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
44
45/*
46 * Deregister NMI callback for this (calling) VCPU.
47 * arg == NULL.
48 */
49#define XENNMI_unregister_callback 1
50
51#endif /* __XEN_PUBLIC_NMI_H__ */
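
Editor's note: the new public nmi.h mirrors Xen's NMI hypercall interface. On x86 the kernel would issue the op through its nmi_op hypercall wrapper; the sketch below assumes a HYPERVISOR_nmi_op(op, arg) helper as found in the x86 Xen headers, and the handler symbol is hypothetical.

#include <xen/interface/nmi.h>
#include <asm/xen/hypercall.h>          /* assumed: HYPERVISOR_nmi_op() */

extern void example_nmi_entry(void);    /* hypothetical NMI trampoline */

static int example_register_nmi_callback(void)
{
        struct xennmi_callback cb = {
                .handler_address = (unsigned long)example_nmi_entry,
                .pad = 0,
        };

        /* only dom0/vcpu0 is accepted; other callers get EINVAL back */
        return HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
}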
diff --git a/kernel/audit.c b/kernel/audit.c
index f8f203e8018c..72ab759a0b43 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -429,7 +429,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
429 * This function doesn't consume an skb as might be expected since it has to 429 * This function doesn't consume an skb as might be expected since it has to
430 * copy it anyways. 430 * copy it anyways.
431 */ 431 */
432static void kauditd_send_multicast_skb(struct sk_buff *skb) 432static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
433{ 433{
434 struct sk_buff *copy; 434 struct sk_buff *copy;
435 struct audit_net *aunet = net_generic(&init_net, audit_net_id); 435 struct audit_net *aunet = net_generic(&init_net, audit_net_id);
@@ -448,11 +448,11 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
448 * no reason for new multicast clients to continue with this 448 * no reason for new multicast clients to continue with this
449 * non-compliance. 449 * non-compliance.
450 */ 450 */
451 copy = skb_copy(skb, GFP_KERNEL); 451 copy = skb_copy(skb, gfp_mask);
452 if (!copy) 452 if (!copy)
453 return; 453 return;
454 454
455 nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL); 455 nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
456} 456}
457 457
458/* 458/*
@@ -1100,7 +1100,7 @@ static void audit_receive(struct sk_buff *skb)
1100} 1100}
1101 1101
1102/* Run custom bind function on netlink socket group connect or bind requests. */ 1102/* Run custom bind function on netlink socket group connect or bind requests. */
1103static int audit_bind(int group) 1103static int audit_bind(struct net *net, int group)
1104{ 1104{
1105 if (!capable(CAP_AUDIT_READ)) 1105 if (!capable(CAP_AUDIT_READ))
1106 return -EPERM; 1106 return -EPERM;
@@ -1940,7 +1940,7 @@ void audit_log_end(struct audit_buffer *ab)
1940 struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); 1940 struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
1941 1941
1942 nlh->nlmsg_len = ab->skb->len; 1942 nlh->nlmsg_len = ab->skb->len;
1943 kauditd_send_multicast_skb(ab->skb); 1943 kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
1944 1944
1945 /* 1945 /*
1946 * The original kaudit unicast socket sends up messages with 1946 * The original kaudit unicast socket sends up messages with
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 3598e13f2a65..4f68a326d92e 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -442,19 +442,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
442 if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) { 442 if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
443 f->type = AUDIT_LOGINUID_SET; 443 f->type = AUDIT_LOGINUID_SET;
444 f->val = 0; 444 f->val = 0;
445 } 445 entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
446
447 if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
448 struct pid *pid;
449 rcu_read_lock();
450 pid = find_vpid(f->val);
451 if (!pid) {
452 rcu_read_unlock();
453 err = -ESRCH;
454 goto exit_free;
455 }
456 f->val = pid_nr(pid);
457 rcu_read_unlock();
458 } 446 }
459 447
460 err = audit_field_valid(entry, f); 448 err = audit_field_valid(entry, f);
@@ -630,6 +618,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
630 data->buflen += data->values[i] = 618 data->buflen += data->values[i] =
631 audit_pack_string(&bufp, krule->filterkey); 619 audit_pack_string(&bufp, krule->filterkey);
632 break; 620 break;
621 case AUDIT_LOGINUID_SET:
622 if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
623 data->fields[i] = AUDIT_LOGINUID;
624 data->values[i] = AUDIT_UID_UNSET;
625 break;
626 }
627 /* fallthrough if set */
633 default: 628 default:
634 data->values[i] = f->val; 629 data->values[i] = f->val;
635 } 630 }
@@ -646,6 +641,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
646 int i; 641 int i;
647 642
648 if (a->flags != b->flags || 643 if (a->flags != b->flags ||
644 a->pflags != b->pflags ||
649 a->listnr != b->listnr || 645 a->listnr != b->listnr ||
650 a->action != b->action || 646 a->action != b->action ||
651 a->field_count != b->field_count) 647 a->field_count != b->field_count)
@@ -764,6 +760,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
764 new = &entry->rule; 760 new = &entry->rule;
765 new->vers_ops = old->vers_ops; 761 new->vers_ops = old->vers_ops;
766 new->flags = old->flags; 762 new->flags = old->flags;
763 new->pflags = old->pflags;
767 new->listnr = old->listnr; 764 new->listnr = old->listnr;
768 new->action = old->action; 765 new->action = old->action;
769 for (i = 0; i < AUDIT_BITMASK_SIZE; i++) 766 for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c75522a83678..072566dd0caf 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -72,6 +72,8 @@
72#include <linux/fs_struct.h> 72#include <linux/fs_struct.h>
73#include <linux/compat.h> 73#include <linux/compat.h>
74#include <linux/ctype.h> 74#include <linux/ctype.h>
75#include <linux/string.h>
76#include <uapi/linux/limits.h>
75 77
76#include "audit.h" 78#include "audit.h"
77 79
@@ -1861,8 +1863,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
1861 } 1863 }
1862 1864
1863 list_for_each_entry_reverse(n, &context->names_list, list) { 1865 list_for_each_entry_reverse(n, &context->names_list, list) {
1864 /* does the name pointer match? */ 1866 if (!n->name || strcmp(n->name->name, name->name))
1865 if (!n->name || n->name->name != name->name)
1866 continue; 1867 continue;
1867 1868
1868 /* match the correct record type */ 1869 /* match the correct record type */
@@ -1877,12 +1878,48 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
1877 } 1878 }
1878 1879
1879out_alloc: 1880out_alloc:
1880 /* unable to find the name from a previous getname(). Allocate a new 1881 /* unable to find an entry with both a matching name and type */
1881 * anonymous entry. 1882 n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
1882 */
1883 n = audit_alloc_name(context, AUDIT_TYPE_NORMAL);
1884 if (!n) 1883 if (!n)
1885 return; 1884 return;
1885 /* unfortunately, while we may have a path name to record with the
1886 * inode, we can't always rely on the string lasting until the end of
 1887 * the syscall, so we need to create our own copy; it may fail due to
 1888 * memory allocation issues, but we do our best */
1889 if (name) {
1890 /* we can't use getname_kernel() due to size limits */
1891 size_t len = strlen(name->name) + 1;
1892 struct filename *new = __getname();
1893
1894 if (unlikely(!new))
1895 goto out;
1896
1897 if (len <= (PATH_MAX - sizeof(*new))) {
1898 new->name = (char *)(new) + sizeof(*new);
1899 new->separate = false;
1900 } else if (len <= PATH_MAX) {
1901 /* this looks odd, but is due to final_putname() */
1902 struct filename *new2;
1903
1904 new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
1905 if (unlikely(!new2)) {
1906 __putname(new);
1907 goto out;
1908 }
1909 new2->name = (char *)new;
1910 new2->separate = true;
1911 new = new2;
1912 } else {
1913 /* we should never get here, but let's be safe */
1914 __putname(new);
1915 goto out;
1916 }
1917 strlcpy((char *)new->name, name->name, len);
1918 new->uptr = NULL;
1919 new->aname = n;
1920 n->name = new;
1921 n->name_put = true;
1922 }
1886out: 1923out:
1887 if (parent) { 1924 if (parent) {
1888 n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL; 1925 n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1adf62b39b96..07ce18ca71e0 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -27,6 +27,9 @@
27 * version 2. This program is licensed "as is" without any warranty of any 27 * version 2. This program is licensed "as is" without any warranty of any
28 * kind, whether express or implied. 28 * kind, whether express or implied.
29 */ 29 */
30
31#define pr_fmt(fmt) "KGDB: " fmt
32
30#include <linux/pid_namespace.h> 33#include <linux/pid_namespace.h>
31#include <linux/clocksource.h> 34#include <linux/clocksource.h>
32#include <linux/serial_core.h> 35#include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
196 return err; 199 return err;
197 err = kgdb_arch_remove_breakpoint(&tmp); 200 err = kgdb_arch_remove_breakpoint(&tmp);
198 if (err) 201 if (err)
199 printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " 202 pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
200 "memory destroyed at: %lx", addr); 203 addr);
201 return err; 204 return err;
202} 205}
203 206
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
256 error = kgdb_arch_set_breakpoint(&kgdb_break[i]); 259 error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
257 if (error) { 260 if (error) {
258 ret = error; 261 ret = error;
259 printk(KERN_INFO "KGDB: BP install failed: %lx", 262 pr_info("BP install failed: %lx\n",
260 kgdb_break[i].bpt_addr); 263 kgdb_break[i].bpt_addr);
261 continue; 264 continue;
262 } 265 }
263 266
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
319 continue; 322 continue;
320 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); 323 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
321 if (error) { 324 if (error) {
322 printk(KERN_INFO "KGDB: BP remove failed: %lx\n", 325 pr_info("BP remove failed: %lx\n",
323 kgdb_break[i].bpt_addr); 326 kgdb_break[i].bpt_addr);
324 ret = error; 327 ret = error;
325 } 328 }
326 329
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
367 goto setundefined; 370 goto setundefined;
368 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); 371 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
369 if (error) 372 if (error)
370 printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", 373 pr_err("breakpoint remove failed: %lx\n",
371 kgdb_break[i].bpt_addr); 374 kgdb_break[i].bpt_addr);
372setundefined: 375setundefined:
373 kgdb_break[i].state = BP_UNDEFINED; 376 kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
400 if (print_wait) { 403 if (print_wait) {
401#ifdef CONFIG_KGDB_KDB 404#ifdef CONFIG_KGDB_KDB
402 if (!dbg_kdb_mode) 405 if (!dbg_kdb_mode)
403 printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n"); 406 pr_crit("waiting... or $3#33 for KDB\n");
404#else 407#else
405 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); 408 pr_crit("Waiting for remote debugger\n");
406#endif 409#endif
407 } 410 }
408 return 1; 411 return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
430 exception_level = 0; 433 exception_level = 0;
431 kgdb_skipexception(ks->ex_vector, ks->linux_regs); 434 kgdb_skipexception(ks->ex_vector, ks->linux_regs);
432 dbg_activate_sw_breakpoints(); 435 dbg_activate_sw_breakpoints();
433 printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", 436 pr_crit("re-enter error: breakpoint removed %lx\n", addr);
434 addr);
435 WARN_ON_ONCE(1); 437 WARN_ON_ONCE(1);
436 438
437 return 1; 439 return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
444 panic("Recursive entry to debugger"); 446 panic("Recursive entry to debugger");
445 } 447 }
446 448
447 printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); 449 pr_crit("re-enter exception: ALL breakpoints killed\n");
448#ifdef CONFIG_KGDB_KDB 450#ifdef CONFIG_KGDB_KDB
449 /* Allow kdb to debug itself one level */ 451 /* Allow kdb to debug itself one level */
450 return 0; 452 return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
471 int cpu; 473 int cpu;
472 int trace_on = 0; 474 int trace_on = 0;
473 int online_cpus = num_online_cpus(); 475 int online_cpus = num_online_cpus();
476 u64 time_left;
474 477
475 kgdb_info[ks->cpu].enter_kgdb++; 478 kgdb_info[ks->cpu].enter_kgdb++;
476 kgdb_info[ks->cpu].exception_state |= exception_state; 479 kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
595 /* 598 /*
596 * Wait for the other CPUs to be notified and be waiting for us: 599 * Wait for the other CPUs to be notified and be waiting for us:
597 */ 600 */
598 while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) + 601 time_left = loops_per_jiffy * HZ;
599 atomic_read(&slaves_in_kgdb)) != online_cpus) 602 while (kgdb_do_roundup && --time_left &&
603 (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
604 online_cpus)
600 cpu_relax(); 605 cpu_relax();
606 if (!time_left)
607 pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
601 608
602 /* 609 /*
603 * At this point the primary processor is completely 610 * At this point the primary processor is completely
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
795static void sysrq_handle_dbg(int key) 802static void sysrq_handle_dbg(int key)
796{ 803{
797 if (!dbg_io_ops) { 804 if (!dbg_io_ops) {
798 printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); 805 pr_crit("ERROR: No KGDB I/O module available\n");
799 return; 806 return;
800 } 807 }
801 if (!kgdb_connected) { 808 if (!kgdb_connected) {
802#ifdef CONFIG_KGDB_KDB 809#ifdef CONFIG_KGDB_KDB
803 if (!dbg_kdb_mode) 810 if (!dbg_kdb_mode)
804 printk(KERN_CRIT "KGDB or $3#33 for KDB\n"); 811 pr_crit("KGDB or $3#33 for KDB\n");
805#else 812#else
806 printk(KERN_CRIT "Entering KGDB\n"); 813 pr_crit("Entering KGDB\n");
807#endif 814#endif
808 } 815 }
809 816
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
945{ 952{
946 kgdb_break_asap = 0; 953 kgdb_break_asap = 0;
947 954
948 printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); 955 pr_crit("Waiting for connection from remote gdb...\n");
949 kgdb_breakpoint(); 956 kgdb_breakpoint();
950} 957}
951 958
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
964 if (dbg_io_ops) { 971 if (dbg_io_ops) {
965 spin_unlock(&kgdb_registration_lock); 972 spin_unlock(&kgdb_registration_lock);
966 973
967 printk(KERN_ERR "kgdb: Another I/O driver is already " 974 pr_err("Another I/O driver is already registered with KGDB\n");
968 "registered with KGDB.\n");
969 return -EBUSY; 975 return -EBUSY;
970 } 976 }
971 977
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
981 987
982 spin_unlock(&kgdb_registration_lock); 988 spin_unlock(&kgdb_registration_lock);
983 989
984 printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", 990 pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
985 new_dbg_io_ops->name);
986 991
987 /* Arm KGDB now. */ 992 /* Arm KGDB now. */
988 kgdb_register_callbacks(); 993 kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
1017 1022
1018 spin_unlock(&kgdb_registration_lock); 1023 spin_unlock(&kgdb_registration_lock);
1019 1024
1020 printk(KERN_INFO 1025 pr_info("Unregistered I/O driver %s, debugger disabled\n",
1021 "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
1022 old_dbg_io_ops->name); 1026 old_dbg_io_ops->name);
1023} 1027}
1024EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); 1028EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index b20d544f20c2..e1dbf4a2c69e 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
531 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) 531 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
532 bp->bp_free = 1; 532 bp->bp_free = 1;
533 533
534 kdb_register_repeat("bp", kdb_bp, "[<vaddr>]", 534 kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
535 "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS); 535 "Set/Display breakpoints", 0,
536 kdb_register_repeat("bl", kdb_bp, "[<vaddr>]", 536 KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
537 "Display breakpoints", 0, KDB_REPEAT_NO_ARGS); 537 kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
538 "Display breakpoints", 0,
539 KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
538 if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) 540 if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
539 kdb_register_repeat("bph", kdb_bp, "[<vaddr>]", 541 kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
540 "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS); 542 "[datar [length]|dataw [length]] Set hw brk", 0,
541 kdb_register_repeat("bc", kdb_bc, "<bpnum>", 543 KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
542 "Clear Breakpoint", 0, KDB_REPEAT_NONE); 544 kdb_register_flags("bc", kdb_bc, "<bpnum>",
543 kdb_register_repeat("be", kdb_bc, "<bpnum>", 545 "Clear Breakpoint", 0,
544 "Enable Breakpoint", 0, KDB_REPEAT_NONE); 546 KDB_ENABLE_FLOW_CTRL);
545 kdb_register_repeat("bd", kdb_bc, "<bpnum>", 547 kdb_register_flags("be", kdb_bc, "<bpnum>",
546 "Disable Breakpoint", 0, KDB_REPEAT_NONE); 548 "Enable Breakpoint", 0,
547 549 KDB_ENABLE_FLOW_CTRL);
548 kdb_register_repeat("ss", kdb_ss, "", 550 kdb_register_flags("bd", kdb_bc, "<bpnum>",
549 "Single Step", 1, KDB_REPEAT_NO_ARGS); 551 "Disable Breakpoint", 0,
552 KDB_ENABLE_FLOW_CTRL);
553
554 kdb_register_flags("ss", kdb_ss, "",
555 "Single Step", 1,
556 KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
550 /* 557 /*
551 * Architecture dependent initialization. 558 * Architecture dependent initialization.
552 */ 559 */
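
Editor's note: kdb_register_repeat() becomes kdb_register_flags(); the old repeat enum is folded into kdb_cmdflags_t together with the new KDB_ENABLE_* permission bits that the kdb.cmd_enable parameter is checked against. A hedged example of registering an out-of-tree command under the new API (command name and handler are made up):

#include <linux/kdb.h>

/* Hypothetical kdb command: print a fixed message. */
static int example_kdb_cmd(int argc, const char **argv)
{
        kdb_printf("example command ran with %d argument(s)\n", argc);
        return 0;
}

static void example_register(void)
{
        /* inspection-class command that repeats without its arguments */
        kdb_register_flags("example", example_kdb_cmd, "[<arg>]",
                           "Print an example message", 0,
                           KDB_ENABLE_INSPECT | KDB_REPEAT_NO_ARGS);
}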
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8859ca34dcfe..15e1a7af5dd0 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
129 ks->pass_exception = 1; 129 ks->pass_exception = 1;
130 KDB_FLAG_SET(CATASTROPHIC); 130 KDB_FLAG_SET(CATASTROPHIC);
131 } 131 }
132 /* set CATASTROPHIC if the system contains unresponsive processors */
133 for_each_online_cpu(i)
134 if (!kgdb_info[i].enter_kgdb)
135 KDB_FLAG_SET(CATASTROPHIC);
132 if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { 136 if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
133 KDB_STATE_CLEAR(SSBPT); 137 KDB_STATE_CLEAR(SSBPT);
134 KDB_STATE_CLEAR(DOING_SS); 138 KDB_STATE_CLEAR(DOING_SS);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 379650b984f8..f191bddf64b8 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/ctype.h> 14#include <linux/ctype.h>
15#include <linux/types.h>
15#include <linux/string.h> 16#include <linux/string.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/kmsg_dump.h> 18#include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
23#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
24#include <linux/atomic.h> 25#include <linux/atomic.h>
25#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/moduleparam.h>
26#include <linux/mm.h> 28#include <linux/mm.h>
27#include <linux/init.h> 29#include <linux/init.h>
28#include <linux/kallsyms.h> 30#include <linux/kallsyms.h>
@@ -42,6 +44,12 @@
42#include <linux/slab.h> 44#include <linux/slab.h>
43#include "kdb_private.h" 45#include "kdb_private.h"
44 46
47#undef MODULE_PARAM_PREFIX
48#define MODULE_PARAM_PREFIX "kdb."
49
50static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
51module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
52
45#define GREP_LEN 256 53#define GREP_LEN 256
46char kdb_grep_string[GREP_LEN]; 54char kdb_grep_string[GREP_LEN];
47int kdb_grepping_flag; 55int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
121 KDBMSG(BADLENGTH, "Invalid length field"), 129 KDBMSG(BADLENGTH, "Invalid length field"),
122 KDBMSG(NOBP, "No Breakpoint exists"), 130 KDBMSG(NOBP, "No Breakpoint exists"),
123 KDBMSG(BADADDR, "Invalid address"), 131 KDBMSG(BADADDR, "Invalid address"),
132 KDBMSG(NOPERM, "Permission denied"),
124}; 133};
125#undef KDBMSG 134#undef KDBMSG
126 135
@@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu)
188} 197}
189 198
190/* 199/*
200 * Check whether the flags of the current command and the permissions
 201 * of the kdb console allow a command to be run.
202 */
203static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
204 bool no_args)
205{
206 /* permissions comes from userspace so needs massaging slightly */
207 permissions &= KDB_ENABLE_MASK;
208 permissions |= KDB_ENABLE_ALWAYS_SAFE;
209
210 /* some commands change group when launched with no arguments */
211 if (no_args)
212 permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
213
214 flags |= KDB_ENABLE_ALL;
215
216 return permissions & flags;
217}
218
219/*
191 * kdbgetenv - This function will return the character string value of 220 * kdbgetenv - This function will return the character string value of
192 * an environment variable. 221 * an environment variable.
193 * Parameters: 222 * Parameters:
@@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
476 kdb_symtab_t symtab; 505 kdb_symtab_t symtab;
477 506
478 /* 507 /*
508 * If the enable flags prohibit both arbitrary memory access
509 * and flow control then there are no reasonable grounds to
510 * provide symbol lookup.
511 */
512 if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
513 kdb_cmd_enabled, false))
514 return KDB_NOPERM;
515
516 /*
479 * Process arguments which follow the following syntax: 517 * Process arguments which follow the following syntax:
480 * 518 *
481 * symbol | numeric-address [+/- numeric-offset] 519 * symbol | numeric-address [+/- numeric-offset]
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
641 if (!s->count) 679 if (!s->count)
642 s->usable = 0; 680 s->usable = 0;
643 if (s->usable) 681 if (s->usable)
644 kdb_register(s->name, kdb_exec_defcmd, 682 /* macros are always safe because when executed each
645 s->usage, s->help, 0); 683 * internal command re-enters kdb_parse() and is
684 * safety checked individually.
685 */
686 kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
687 s->help, 0,
688 KDB_ENABLE_ALWAYS_SAFE);
646 return 0; 689 return 0;
647 } 690 }
648 if (!s->usable) 691 if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
1003 1046
1004 if (i < kdb_max_commands) { 1047 if (i < kdb_max_commands) {
1005 int result; 1048 int result;
1049
1050 if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
1051 return KDB_NOPERM;
1052
1006 KDB_STATE_SET(CMD); 1053 KDB_STATE_SET(CMD);
1007 result = (*tp->cmd_func)(argc-1, (const char **)argv); 1054 result = (*tp->cmd_func)(argc-1, (const char **)argv);
1008 if (result && ignore_errors && result > KDB_CMD_GO) 1055 if (result && ignore_errors && result > KDB_CMD_GO)
1009 result = 0; 1056 result = 0;
1010 KDB_STATE_CLEAR(CMD); 1057 KDB_STATE_CLEAR(CMD);
1011 switch (tp->cmd_repeat) { 1058
1012 case KDB_REPEAT_NONE: 1059 if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
1013 argc = 0; 1060 return result;
1014 if (argv[0]) 1061
1015 *(argv[0]) = '\0'; 1062 argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
1016 break; 1063 if (argv[argc])
1017 case KDB_REPEAT_NO_ARGS: 1064 *(argv[argc]) = '\0';
1018 argc = 1;
1019 if (argv[1])
1020 *(argv[1]) = '\0';
1021 break;
1022 case KDB_REPEAT_WITH_ARGS:
1023 break;
1024 }
1025 return result; 1065 return result;
1026 } 1066 }
1027 1067
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
1921 */ 1961 */
1922static int kdb_sr(int argc, const char **argv) 1962static int kdb_sr(int argc, const char **argv)
1923{ 1963{
1964 bool check_mask =
1965 !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
1966
1924 if (argc != 1) 1967 if (argc != 1)
1925 return KDB_ARGCOUNT; 1968 return KDB_ARGCOUNT;
1969
1926 kdb_trap_printk++; 1970 kdb_trap_printk++;
1927 __handle_sysrq(*argv[1], false); 1971 __handle_sysrq(*argv[1], check_mask);
1928 kdb_trap_printk--; 1972 kdb_trap_printk--;
1929 1973
1930 return 0; 1974 return 0;
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
2157 for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { 2201 for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
2158 if (!cpu_online(i)) { 2202 if (!cpu_online(i)) {
2159 state = 'F'; /* cpu is offline */ 2203 state = 'F'; /* cpu is offline */
2204 } else if (!kgdb_info[i].enter_kgdb) {
2205 state = 'D'; /* cpu is online but unresponsive */
2160 } else { 2206 } else {
2161 state = ' '; /* cpu is responding to kdb */ 2207 state = ' '; /* cpu is responding to kdb */
2162 if (kdb_task_state_char(KDB_TSK(i)) == 'I') 2208 if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
2210 /* 2256 /*
2211 * Validate cpunum 2257 * Validate cpunum
2212 */ 2258 */
2213 if ((cpunum > NR_CPUS) || !cpu_online(cpunum)) 2259 if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
2214 return KDB_BADCPUNUM; 2260 return KDB_BADCPUNUM;
2215 2261
2216 dbg_switch_cpu = cpunum; 2262 dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
2375 return 0; 2421 return 0;
2376 if (!kt->cmd_name) 2422 if (!kt->cmd_name)
2377 continue; 2423 continue;
2424 if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
2425 continue;
2378 if (strlen(kt->cmd_usage) > 20) 2426 if (strlen(kt->cmd_usage) > 20)
2379 space = "\n "; 2427 space = "\n ";
2380 kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name, 2428 kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
2629} 2677}
2630 2678
2631/* 2679/*
2632 * kdb_register_repeat - This function is used to register a kernel 2680 * kdb_register_flags - This function is used to register a kernel
2633 * debugger command. 2681 * debugger command.
2634 * Inputs: 2682 * Inputs:
2635 * cmd Command name 2683 * cmd Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
2641 * zero for success, one if a duplicate command. 2689 * zero for success, one if a duplicate command.
2642 */ 2690 */
2643#define kdb_command_extend 50 /* arbitrary */ 2691#define kdb_command_extend 50 /* arbitrary */
2644int kdb_register_repeat(char *cmd, 2692int kdb_register_flags(char *cmd,
2645 kdb_func_t func, 2693 kdb_func_t func,
2646 char *usage, 2694 char *usage,
2647 char *help, 2695 char *help,
2648 short minlen, 2696 short minlen,
2649 kdb_repeat_t repeat) 2697 kdb_cmdflags_t flags)
2650{ 2698{
2651 int i; 2699 int i;
2652 kdbtab_t *kp; 2700 kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
2694 kp->cmd_func = func; 2742 kp->cmd_func = func;
2695 kp->cmd_usage = usage; 2743 kp->cmd_usage = usage;
2696 kp->cmd_help = help; 2744 kp->cmd_help = help;
2697 kp->cmd_flags = 0;
2698 kp->cmd_minlen = minlen; 2745 kp->cmd_minlen = minlen;
2699 kp->cmd_repeat = repeat; 2746 kp->cmd_flags = flags;
2700 2747
2701 return 0; 2748 return 0;
2702} 2749}
2703EXPORT_SYMBOL_GPL(kdb_register_repeat); 2750EXPORT_SYMBOL_GPL(kdb_register_flags);
2704 2751
2705 2752
2706/* 2753/*
2707 * kdb_register - Compatibility register function for commands that do 2754 * kdb_register - Compatibility register function for commands that do
2708 * not need to specify a repeat state. Equivalent to 2755 * not need to specify a repeat state. Equivalent to
2709 * kdb_register_repeat with KDB_REPEAT_NONE. 2756 * kdb_register_flags with flags set to 0.
2710 * Inputs: 2757 * Inputs:
2711 * cmd Command name 2758 * cmd Command name
2712 * func Function to execute the command 2759 * func Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
2721 char *help, 2768 char *help,
2722 short minlen) 2769 short minlen)
2723{ 2770{
2724 return kdb_register_repeat(cmd, func, usage, help, minlen, 2771 return kdb_register_flags(cmd, func, usage, help, minlen, 0);
2725 KDB_REPEAT_NONE);
2726} 2772}
2727EXPORT_SYMBOL_GPL(kdb_register); 2773EXPORT_SYMBOL_GPL(kdb_register);
2728 2774
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
2764 for_each_kdbcmd(kp, i) 2810 for_each_kdbcmd(kp, i)
2765 kp->cmd_name = NULL; 2811 kp->cmd_name = NULL;
2766 2812
2767 kdb_register_repeat("md", kdb_md, "<vaddr>", 2813 kdb_register_flags("md", kdb_md, "<vaddr>",
2768 "Display Memory Contents, also mdWcN, e.g. md8c1", 1, 2814 "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
2769 KDB_REPEAT_NO_ARGS); 2815 KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
-	  "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
-	  "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("mds", kdb_md, "<vaddr>",
-	  "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
-	  "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("go", kdb_go, "[<vaddr>]",
-	  "Continue Execution", 1, KDB_REPEAT_NONE);
-	kdb_register_repeat("rd", kdb_rd, "",
-	  "Display Registers", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
-	  "Modify Registers", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("ef", kdb_ef, "<vaddr>",
-	  "Display exception frame", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
-	  "Stack traceback", 1, KDB_REPEAT_NONE);
-	kdb_register_repeat("btp", kdb_bt, "<pid>",
-	  "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
-	  "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("btc", kdb_bt, "",
-	  "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("btt", kdb_bt, "<vaddr>",
-	  "Backtrace process given its struct task address", 0,
-	  KDB_REPEAT_NONE);
-	kdb_register_repeat("env", kdb_env, "",
-	  "Show environment variables", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("set", kdb_set, "",
-	  "Set environment variables", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("help", kdb_help, "",
-	  "Display Help Message", 1, KDB_REPEAT_NONE);
-	kdb_register_repeat("?", kdb_help, "",
-	  "Display Help Message", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
-	  "Switch to new cpu", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("kgdb", kdb_kgdb, "",
-	  "Enter kgdb mode", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
-	  "Display active task list", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("pid", kdb_pid, "<pidnum>",
-	  "Switch to another task", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("reboot", kdb_reboot, "",
-	  "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+	  "Display Raw Memory", 0,
+	  KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+	  "Display Physical Memory", 0,
+	  KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("mds", kdb_md, "<vaddr>",
+	  "Display Memory Symbolically", 0,
+	  KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+	  "Modify Memory Contents", 0,
+	  KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("go", kdb_go, "[<vaddr>]",
+	  "Continue Execution", 1,
+	  KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+	kdb_register_flags("rd", kdb_rd, "",
+	  "Display Registers", 0,
+	  KDB_ENABLE_REG_READ);
+	kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+	  "Modify Registers", 0,
+	  KDB_ENABLE_REG_WRITE);
+	kdb_register_flags("ef", kdb_ef, "<vaddr>",
+	  "Display exception frame", 0,
+	  KDB_ENABLE_MEM_READ);
+	kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+	  "Stack traceback", 1,
+	  KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+	kdb_register_flags("btp", kdb_bt, "<pid>",
+	  "Display stack for process <pid>", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+	  "Backtrace all processes matching state flag", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("btc", kdb_bt, "",
+	  "Backtrace current process on each cpu", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("btt", kdb_bt, "<vaddr>",
+	  "Backtrace process given its struct task address", 0,
+	  KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+	kdb_register_flags("env", kdb_env, "",
+	  "Show environment variables", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("set", kdb_set, "",
+	  "Set environment variables", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("help", kdb_help, "",
+	  "Display Help Message", 1,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("?", kdb_help, "",
+	  "Display Help Message", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+	  "Switch to new cpu", 0,
+	  KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+	kdb_register_flags("kgdb", kdb_kgdb, "",
+	  "Enter kgdb mode", 0, 0);
+	kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+	  "Display active task list", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("pid", kdb_pid, "<pidnum>",
+	  "Switch to another task", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("reboot", kdb_reboot, "",
+	  "Reboot the machine immediately", 0,
+	  KDB_ENABLE_REBOOT);
 #if defined(CONFIG_MODULES)
-	kdb_register_repeat("lsmod", kdb_lsmod, "",
-	  "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("lsmod", kdb_lsmod, "",
+	  "List loaded kernel modules", 0,
+	  KDB_ENABLE_INSPECT);
 #endif
 #if defined(CONFIG_MAGIC_SYSRQ)
-	kdb_register_repeat("sr", kdb_sr, "<key>",
-	  "Magic SysRq key", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("sr", kdb_sr, "<key>",
+	  "Magic SysRq key", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
 #endif
 #if defined(CONFIG_PRINTK)
-	kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
-	  "Display syslog buffer", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
+	  "Display syslog buffer", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
 #endif
 	if (arch_kgdb_ops.enable_nmi) {
-		kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
-		  "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
+		kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
+		  "Disable NMI entry to KDB", 0,
+		  KDB_ENABLE_ALWAYS_SAFE);
 	}
-	kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
-	  "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
-	  "Send a signal to a process", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("summary", kdb_summary, "",
-	  "Summarize the system", 4, KDB_REPEAT_NONE);
-	kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
-	  "Display per_cpu variables", 3, KDB_REPEAT_NONE);
-	kdb_register_repeat("grephelp", kdb_grep_help, "",
-	  "Display help on | grep", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+	  "Define a set of commands, down to endefcmd", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
+	  "Send a signal to a process", 0,
+	  KDB_ENABLE_SIGNAL);
+	kdb_register_flags("summary", kdb_summary, "",
+	  "Summarize the system", 4,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
+	  "Display per_cpu variables", 3,
+	  KDB_ENABLE_MEM_READ);
+	kdb_register_flags("grephelp", kdb_grep_help, "",
+	  "Display help on | grep", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
 }
 
 /* Execute any commands defined in kdb_cmds. */
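For orientation, below is a minimal out-of-tree sketch of the new registration API used above. The command name, handler body and the choice of KDB_ENABLE_MEM_READ are illustrative assumptions, not part of this patch.

#include <linux/kdb.h>		/* kdb_register_flags(), KDB_ENABLE_* */
#include <linux/module.h>

/* Hypothetical handler: kdb hands the command its argc/argv. */
static int kdb_hello(int argc, const char **argv)
{
	kdb_printf("hello from kdb, argc=%d\n", argc);
	return 0;
}

static int __init kdb_hello_init(void)
{
	/* Read-only command, so grant it only the memory-read capability. */
	return kdb_register_flags("hello", kdb_hello, "[<arg>]",
				  "Print a greeting", 0,
				  KDB_ENABLE_MEM_READ);
}
module_init(kdb_hello_init);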
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 7afd3c8c41d5..eaacd1693954 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -172,10 +172,9 @@ typedef struct _kdbtab {
 	kdb_func_t cmd_func;		/* Function to execute command */
 	char *cmd_usage;		/* Usage String for this command */
 	char *cmd_help;			/* Help message for this command */
-	short cmd_flags;		/* Parsing flags */
 	short cmd_minlen;		/* Minimum legal # command
 					 * chars required */
-	kdb_repeat_t cmd_repeat;	/* Does command auto repeat on enter? */
+	kdb_cmdflags_t cmd_flags;	/* Command behaviour flags */
 } kdbtab_t;
 
 extern int kdb_bt(int, const char **);	/* KDB display back trace */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c1ee7f2bebc..882f835a0d85 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
 }
 
 static void perf_sample_regs_user(struct perf_regs *regs_user,
-				  struct pt_regs *regs)
+				  struct pt_regs *regs,
+				  struct pt_regs *regs_user_copy)
 {
-	if (!user_mode(regs)) {
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-
-	if (regs) {
-		regs_user->abi = perf_reg_abi(current);
+	if (user_mode(regs)) {
+		regs_user->abi = perf_reg_abi(current);
 		regs_user->regs = regs;
+	} else if (current->mm) {
+		perf_get_regs_user(regs_user, regs, regs_user_copy);
 	} else {
 		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
 		regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
 	}
 
 	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
-		perf_sample_regs_user(&data->regs_user, regs);
+		perf_sample_regs_user(&data->regs_user, regs,
+				      &data->regs_user_copy);
 
 	if (sample_type & PERF_SAMPLE_REGS_USER) {
 		/* regs dump ABI info */
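The reworked perf_sample_regs_user() above hands the architecture a scratch pt_regs (regs_user_copy) so user registers can be materialized without pointing the sample at live kernel-stack state. A rough sketch of what a trivial perf_get_regs_user() might look like follows; real architectures (e.g. x86) do considerably more careful work, so treat this purely as an illustration of the calling convention.

#include <linux/perf_regs.h>
#include <linux/sched.h>

/* Illustrative only: copy the task's saved user registers into the
 * buffer supplied by the generic code, then point the sample at that
 * private copy instead of at regs.
 */
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs,
			struct pt_regs *regs_user_copy)
{
	*regs_user_copy = *task_pt_regs(current);
	regs_user->regs = regs_user_copy;
	regs_user->abi  = perf_reg_abi(current);
}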
diff --git a/kernel/exit.c b/kernel/exit.c
index 1ea4369890a3..6806c55475ee 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 static int wait_consider_task(struct wait_opts *wo, int ptrace,
 			      struct task_struct *p)
 {
+	/*
+	 * We can race with wait_task_zombie() from another thread.
+	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+	 * can't confuse the checks below.
+	 */
+	int exit_state = ACCESS_ONCE(p->exit_state);
 	int ret;
 
-	if (unlikely(p->exit_state == EXIT_DEAD))
+	if (unlikely(exit_state == EXIT_DEAD))
 		return 0;
 
 	ret = eligible_child(wo, p);
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 		return 0;
 	}
 
-	if (unlikely(p->exit_state == EXIT_TRACE)) {
+	if (unlikely(exit_state == EXIT_TRACE)) {
 		/*
 		 * ptrace == 0 means we are the natural parent. In this case
 		 * we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 	}
 
 	/* slay zombie? */
-	if (p->exit_state == EXIT_ZOMBIE) {
+	if (exit_state == EXIT_ZOMBIE) {
 		/* we don't reap group leaders with subthreads */
 		if (!delay_group_leader(p)) {
 			/*
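The exit.c change is the read-once pattern: p->exit_state is loaded a single time into a local, so every later comparison sees one consistent value even if another thread moves the task from EXIT_ZOMBIE to EXIT_DEAD/EXIT_TRACE in between. A compressed illustration of that idiom (a sketch, assuming <linux/sched.h>; the helper name is made up):

#include <linux/sched.h>

/* Without the snapshot, each '==' below would re-read p->exit_state
 * and could observe a different value than the previous test did.
 */
static int classify_exit_state(struct task_struct *p)
{
	int exit_state = ACCESS_ONCE(p->exit_state);	/* one racy read */

	if (exit_state == EXIT_DEAD)
		return 0;
	if (exit_state == EXIT_TRACE)
		return 1;
	return exit_state == EXIT_ZOMBIE ? 2 : 3;
}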
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 5cf6731b98e9..3ef3736002d8 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
 	DEBUG_LOCKS_WARN_ON(lock->owner != current);
 
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-	mutex_clear_owner(lock);
 	}
 
 	/*
 	 * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
 	 * mutexes so that we can do it here after we've verified state.
 	 */
+	mutex_clear_owner(lock);
 	atomic_set(&lock->count, 1);
 }
 
92 92
diff --git a/kernel/range.c b/kernel/range.c
index 322ea8e93e4b..82cfc285b046 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2)
 {
 	const struct range *r1 = x1;
 	const struct range *r2 = x2;
-	s64 start1, start2;
 
-	start1 = r1->start;
-	start2 = r2->start;
-
-	return start1 - start2;
+	if (r1->start < r2->start)
+		return -1;
+	if (r1->start > r2->start)
+		return 1;
+	return 0;
 }
 
 int clean_sort_range(struct range *range, int az)
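The old comparator returned start1 - start2 through cmp_range()'s int return type, so an s64 difference wider than 32 bits could truncate to 0 or flip sign and clean_sort_range() could misorder entries. A standalone demonstration of the failure mode, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Buggy: the s64 difference is narrowed to int (truncation on common ABIs). */
static int cmp_sub(int64_t a, int64_t b)  { return a - b; }
/* Safe: three-way compare never overflows. */
static int cmp_3way(int64_t a, int64_t b) { return (a > b) - (a < b); }

int main(void)
{
	int64_t a = 0, b = 0x100000000LL;	/* differ only above bit 31 */

	printf("sub: %d  3way: %d\n", cmp_sub(a, b), cmp_3way(a, b));
	/* cmp_sub prints 0 ("equal"); cmp_3way prints -1 (correct order). */
	return 0;
}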
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b78add6..c0accc00566e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7113,9 +7113,6 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	alloc_size += num_possible_cpus() * cpumask_size();
-#endif
 	if (alloc_size) {
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+	}
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	for_each_possible_cpu(i) {
-		per_cpu(load_balance_mask, i) = (void *)ptr;
-		ptr += cpumask_size();
-	}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
 	init_rt_bandwidth(&def_rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
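The sched_init() change above stops carving the per-CPU cpumasks out of one big kzalloc block and instead gives each CPU its own node-local allocation. The general pattern, sketched with a hypothetical per-CPU pointer (pcpu_buf and its size are not from this patch):

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(void *, pcpu_buf);

static int __init alloc_pcpu_bufs(void)
{
	int i;

	for_each_possible_cpu(i) {
		/* Place each CPU's buffer on memory local to that CPU. */
		per_cpu(pcpu_buf, i) = kzalloc_node(PAGE_SIZE, GFP_KERNEL,
						    cpu_to_node(i));
		if (!per_cpu(pcpu_buf, i))
			return -ENOMEM;
	}
	return 0;
}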
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e5db8c6feebd..b52092f2636d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-	int rorun = dl_se->runtime <= 0;
-
-	if (!rorun && !dmiss)
-		return 0;
-
-	/*
-	 * If we are beyond our current deadline and we are still
-	 * executing, then we have already used some of the runtime of
-	 * the next instance. Thus, if we do not account that, we are
-	 * stealing bandwidth from the system at each deadline miss!
-	 */
-	if (dmiss) {
-		dl_se->runtime = rorun ? dl_se->runtime : 0;
-		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-	}
-
-	return 1;
+	return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
 	 * parameters of the task might need updating. Otherwise,
 	 * we want a replenishment of its runtime.
 	 */
-	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-		replenish_dl_entity(dl_se, pi_se);
-	else
+	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
 		update_dl_entity(dl_se, pi_se);
+	else if (flags & ENQUEUE_REPLENISH)
+		replenish_dl_entity(dl_se, pi_se);
 
 	__enqueue_dl_entity(dl_se);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df2cdf77f899..40667cbf371b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+	/* init_cfs_bandwidth() was not called */
+	if (!cfs_b->throttled_cfs_rq.next)
+		return;
+
 	hrtimer_cancel(&cfs_b->period_timer);
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 	 * wl = S * s'_i; see (2)
 	 */
 	if (W > 0 && w < W)
-		wl = (w * tg->shares) / W;
+		wl = (w * (long)tg->shares) / W;
 	else
 		wl = tg->shares;
 
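The (long) cast in effective_load() matters because tg->shares is an unsigned long while the weight w can be negative; without the cast the whole product is computed in unsigned arithmetic and wraps to a huge value. A standalone demonstration with made-up numbers:

#include <stdio.h>

int main(void)
{
	long w = -512;			/* weights can be negative here */
	unsigned long shares = 1024;
	long W = 2048;

	/* w is converted to unsigned long, so the product wraps. */
	printf("unsigned: %ld\n", (long)((w * shares) / W));	/* huge garbage */
	/* Casting shares keeps the arithmetic signed. */
	printf("signed:   %ld\n", (w * (long)shares) / W);	/* -256 */
	return 0;
}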
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 929a733d302e..224e768bdc73 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
2497} 2497}
2498 2498
2499static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2499static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2500 struct ftrace_hash *old_hash) 2500 struct ftrace_ops_hash *old_hash)
2501{ 2501{
2502 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2502 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2503 ops->old_hash.filter_hash = old_hash; 2503 ops->old_hash.filter_hash = old_hash->filter_hash;
2504 ops->old_hash.notrace_hash = old_hash->notrace_hash;
2504 ftrace_run_update_code(command); 2505 ftrace_run_update_code(command);
2505 ops->old_hash.filter_hash = NULL; 2506 ops->old_hash.filter_hash = NULL;
2507 ops->old_hash.notrace_hash = NULL;
2506 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2508 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2507} 2509}
2508 2510
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
3579 3581
3580static int ftrace_probe_registered; 3582static int ftrace_probe_registered;
3581 3583
3582static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash) 3584static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3583{ 3585{
3584 int ret; 3586 int ret;
3585 int i; 3587 int i;
@@ -3637,6 +3639,7 @@ int
3637register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3639register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3638 void *data) 3640 void *data)
3639{ 3641{
3642 struct ftrace_ops_hash old_hash_ops;
3640 struct ftrace_func_probe *entry; 3643 struct ftrace_func_probe *entry;
3641 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; 3644 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3642 struct ftrace_hash *old_hash = *orig_hash; 3645 struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3658 3661
3659 mutex_lock(&trace_probe_ops.func_hash->regex_lock); 3662 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3660 3663
3664 old_hash_ops.filter_hash = old_hash;
3665 /* Probes only have filters */
3666 old_hash_ops.notrace_hash = NULL;
3667
3661 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 3668 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3662 if (!hash) { 3669 if (!hash) {
3663 count = -ENOMEM; 3670 count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3718 3725
3719 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3726 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3720 3727
3721 __enable_ftrace_function_probe(old_hash); 3728 __enable_ftrace_function_probe(&old_hash_ops);
3722 3729
3723 if (!ret) 3730 if (!ret)
3724 free_ftrace_hash_rcu(old_hash); 3731 free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4006} 4013}
4007 4014
4008static void ftrace_ops_update_code(struct ftrace_ops *ops, 4015static void ftrace_ops_update_code(struct ftrace_ops *ops,
4009 struct ftrace_hash *old_hash) 4016 struct ftrace_ops_hash *old_hash)
4010{ 4017{
4011 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) 4018 struct ftrace_ops *op;
4019
4020 if (!ftrace_enabled)
4021 return;
4022
4023 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4012 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 4024 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4025 return;
4026 }
4027
4028 /*
4029 * If this is the shared global_ops filter, then we need to
4030 * check if there is another ops that shares it, is enabled.
4031 * If so, we still need to run the modify code.
4032 */
4033 if (ops->func_hash != &global_ops.local_hash)
4034 return;
4035
4036 do_for_each_ftrace_op(op, ftrace_ops_list) {
4037 if (op->func_hash == &global_ops.local_hash &&
4038 op->flags & FTRACE_OPS_FL_ENABLED) {
4039 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4040 /* Only need to do this once */
4041 return;
4042 }
4043 } while_for_each_ftrace_op(op);
4013} 4044}
4014 4045
4015static int 4046static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4017 unsigned long ip, int remove, int reset, int enable) 4048 unsigned long ip, int remove, int reset, int enable)
4018{ 4049{
4019 struct ftrace_hash **orig_hash; 4050 struct ftrace_hash **orig_hash;
4051 struct ftrace_ops_hash old_hash_ops;
4020 struct ftrace_hash *old_hash; 4052 struct ftrace_hash *old_hash;
4021 struct ftrace_hash *hash; 4053 struct ftrace_hash *hash;
4022 int ret; 4054 int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4053 4085
4054 mutex_lock(&ftrace_lock); 4086 mutex_lock(&ftrace_lock);
4055 old_hash = *orig_hash; 4087 old_hash = *orig_hash;
4088 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4089 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4056 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 4090 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4057 if (!ret) { 4091 if (!ret) {
4058 ftrace_ops_update_code(ops, old_hash); 4092 ftrace_ops_update_code(ops, &old_hash_ops);
4059 free_ftrace_hash_rcu(old_hash); 4093 free_ftrace_hash_rcu(old_hash);
4060 } 4094 }
4061 mutex_unlock(&ftrace_lock); 4095 mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
4267int ftrace_regex_release(struct inode *inode, struct file *file) 4301int ftrace_regex_release(struct inode *inode, struct file *file)
4268{ 4302{
4269 struct seq_file *m = (struct seq_file *)file->private_data; 4303 struct seq_file *m = (struct seq_file *)file->private_data;
4304 struct ftrace_ops_hash old_hash_ops;
4270 struct ftrace_iterator *iter; 4305 struct ftrace_iterator *iter;
4271 struct ftrace_hash **orig_hash; 4306 struct ftrace_hash **orig_hash;
4272 struct ftrace_hash *old_hash; 4307 struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
4300 4335
4301 mutex_lock(&ftrace_lock); 4336 mutex_lock(&ftrace_lock);
4302 old_hash = *orig_hash; 4337 old_hash = *orig_hash;
4338 old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
4339 old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
4303 ret = ftrace_hash_move(iter->ops, filter_hash, 4340 ret = ftrace_hash_move(iter->ops, filter_hash,
4304 orig_hash, iter->hash); 4341 orig_hash, iter->hash);
4305 if (!ret) { 4342 if (!ret) {
4306 ftrace_ops_update_code(iter->ops, old_hash); 4343 ftrace_ops_update_code(iter->ops, &old_hash_ops);
4307 free_ftrace_hash_rcu(old_hash); 4344 free_ftrace_hash_rcu(old_hash);
4308 } 4345 }
4309 mutex_unlock(&ftrace_lock); 4346 mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2e767972e99c..4a9079b9f082 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6918,7 +6918,6 @@ void __init trace_init(void)
 		tracepoint_printk = 0;
 	}
 	tracer_alloc_buffers();
-	init_ftrace_syscalls();
 	trace_event_init();
 }
 
6924 6923
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 366a78a3e61e..b03a0ea77b99 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
 	return 0;
 }
 
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+	char *buf = bootup_event_buf;
+	char *token;
+	int ret;
+
+	while (true) {
+		token = strsep(&buf, ",");
+
+		if (!token)
+			break;
+		if (!*token)
+			continue;
+
+		/* Restarting syscalls requires that we stop them first */
+		if (disable_first)
+			ftrace_set_clr_event(tr, token, 0);
+
+		ret = ftrace_set_clr_event(tr, token, 1);
+		if (ret)
+			pr_warn("Failed to enable trace event: %s\n", token);
+
+		/* Put back the comma to allow this to be called again */
+		if (buf)
+			*(buf - 1) = ',';
+	}
+}
+
 static __init int event_trace_enable(void)
 {
 	struct trace_array *tr = top_trace_array();
 	struct ftrace_event_call **iter, *call;
-	char *buf = bootup_event_buf;
-	char *token;
 	int ret;
 
 	if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
 	 */
 	__trace_early_add_events(tr);
 
-	while (true) {
-		token = strsep(&buf, ",");
-
-		if (!token)
-			break;
-		if (!*token)
-			continue;
-
-		ret = ftrace_set_clr_event(tr, token, 1);
-		if (ret)
-			pr_warn("Failed to enable trace event: %s\n", token);
-	}
+	early_enable_events(tr, false);
 
 	trace_printk_start_comm();
 
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
 	return 0;
 }
 
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, and this flag is never set, making
+ * the syscall tracepoint never get reached, but the event is enabled
+ * regardless (and not doing anything).
+ */
+static __init int event_trace_enable_again(void)
+{
+	struct trace_array *tr;
+
+	tr = top_trace_array();
+	if (!tr)
+		return -ENODEV;
+
+	early_enable_events(tr, true);
+
+	return 0;
+}
+
+early_initcall(event_trace_enable_again);
+
 static __init int event_trace_init(void)
 {
 	struct trace_array *tr;
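early_enable_events() relies on strsep() destroying the boot list as it walks it, then patches each ',' back so the very same bootup_event_buf can be re-parsed by event_trace_enable_again(). A standalone illustration of that parse-then-restore idiom (strsep() is a BSD/glibc extension):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char list[] = "sched:sched_switch,irq:*,syscalls";
	char *buf = list;
	char *token;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (*token)
			printf("enable %s\n", token);
		if (buf)			/* not the last token yet */
			*(buf - 1) = ',';	/* undo the NUL strsep wrote */
	}
	printf("list intact again: %s\n", list);
	return 0;
}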
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index b0b1c44e923a..3ccf5c2c1320 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv)
 
 static __init int kdb_ftrace_register(void)
 {
-	kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
-			    "Dump ftrace log", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+			    "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
 	return 0;
 }
 
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 358eb81fa28d..c635a107a7de 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -73,6 +73,31 @@ config KGDB_KDB
 	help
 	  KDB frontend for kernel
 
+config KDB_DEFAULT_ENABLE
+	hex "KDB: Select kdb command functions to be enabled by default"
+	depends on KGDB_KDB
+	default 0x1
+	help
+	  Specifiers which kdb commands are enabled by default. This may
+	  be set to 1 or 0 to enable all commands or disable almost all
+	  commands.
+
+	  Alternatively the following bitmask applies:
+
+	    0x0002 - allow arbitrary reads from memory and symbol lookup
+	    0x0004 - allow arbitrary writes to memory
+	    0x0008 - allow current register state to be inspected
+	    0x0010 - allow current register state to be modified
+	    0x0020 - allow passive inspection (backtrace, process list, lsmod)
+	    0x0040 - allow flow control management (breakpoint, single step)
+	    0x0080 - enable signalling of processes
+	    0x0100 - allow machine to be rebooted
+
+	  The config option merely sets the default at boot time. Both
+	  issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+	  setting with kdb.cmd_enable=X kernel command line option will
+	  override the default settings.
+
 config KDB_KEYBOARD
 	bool "KGDB_KDB: keyboard as input device"
 	depends on VT && KGDB_KDB
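The bitmask documented in the help text above mirrors the KDB_ENABLE_* flags passed to kdb_register_flags() earlier in this series. The mapping below is inferred from that help text and is illustrative only; check it against include/linux/kdb.h before relying on it.

/* Illustrative mapping of KDB_DEFAULT_ENABLE mask bits to flag names. */
enum {
	KDB_ENABLE_ALL		= 0x0001,	/* "1" enables everything */
	KDB_ENABLE_MEM_READ	= 0x0002,
	KDB_ENABLE_MEM_WRITE	= 0x0004,
	KDB_ENABLE_REG_READ	= 0x0008,
	KDB_ENABLE_REG_WRITE	= 0x0010,
	KDB_ENABLE_INSPECT	= 0x0020,
	KDB_ENABLE_FLOW_CTRL	= 0x0040,
	KDB_ENABLE_SIGNAL	= 0x0080,
	KDB_ENABLE_REBOOT	= 0x0100,
};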
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 2404d03e251a..03dd576e6773 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -11,6 +11,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 //#define DEBUG
+#include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/assoc_array_priv.h>
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 56badfc4810a..957d3da53ddd 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC
 	depends on !KMEMCHECK
 	select PAGE_EXTENSION
 	select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
-	select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	---help---
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
@@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC
 	  that would result in incorrect warnings of memory corruption after
 	  a resume because free pages are not saved to the suspend image.
 
-config WANT_PAGE_DEBUG_FLAGS
-	bool
-
 config PAGE_POISONING
 	bool
-	select WANT_PAGE_DEBUG_FLAGS
-
-config PAGE_GUARD
-	bool
-	select WANT_PAGE_DEBUG_FLAGS
diff --git a/mm/filemap.c b/mm/filemap.c
index bd8543c6508f..673e4581a2e5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1046,8 +1046,7 @@ EXPORT_SYMBOL(find_lock_entry);
1046 * @mapping: the address_space to search 1046 * @mapping: the address_space to search
1047 * @offset: the page index 1047 * @offset: the page index
1048 * @fgp_flags: PCG flags 1048 * @fgp_flags: PCG flags
1049 * @cache_gfp_mask: gfp mask to use for the page cache data page allocation 1049 * @gfp_mask: gfp mask to use for the page cache data page allocation
1050 * @radix_gfp_mask: gfp mask to use for radix tree node allocation
1051 * 1050 *
1052 * Looks up the page cache slot at @mapping & @offset. 1051 * Looks up the page cache slot at @mapping & @offset.
1053 * 1052 *
@@ -1056,11 +1055,9 @@ EXPORT_SYMBOL(find_lock_entry);
1056 * FGP_ACCESSED: the page will be marked accessed 1055 * FGP_ACCESSED: the page will be marked accessed
1057 * FGP_LOCK: Page is return locked 1056 * FGP_LOCK: Page is return locked
1058 * FGP_CREAT: If page is not present then a new page is allocated using 1057 * FGP_CREAT: If page is not present then a new page is allocated using
1059 * @cache_gfp_mask and added to the page cache and the VM's LRU 1058 * @gfp_mask and added to the page cache and the VM's LRU
1060 * list. If radix tree nodes are allocated during page cache 1059 * list. The page is returned locked and with an increased
1061 * insertion then @radix_gfp_mask is used. The page is returned 1060 * refcount. Otherwise, %NULL is returned.
1062 * locked and with an increased refcount. Otherwise, %NULL is
1063 * returned.
1064 * 1061 *
1065 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even 1062 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1066 * if the GFP flags specified for FGP_CREAT are atomic. 1063 * if the GFP flags specified for FGP_CREAT are atomic.
@@ -1068,7 +1065,7 @@ EXPORT_SYMBOL(find_lock_entry);
1068 * If there is a page cache page, it is returned with an increased refcount. 1065 * If there is a page cache page, it is returned with an increased refcount.
1069 */ 1066 */
1070struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, 1067struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
1071 int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask) 1068 int fgp_flags, gfp_t gfp_mask)
1072{ 1069{
1073 struct page *page; 1070 struct page *page;
1074 1071
@@ -1105,13 +1102,11 @@ no_page:
1105 if (!page && (fgp_flags & FGP_CREAT)) { 1102 if (!page && (fgp_flags & FGP_CREAT)) {
1106 int err; 1103 int err;
1107 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) 1104 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
1108 cache_gfp_mask |= __GFP_WRITE; 1105 gfp_mask |= __GFP_WRITE;
1109 if (fgp_flags & FGP_NOFS) { 1106 if (fgp_flags & FGP_NOFS)
1110 cache_gfp_mask &= ~__GFP_FS; 1107 gfp_mask &= ~__GFP_FS;
1111 radix_gfp_mask &= ~__GFP_FS;
1112 }
1113 1108
1114 page = __page_cache_alloc(cache_gfp_mask); 1109 page = __page_cache_alloc(gfp_mask);
1115 if (!page) 1110 if (!page)
1116 return NULL; 1111 return NULL;
1117 1112
@@ -1122,7 +1117,8 @@ no_page:
1122 if (fgp_flags & FGP_ACCESSED) 1117 if (fgp_flags & FGP_ACCESSED)
1123 __SetPageReferenced(page); 1118 __SetPageReferenced(page);
1124 1119
1125 err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask); 1120 err = add_to_page_cache_lru(page, mapping, offset,
1121 gfp_mask & GFP_RECLAIM_MASK);
1126 if (unlikely(err)) { 1122 if (unlikely(err)) {
1127 page_cache_release(page); 1123 page_cache_release(page);
1128 page = NULL; 1124 page = NULL;
@@ -2443,8 +2439,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
2443 fgp_flags |= FGP_NOFS; 2439 fgp_flags |= FGP_NOFS;
2444 2440
2445 page = pagecache_get_page(mapping, index, fgp_flags, 2441 page = pagecache_get_page(mapping, index, fgp_flags,
2446 mapping_gfp_mask(mapping), 2442 mapping_gfp_mask(mapping));
2447 GFP_KERNEL);
2448 if (page) 2443 if (page)
2449 wait_for_stable_page(page); 2444 wait_for_stable_page(page);
2450 2445
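After this change pagecache_get_page() takes a single gfp mask and internally narrows it with GFP_RECLAIM_MASK for the radix-tree insertion. A minimal caller sketch (the wrapper name is made up; flags and mask come from the documentation above):

#include <linux/pagemap.h>

/* Look up or create a locked page cache page at @index. */
static struct page *get_locked_page(struct address_space *mapping,
				    pgoff_t index)
{
	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_CREAT | FGP_ACCESSED,
				  mapping_gfp_mask(mapping));
}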
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ef91e856c7e4..851924fa5170 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
3043 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3043 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3044 mem_cgroup_swap_statistics(from, false); 3044 mem_cgroup_swap_statistics(from, false);
3045 mem_cgroup_swap_statistics(to, true); 3045 mem_cgroup_swap_statistics(to, true);
3046 /*
3047 * This function is only called from task migration context now.
3048 * It postpones page_counter and refcount handling till the end
3049 * of task migration(mem_cgroup_clear_mc()) for performance
3050 * improvement. But we cannot postpone css_get(to) because if
3051 * the process that has been moved to @to does swap-in, the
3052 * refcount of @to might be decreased to 0.
3053 *
3054 * We are in attach() phase, so the cgroup is guaranteed to be
3055 * alive, so we can just call css_get().
3056 */
3057 css_get(&to->css);
3058 return 0; 3046 return 0;
3059 } 3047 }
3060 return -EINVAL; 3048 return -EINVAL;
@@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4679 if (parent_css == NULL) { 4667 if (parent_css == NULL) {
4680 root_mem_cgroup = memcg; 4668 root_mem_cgroup = memcg;
4681 page_counter_init(&memcg->memory, NULL); 4669 page_counter_init(&memcg->memory, NULL);
4670 memcg->soft_limit = PAGE_COUNTER_MAX;
4682 page_counter_init(&memcg->memsw, NULL); 4671 page_counter_init(&memcg->memsw, NULL);
4683 page_counter_init(&memcg->kmem, NULL); 4672 page_counter_init(&memcg->kmem, NULL);
4684 } 4673 }
@@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
4724 4713
4725 if (parent->use_hierarchy) { 4714 if (parent->use_hierarchy) {
4726 page_counter_init(&memcg->memory, &parent->memory); 4715 page_counter_init(&memcg->memory, &parent->memory);
4716 memcg->soft_limit = PAGE_COUNTER_MAX;
4727 page_counter_init(&memcg->memsw, &parent->memsw); 4717 page_counter_init(&memcg->memsw, &parent->memsw);
4728 page_counter_init(&memcg->kmem, &parent->kmem); 4718 page_counter_init(&memcg->kmem, &parent->kmem);
4729 4719
@@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
4733 */ 4723 */
4734 } else { 4724 } else {
4735 page_counter_init(&memcg->memory, NULL); 4725 page_counter_init(&memcg->memory, NULL);
4726 memcg->soft_limit = PAGE_COUNTER_MAX;
4736 page_counter_init(&memcg->memsw, NULL); 4727 page_counter_init(&memcg->memsw, NULL);
4737 page_counter_init(&memcg->kmem, NULL); 4728 page_counter_init(&memcg->kmem, NULL);
4738 /* 4729 /*
@@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4807 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); 4798 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4808 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); 4799 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4809 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); 4800 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
4810 memcg->soft_limit = 0; 4801 memcg->soft_limit = PAGE_COUNTER_MAX;
4811} 4802}
4812 4803
4813#ifdef CONFIG_MMU 4804#ifdef CONFIG_MMU
diff --git a/mm/memory.c b/mm/memory.c
index 649e7d440bd7..54f3a9b00956 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
235 235
236static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) 236static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
237{ 237{
238 if (!tlb->end)
239 return;
240
238 tlb_flush(tlb); 241 tlb_flush(tlb);
239 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); 242 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
240#ifdef CONFIG_HAVE_RCU_TABLE_FREE 243#ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
247{ 250{
248 struct mmu_gather_batch *batch; 251 struct mmu_gather_batch *batch;
249 252
250 for (batch = &tlb->local; batch; batch = batch->next) { 253 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
251 free_pages_and_swap_cache(batch->pages, batch->nr); 254 free_pages_and_swap_cache(batch->pages, batch->nr);
252 batch->nr = 0; 255 batch->nr = 0;
253 } 256 }
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
256 259
257void tlb_flush_mmu(struct mmu_gather *tlb) 260void tlb_flush_mmu(struct mmu_gather *tlb)
258{ 261{
259 if (!tlb->end)
260 return;
261
262 tlb_flush_mmu_tlbonly(tlb); 262 tlb_flush_mmu_tlbonly(tlb);
263 tlb_flush_mmu_free(tlb); 263 tlb_flush_mmu_free(tlb);
264} 264}
@@ -2137,17 +2137,24 @@ reuse:
2137 if (!dirty_page) 2137 if (!dirty_page)
2138 return ret; 2138 return ret;
2139 2139
2140 /*
2141 * Yes, Virginia, this is actually required to prevent a race
2142 * with clear_page_dirty_for_io() from clearing the page dirty
2143 * bit after it clear all dirty ptes, but before a racing
2144 * do_wp_page installs a dirty pte.
2145 *
2146 * do_shared_fault is protected similarly.
2147 */
2148 if (!page_mkwrite) { 2140 if (!page_mkwrite) {
2149 wait_on_page_locked(dirty_page); 2141 struct address_space *mapping;
2150 set_page_dirty_balance(dirty_page); 2142 int dirtied;
2143
2144 lock_page(dirty_page);
2145 dirtied = set_page_dirty(dirty_page);
2146 VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
2147 mapping = dirty_page->mapping;
2148 unlock_page(dirty_page);
2149
2150 if (dirtied && mapping) {
2151 /*
2152 * Some device drivers do not set page.mapping
2153 * but still dirty their pages
2154 */
2155 balance_dirty_pages_ratelimited(mapping);
2156 }
2157
2151 /* file_update_time outside page_lock */ 2158 /* file_update_time outside page_lock */
2152 if (vma->vm_file) 2159 if (vma->vm_file)
2153 file_update_time(vma->vm_file); 2160 file_update_time(vma->vm_file);
@@ -2378,12 +2385,12 @@ void unmap_mapping_range(struct address_space *mapping,
2378 details.last_index = ULONG_MAX; 2385 details.last_index = ULONG_MAX;
2379 2386
2380 2387
2381 i_mmap_lock_read(mapping); 2388 i_mmap_lock_write(mapping);
2382 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) 2389 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
2383 unmap_mapping_range_tree(&mapping->i_mmap, &details); 2390 unmap_mapping_range_tree(&mapping->i_mmap, &details);
2384 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 2391 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2385 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 2392 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2386 i_mmap_unlock_read(mapping); 2393 i_mmap_unlock_write(mapping);
2387} 2394}
2388EXPORT_SYMBOL(unmap_mapping_range); 2395EXPORT_SYMBOL(unmap_mapping_range);
2389 2396
@@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
2593 if (prev && prev->vm_end == address) 2600 if (prev && prev->vm_end == address)
2594 return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; 2601 return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
2595 2602
2596 expand_downwards(vma, address - PAGE_SIZE); 2603 return expand_downwards(vma, address - PAGE_SIZE);
2597 } 2604 }
2598 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { 2605 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
2599 struct vm_area_struct *next = vma->vm_next; 2606 struct vm_area_struct *next = vma->vm_next;
@@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
2602 if (next && next->vm_start == address + PAGE_SIZE) 2609 if (next && next->vm_start == address + PAGE_SIZE)
2603 return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; 2610 return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
2604 2611
2605 expand_upwards(vma, address + PAGE_SIZE); 2612 return expand_upwards(vma, address + PAGE_SIZE);
2606 } 2613 }
2607 return 0; 2614 return 0;
2608} 2615}
diff --git a/mm/mmap.c b/mm/mmap.c
index 7b36aa7cc89a..7f684d5a8087 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -778,10 +778,12 @@ again: remove_next = 1 + (end > next->vm_end);
 		if (exporter && exporter->anon_vma && !importer->anon_vma) {
 			int error;
 
+			importer->anon_vma = exporter->anon_vma;
 			error = anon_vma_clone(importer, exporter);
-			if (error)
+			if (error) {
+				importer->anon_vma = NULL;
 				return error;
-			importer->anon_vma = exporter->anon_vma;
+			}
 		}
 	}
 
@@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start;
+	unsigned long new_start, actual_size;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	actual_size = size;
+	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+		actual_size -= PAGE_SIZE;
+	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
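The acct_stack_growth() change stops counting the stack guard page against RLIMIT_STACK, since the stack VMA's size includes that guard page. A worked example of the arithmetic, assuming 4 KiB pages and an 8 MiB stack limit:

  usable stack wanted       : 2048 pages = 8 MiB
  VMA size incl. guard page : 2049 pages = 8 MiB + 4 KiB
  old check: 8 MiB + 4 KiB > 8 MiB            -> -ENOMEM one page early
  new check: (8 MiB + 4 KiB) - 4 KiB = 8 MiB  -> not over the limit, allowed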
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d5d81f5384d1..6f4335238e33 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1541,16 +1541,6 @@ pause:
1541 bdi_start_background_writeback(bdi); 1541 bdi_start_background_writeback(bdi);
1542} 1542}
1543 1543
1544void set_page_dirty_balance(struct page *page)
1545{
1546 if (set_page_dirty(page)) {
1547 struct address_space *mapping = page_mapping(page);
1548
1549 if (mapping)
1550 balance_dirty_pages_ratelimited(mapping);
1551 }
1552}
1553
1554static DEFINE_PER_CPU(int, bdp_ratelimits); 1544static DEFINE_PER_CPU(int, bdp_ratelimits);
1555 1545
1556/* 1546/*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
2123 * page dirty in that case, but not all the buffers. This is a "bottom-up" 2113 * page dirty in that case, but not all the buffers. This is a "bottom-up"
2124 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. 2114 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2125 * 2115 *
2126 * Most callers have locked the page, which pins the address_space in memory. 2116 * The caller must ensure this doesn't race with truncation. Most will simply
2127 * But zap_pte_range() does not lock the page, however in that case the 2117 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
2128 * mapping is pinned by the vma's ->vm_file reference. 2118 * the pte lock held, which also locks out truncation.
2129 *
2130 * We take care to handle the case where the page was truncated from the
2131 * mapping by re-checking page_mapping() inside tree_lock.
2132 */ 2119 */
2133int __set_page_dirty_nobuffers(struct page *page) 2120int __set_page_dirty_nobuffers(struct page *page)
2134{ 2121{
2135 if (!TestSetPageDirty(page)) { 2122 if (!TestSetPageDirty(page)) {
2136 struct address_space *mapping = page_mapping(page); 2123 struct address_space *mapping = page_mapping(page);
2137 struct address_space *mapping2;
2138 unsigned long flags; 2124 unsigned long flags;
2139 2125
2140 if (!mapping) 2126 if (!mapping)
2141 return 1; 2127 return 1;
2142 2128
2143 spin_lock_irqsave(&mapping->tree_lock, flags); 2129 spin_lock_irqsave(&mapping->tree_lock, flags);
2144 mapping2 = page_mapping(page); 2130 BUG_ON(page_mapping(page) != mapping);
2145 if (mapping2) { /* Race with truncate? */ 2131 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
2146 BUG_ON(mapping2 != mapping); 2132 account_page_dirtied(page, mapping);
2147 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); 2133 radix_tree_tag_set(&mapping->page_tree, page_index(page),
2148 account_page_dirtied(page, mapping); 2134 PAGECACHE_TAG_DIRTY);
2149 radix_tree_tag_set(&mapping->page_tree,
2150 page_index(page), PAGECACHE_TAG_DIRTY);
2151 }
2152 spin_unlock_irqrestore(&mapping->tree_lock, flags); 2135 spin_unlock_irqrestore(&mapping->tree_lock, flags);
2153 if (mapping->host) { 2136 if (mapping->host) {
2154 /* !PageAnon && !swapper_space */ 2137 /* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
2305 /* 2288 /*
2306 * We carefully synchronise fault handlers against 2289 * We carefully synchronise fault handlers against
2307 * installing a dirty pte and marking the page dirty 2290 * installing a dirty pte and marking the page dirty
2308 * at this point. We do this by having them hold the 2291 * at this point. We do this by having them hold the
2309 * page lock at some point after installing their 2292 * page lock while dirtying the page, and pages are
2310 * pte, but before marking the page dirty. 2293 * always locked coming in here, so we get the desired
2311 * Pages are always locked coming in here, so we get 2294 * exclusion.
2312 * the desired exclusion. See mm/memory.c:do_wp_page()
2313 * for more comments.
2314 */ 2295 */
2315 if (TestClearPageDirty(page)) { 2296 if (TestClearPageDirty(page)) {
2316 dec_zone_page_state(page, NR_FILE_DIRTY); 2297 dec_zone_page_state(page, NR_FILE_DIRTY);
diff --git a/mm/rmap.c b/mm/rmap.c
index c5bc241127b2..71cd5bd0c17d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
72 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 72 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
73 if (anon_vma) { 73 if (anon_vma) {
74 atomic_set(&anon_vma->refcount, 1); 74 atomic_set(&anon_vma->refcount, 1);
75 anon_vma->degree = 1; /* Reference for first vma */
76 anon_vma->parent = anon_vma;
75 /* 77 /*
76 * Initialise the anon_vma root to point to itself. If called 78 * Initialise the anon_vma root to point to itself. If called
77 * from fork, the root will be reset to the parents anon_vma. 79 * from fork, the root will be reset to the parents anon_vma.
@@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
188 if (likely(!vma->anon_vma)) { 190 if (likely(!vma->anon_vma)) {
189 vma->anon_vma = anon_vma; 191 vma->anon_vma = anon_vma;
190 anon_vma_chain_link(vma, avc, anon_vma); 192 anon_vma_chain_link(vma, avc, anon_vma);
193 /* vma reference or self-parent link for new root */
194 anon_vma->degree++;
191 allocated = NULL; 195 allocated = NULL;
192 avc = NULL; 196 avc = NULL;
193 } 197 }
@@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
236/* 240/*
237 * Attach the anon_vmas from src to dst. 241 * Attach the anon_vmas from src to dst.
238 * Returns 0 on success, -ENOMEM on failure. 242 * Returns 0 on success, -ENOMEM on failure.
243 *
244 * If dst->anon_vma is NULL this function tries to find and reuse existing
245 * anon_vma which has no vmas and only one child anon_vma. This prevents
246 * degradation of anon_vma hierarchy to endless linear chain in case of
247 * constantly forking task. On the other hand, an anon_vma with more than one
248 * child isn't reused even if there was no alive vma, thus rmap walker has a
249 * good chance of avoiding scanning the whole hierarchy when it searches where
250 * page is mapped.
239 */ 251 */
240int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 252int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
241{ 253{
@@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
256 anon_vma = pavc->anon_vma; 268 anon_vma = pavc->anon_vma;
257 root = lock_anon_vma_root(root, anon_vma); 269 root = lock_anon_vma_root(root, anon_vma);
258 anon_vma_chain_link(dst, avc, anon_vma); 270 anon_vma_chain_link(dst, avc, anon_vma);
271
272 /*
273 * Reuse existing anon_vma if its degree lower than two,
274 * that means it has no vma and only one anon_vma child.
275 *
276 * Do not chose parent anon_vma, otherwise first child
277 * will always reuse it. Root anon_vma is never reused:
278 * it has self-parent reference and at least one child.
279 */
280 if (!dst->anon_vma && anon_vma != src->anon_vma &&
281 anon_vma->degree < 2)
282 dst->anon_vma = anon_vma;
259 } 283 }
284 if (dst->anon_vma)
285 dst->anon_vma->degree++;
260 unlock_anon_vma_root(root); 286 unlock_anon_vma_root(root);
261 return 0; 287 return 0;
262 288
@@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
280 if (!pvma->anon_vma) 306 if (!pvma->anon_vma)
281 return 0; 307 return 0;
282 308
309 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
310 vma->anon_vma = NULL;
311
283 /* 312 /*
284 * First, attach the new VMA to the parent VMA's anon_vmas, 313 * First, attach the new VMA to the parent VMA's anon_vmas,
285 * so rmap can find non-COWed pages in child processes. 314 * so rmap can find non-COWed pages in child processes.
@@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
288 if (error) 317 if (error)
289 return error; 318 return error;
290 319
320 /* An existing anon_vma has been reused, all done then. */
321 if (vma->anon_vma)
322 return 0;
323
291 /* Then add our own anon_vma. */ 324 /* Then add our own anon_vma. */
292 anon_vma = anon_vma_alloc(); 325 anon_vma = anon_vma_alloc();
293 if (!anon_vma) 326 if (!anon_vma)
@@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
301 * lock any of the anon_vmas in this anon_vma tree. 334 * lock any of the anon_vmas in this anon_vma tree.
302 */ 335 */
303 anon_vma->root = pvma->anon_vma->root; 336 anon_vma->root = pvma->anon_vma->root;
337 anon_vma->parent = pvma->anon_vma;
304 /* 338 /*
305 * With refcounts, an anon_vma can stay around longer than the 339 * With refcounts, an anon_vma can stay around longer than the
306 * process it belongs to. The root anon_vma needs to be pinned until 340 * process it belongs to. The root anon_vma needs to be pinned until
@@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
311 vma->anon_vma = anon_vma; 345 vma->anon_vma = anon_vma;
312 anon_vma_lock_write(anon_vma); 346 anon_vma_lock_write(anon_vma);
313 anon_vma_chain_link(vma, avc, anon_vma); 347 anon_vma_chain_link(vma, avc, anon_vma);
348 anon_vma->parent->degree++;
314 anon_vma_unlock_write(anon_vma); 349 anon_vma_unlock_write(anon_vma);
315 350
316 return 0; 351 return 0;
@@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
341 * Leave empty anon_vmas on the list - we'll need 376 * Leave empty anon_vmas on the list - we'll need
342 * to free them outside the lock. 377 * to free them outside the lock.
343 */ 378 */
344 if (RB_EMPTY_ROOT(&anon_vma->rb_root)) 379 if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
380 anon_vma->parent->degree--;
345 continue; 381 continue;
382 }
346 383
347 list_del(&avc->same_vma); 384 list_del(&avc->same_vma);
348 anon_vma_chain_free(avc); 385 anon_vma_chain_free(avc);
349 } 386 }
387 if (vma->anon_vma)
388 vma->anon_vma->degree--;
350 unlock_anon_vma_root(root); 389 unlock_anon_vma_root(root);
351 390
352 /* 391 /*
@@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
357 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 396 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
358 struct anon_vma *anon_vma = avc->anon_vma; 397 struct anon_vma *anon_vma = avc->anon_vma;
359 398
399 BUG_ON(anon_vma->degree);
360 put_anon_vma(anon_vma); 400 put_anon_vma(anon_vma);
361 401
362 list_del(&avc->same_vma); 402 list_del(&avc->same_vma);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd9a72bc4a1b..ab2505c3ef54 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 		return false;
 
 	/*
-	 * There is a potential race between when kswapd checks its watermarks
-	 * and a process gets throttled. There is also a potential race if
-	 * processes get throttled, kswapd wakes, a large process exits therby
-	 * balancing the zones that causes kswapd to miss a wakeup. If kswapd
-	 * is going to sleep, no process should be sleeping on pfmemalloc_wait
-	 * so wake them now if necessary. If necessary, processes will wake
-	 * kswapd and get throttled again
+	 * The throttled processes are normally woken up in balance_pgdat() as
+	 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+	 * race between when kswapd checks the watermarks and a process gets
+	 * throttled. There is also a potential race if processes get
+	 * throttled, kswapd wakes, a large process exits thereby balancing the
+	 * zones, which causes kswapd to exit balance_pgdat() before reaching
+	 * the wake up checks. If kswapd is going to sleep, no process should
+	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+	 * the wake up is premature, processes will wake kswapd and get
+	 * throttled again. The difference from wake ups in balance_pgdat() is
+	 * that here we are under prepare_to_wait().
 	 */
-	if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
-		wake_up(&pgdat->pfmemalloc_wait);
-		return false;
-	}
+	if (waitqueue_active(&pgdat->pfmemalloc_wait))
+		wake_up_all(&pgdat->pfmemalloc_wait);
 
 	return pgdat_balanced(pgdat, order, classzone_idx);
 }
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index fc1835c6bb40..00f9e144cc97 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
 	kfree(entry);
 
 	/* Make room for the rest of the fragments. */
-	if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
 		kfree_skb(skb_out);
 		skb_out = NULL;
 		goto free;
@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
 	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
 	 */
 	mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
-	max_fragment_size = (mtu - header_size - ETH_HLEN);
+	max_fragment_size = mtu - header_size;
 	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
 
 	/* Don't even try to fragment, if we need more than 16 fragments */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 90cff585b37d..e0bcf9e84273 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -810,7 +810,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
810 goto out; 810 goto out;
811 811
812 gw_node = batadv_gw_node_get(bat_priv, orig_dst_node); 812 gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
813 if (!gw_node->bandwidth_down == 0) 813 if (!gw_node)
814 goto out; 814 goto out;
815 815
816 switch (atomic_read(&bat_priv->gw_mode)) { 816 switch (atomic_read(&bat_priv->gw_mode)) {
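
The one-line gateway_client.c change is an operator-precedence and NULL-check fix: !gw_node->bandwidth_down == 0 parses as (!gw_node->bandwidth_down) == 0, i.e. "bandwidth_down is non-zero", and it dereferences gw_node before anything has checked that batadv_gw_node_get() returned a node at all. A tiny stand-alone C sketch of the pitfall (hypothetical struct, for illustration only):

#include <stdio.h>

struct gw {
        unsigned int bandwidth_down;
};

int main(void)
{
        struct gw g = { .bandwidth_down = 100 };
        struct gw *gw = &g;

        /* ! binds tighter than ==, so this means (!gw->bandwidth_down) == 0,
         * i.e. "bandwidth_down != 0" -- and it crashes first if gw is NULL. */
        if (!gw->bandwidth_down == 0)
                printf("old-style test fires for non-zero bandwidth\n");

        /* What the fix does instead: bail out early on a missing node. */
        if (!gw)
                return 0;
        printf("bandwidth_down = %u\n", gw->bandwidth_down);
        return 0;
}
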
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index ab6bb2af1d45..b24e4bb64fb5 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
685 if (orig_initialized) 685 if (orig_initialized)
686 atomic_dec(&bat_priv->mcast.num_disabled); 686 atomic_dec(&bat_priv->mcast.num_disabled);
687 orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST; 687 orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
688 /* If mcast support is being switched off increase the disabled 688 /* If mcast support is being switched off or if this is an initial
689 * mcast node counter. 689 * OGM without mcast support then increase the disabled mcast
690 * node counter.
690 */ 691 */
691 } else if (!orig_mcast_enabled && 692 } else if (!orig_mcast_enabled &&
692 orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) { 693 (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
694 !orig_initialized)) {
693 atomic_inc(&bat_priv->mcast.num_disabled); 695 atomic_inc(&bat_priv->mcast.num_disabled);
694 orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST; 696 orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
695 } 697 }
@@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
738{ 740{
739 struct batadv_priv *bat_priv = orig->bat_priv; 741 struct batadv_priv *bat_priv = orig->bat_priv;
740 742
741 if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) 743 if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
744 orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
742 atomic_dec(&bat_priv->mcast.num_disabled); 745 atomic_dec(&bat_priv->mcast.num_disabled);
743 746
744 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); 747 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 8d04d174669e..fab47f1f3ef9 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
133 if (!bat_priv->nc.decoding_hash) 133 if (!bat_priv->nc.decoding_hash)
134 goto err; 134 goto err;
135 135
136 batadv_hash_set_lock_class(bat_priv->nc.coding_hash, 136 batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
137 &batadv_nc_decoding_hash_lock_class_key); 137 &batadv_nc_decoding_hash_lock_class_key);
138 138
139 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); 139 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6a484514cd3e..bea8198d0198 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
570 570
571 batadv_frag_purge_orig(orig_node, NULL); 571 batadv_frag_purge_orig(orig_node, NULL);
572 572
573 batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
574 "originator timed out");
575
576 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) 573 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
577 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); 574 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
578 575
@@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
678 atomic_set(&orig_node->last_ttvn, 0); 675 atomic_set(&orig_node->last_ttvn, 0);
679 orig_node->tt_buff = NULL; 676 orig_node->tt_buff = NULL;
680 orig_node->tt_buff_len = 0; 677 orig_node->tt_buff_len = 0;
678 orig_node->last_seen = jiffies;
681 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); 679 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
682 orig_node->bcast_seqno_reset = reset_time; 680 orig_node->bcast_seqno_reset = reset_time;
683#ifdef CONFIG_BATMAN_ADV_MCAST 681#ifdef CONFIG_BATMAN_ADV_MCAST
@@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
977 if (batadv_purge_orig_node(bat_priv, orig_node)) { 975 if (batadv_purge_orig_node(bat_priv, orig_node)) {
978 batadv_gw_node_delete(bat_priv, orig_node); 976 batadv_gw_node_delete(bat_priv, orig_node);
979 hlist_del_rcu(&orig_node->hash_entry); 977 hlist_del_rcu(&orig_node->hash_entry);
978 batadv_tt_global_del_orig(orig_node->bat_priv,
979 orig_node, -1,
980 "originator timed out");
980 batadv_orig_node_free_ref(orig_node); 981 batadv_orig_node_free_ref(orig_node);
981 continue; 982 continue;
982 } 983 }
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 35f76f2f7824..6648f321864d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv,
443 443
444 router = batadv_orig_router_get(orig_node, recv_if); 444 router = batadv_orig_router_get(orig_node, recv_if);
445 445
446 if (!router)
447 return router;
448
446 /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop) 449 /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
447 * and if activated. 450 * and if activated.
448 */ 451 */
449 if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) || 452 if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
450 !router)
451 return router; 453 return router;
452 454
453 /* bonding: loop through the list of possible routers found 455 /* bonding: loop through the list of possible routers found
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 76617be1e797..c989253737f0 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -390,7 +390,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
390 390
391drop: 391drop:
392 dev->stats.rx_dropped++; 392 dev->stats.rx_dropped++;
393 kfree_skb(skb);
394 return NET_RX_DROP; 393 return NET_RX_DROP;
395} 394}
396 395
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 85bcc21e84d2..ce82722d049b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -533,6 +533,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
533 533
534 BT_DBG(""); 534 BT_DBG("");
535 535
536 if (!l2cap_is_socket(sock))
537 return -EBADFD;
538
536 baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); 539 baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
537 baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); 540 baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
538 541
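
Both this bnep hunk and the cmtp hunk that follows add the same guard: l2cap_is_socket() must confirm that the user-supplied socket really is an L2CAP socket before the code reaches into l2cap_pi(sock->sk), otherwise a socket of the wrong type would be treated as one. A generic sketch of the check-the-type-before-the-downcast pattern (all names here are hypothetical; -EBADFD matches the errno the fix returns and is Linux-specific):

#include <errno.h>

enum sock_kind { SOCK_PLAIN, SOCK_L2CAP };

struct generic_sock { enum sock_kind kind; };
struct l2cap_sock   { struct generic_sock base; int psm; };

/* Refuse anything that is not the expected socket type before touching
 * type-specific fields; only then is the downcast known to be safe. */
static int add_connection(struct generic_sock *sock)
{
        struct l2cap_sock *l2;

        if (sock->kind != SOCK_L2CAP)
                return -EBADFD;

        l2 = (struct l2cap_sock *)sock;
        return l2->psm;
}

int main(void)
{
        struct l2cap_sock l2 = { .base = { .kind = SOCK_L2CAP }, .psm = 4113 };
        struct generic_sock plain = { .kind = SOCK_PLAIN };

        return add_connection(&plain) == -EBADFD &&
               add_connection(&l2.base) == 4113 ? 0 : 1;
}
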
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 67fe5e84e68f..278a194e6af4 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
334 334
335 BT_DBG(""); 335 BT_DBG("");
336 336
337 if (!l2cap_is_socket(sock))
338 return -EBADFD;
339
337 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); 340 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
338 if (!session) 341 if (!session)
339 return -ENOMEM; 342 return -ENOMEM;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 39a5c8a01726..3f2e8b830cbd 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -242,7 +242,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
242 if (rp->status) 242 if (rp->status)
243 return; 243 return;
244 244
245 if (test_bit(HCI_SETUP, &hdev->dev_flags)) 245 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
246 test_bit(HCI_CONFIG, &hdev->dev_flags))
246 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 247 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
247} 248}
248 249
@@ -509,7 +510,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
509 if (rp->status) 510 if (rp->status)
510 return; 511 return;
511 512
512 if (test_bit(HCI_SETUP, &hdev->dev_flags)) { 513 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
514 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
513 hdev->hci_ver = rp->hci_ver; 515 hdev->hci_ver = rp->hci_ver;
514 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 516 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
515 hdev->lmp_ver = rp->lmp_ver; 517 hdev->lmp_ver = rp->lmp_ver;
@@ -528,7 +530,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
528 if (rp->status) 530 if (rp->status)
529 return; 531 return;
530 532
531 if (test_bit(HCI_SETUP, &hdev->dev_flags)) 533 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
534 test_bit(HCI_CONFIG, &hdev->dev_flags))
532 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 535 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
533} 536}
534 537
@@ -2194,7 +2197,12 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2194 return; 2197 return;
2195 } 2198 }
2196 2199
2197 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) && 2200 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2201 * connection. These features are only touched through mgmt so
2202 * only do the checks if HCI_MGMT is set.
2203 */
2204 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2205 !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
2198 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, 2206 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2199 BDADDR_BREDR)) { 2207 BDADDR_BREDR)) {
2200 hci_reject_conn(hdev, &ev->bdaddr); 2208 hci_reject_conn(hdev, &ev->bdaddr);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index cc25d0b74b36..07348e142f16 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1314,13 +1314,14 @@ int hidp_connection_add(struct hidp_connadd_req *req,
1314{ 1314{
1315 struct hidp_session *session; 1315 struct hidp_session *session;
1316 struct l2cap_conn *conn; 1316 struct l2cap_conn *conn;
1317 struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan; 1317 struct l2cap_chan *chan;
1318 int ret; 1318 int ret;
1319 1319
1320 ret = hidp_verify_sockets(ctrl_sock, intr_sock); 1320 ret = hidp_verify_sockets(ctrl_sock, intr_sock);
1321 if (ret) 1321 if (ret)
1322 return ret; 1322 return ret;
1323 1323
1324 chan = l2cap_pi(ctrl_sock->sk)->chan;
1324 conn = NULL; 1325 conn = NULL;
1325 l2cap_chan_lock(chan); 1326 l2cap_chan_lock(chan);
1326 if (chan->conn) 1327 if (chan->conn)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1f1de715197c..e2aa7be3a847 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
154 dst = NULL; 154 dst = NULL;
155 155
156 if (is_broadcast_ether_addr(dest)) { 156 if (is_broadcast_ether_addr(dest)) {
157 if (p->flags & BR_PROXYARP && 157 if (IS_ENABLED(CONFIG_INET) &&
158 p->flags & BR_PROXYARP &&
158 skb->protocol == htons(ETH_P_ARP)) 159 skb->protocol == htons(ETH_P_ARP))
159 br_do_proxy_arp(skb, br, vid); 160 br_do_proxy_arp(skb, br, vid);
160 161
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 15845814a0f2..ba6eb17226da 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au,
676 int ret; 676 int ret;
677 char tmp_enc[40]; 677 char tmp_enc[40];
678 __le32 tmp[5] = { 678 __le32 tmp[5] = {
679 16u, msg->hdr.crc, msg->footer.front_crc, 679 cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
680 msg->footer.middle_crc, msg->footer.data_crc, 680 msg->footer.middle_crc, msg->footer.data_crc,
681 }; 681 };
682 ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp), 682 ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a83062ceeec9..f2148e22b148 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len,
717 if (src_len != sizeof(u32) + dst_len) 717 if (src_len != sizeof(u32) + dst_len)
718 return -EINVAL; 718 return -EINVAL;
719 719
720 buf_len = le32_to_cpu(*(u32 *)src); 720 buf_len = le32_to_cpu(*(__le32 *)src);
721 if (buf_len != dst_len) 721 if (buf_len != dst_len)
722 return -EINVAL; 722 return -EINVAL;
723 723
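
The two ceph hunks are endianness fixes: the literal 16 stored into the on-wire __le32 array now goes through cpu_to_le32(), which changes behaviour on big-endian hosts, and the length read in get_poolop_reply_buf() is cast through a __le32 pointer before le32_to_cpu(), which is mainly a type-annotation correction. As a reminder of what the conversion guarantees, a small portable sketch in plain user-space C (not the kernel helpers themselves):

#include <stdint.h>
#include <stdio.h>

/* Store a 32-bit value in little-endian byte order regardless of the host
 * CPU -- the user-space analogue of cpu_to_le32(). */
static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
}

/* Read it back -- the analogue of le32_to_cpu(). */
static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        uint8_t wire[4];

        put_le32(wire, 16);     /* same byte order on every host */
        printf("on the wire: %02x %02x %02x %02x -> %u\n",
               wire[0], wire[1], wire[2], wire[3], get_le32(wire));
        return 0;
}
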
diff --git a/net/core/dev.c b/net/core/dev.c
index f411c28d0a66..171420e75b03 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1694,6 +1694,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1694 1694
1695 skb_scrub_packet(skb, true); 1695 skb_scrub_packet(skb, true);
1696 skb->protocol = eth_type_trans(skb, dev); 1696 skb->protocol = eth_type_trans(skb, dev);
1697 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1697 1698
1698 return 0; 1699 return 0;
1699} 1700}
@@ -2522,7 +2523,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2522/* If MPLS offload request, verify we are testing hardware MPLS features 2523/* If MPLS offload request, verify we are testing hardware MPLS features
2523 * instead of standard features for the netdev. 2524 * instead of standard features for the netdev.
2524 */ 2525 */
2525#ifdef CONFIG_NET_MPLS_GSO 2526#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2526static netdev_features_t net_mpls_features(struct sk_buff *skb, 2527static netdev_features_t net_mpls_features(struct sk_buff *skb,
2527 netdev_features_t features, 2528 netdev_features_t features,
2528 __be16 type) 2529 __be16 type)
@@ -2562,7 +2563,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
2562 2563
2563netdev_features_t netif_skb_features(struct sk_buff *skb) 2564netdev_features_t netif_skb_features(struct sk_buff *skb)
2564{ 2565{
2565 const struct net_device *dev = skb->dev; 2566 struct net_device *dev = skb->dev;
2566 netdev_features_t features = dev->features; 2567 netdev_features_t features = dev->features;
2567 u16 gso_segs = skb_shinfo(skb)->gso_segs; 2568 u16 gso_segs = skb_shinfo(skb)->gso_segs;
2568 __be16 protocol = skb->protocol; 2569 __be16 protocol = skb->protocol;
@@ -2570,11 +2571,21 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
2570 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs) 2571 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2571 features &= ~NETIF_F_GSO_MASK; 2572 features &= ~NETIF_F_GSO_MASK;
2572 2573
2573 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2574 /* If encapsulation offload request, verify we are testing
2574 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2575 * hardware encapsulation features instead of standard
2575 protocol = veh->h_vlan_encapsulated_proto; 2576 * features for the netdev
2576 } else if (!vlan_tx_tag_present(skb)) { 2577 */
2577 return harmonize_features(skb, features); 2578 if (skb->encapsulation)
2579 features &= dev->hw_enc_features;
2580
2581 if (!vlan_tx_tag_present(skb)) {
2582 if (unlikely(protocol == htons(ETH_P_8021Q) ||
2583 protocol == htons(ETH_P_8021AD))) {
2584 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2585 protocol = veh->h_vlan_encapsulated_proto;
2586 } else {
2587 goto finalize;
2588 }
2578 } 2589 }
2579 2590
2580 features = netdev_intersect_features(features, 2591 features = netdev_intersect_features(features,
@@ -2591,6 +2602,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
2591 NETIF_F_HW_VLAN_CTAG_TX | 2602 NETIF_F_HW_VLAN_CTAG_TX |
2592 NETIF_F_HW_VLAN_STAG_TX); 2603 NETIF_F_HW_VLAN_STAG_TX);
2593 2604
2605finalize:
2606 if (dev->netdev_ops->ndo_features_check)
2607 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2608 features);
2609
2594 return harmonize_features(skb, features); 2610 return harmonize_features(skb, features);
2595} 2611}
2596EXPORT_SYMBOL(netif_skb_features); 2612EXPORT_SYMBOL(netif_skb_features);
@@ -2661,19 +2677,12 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
2661 if (unlikely(!skb)) 2677 if (unlikely(!skb))
2662 goto out_null; 2678 goto out_null;
2663 2679
2664 /* If encapsulation offload request, verify we are testing
2665 * hardware encapsulation features instead of standard
2666 * features for the netdev
2667 */
2668 if (skb->encapsulation)
2669 features &= dev->hw_enc_features;
2670
2671 if (netif_needs_gso(dev, skb, features)) { 2680 if (netif_needs_gso(dev, skb, features)) {
2672 struct sk_buff *segs; 2681 struct sk_buff *segs;
2673 2682
2674 segs = skb_gso_segment(skb, features); 2683 segs = skb_gso_segment(skb, features);
2675 if (IS_ERR(segs)) { 2684 if (IS_ERR(segs)) {
2676 segs = NULL; 2685 goto out_kfree_skb;
2677 } else if (segs) { 2686 } else if (segs) {
2678 consume_skb(skb); 2687 consume_skb(skb);
2679 skb = segs; 2688 skb = segs;
@@ -4557,6 +4566,68 @@ void netif_napi_del(struct napi_struct *napi)
4557} 4566}
4558EXPORT_SYMBOL(netif_napi_del); 4567EXPORT_SYMBOL(netif_napi_del);
4559 4568
4569static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4570{
4571 void *have;
4572 int work, weight;
4573
4574 list_del_init(&n->poll_list);
4575
4576 have = netpoll_poll_lock(n);
4577
4578 weight = n->weight;
4579
4580 /* This NAPI_STATE_SCHED test is for avoiding a race
4581 * with netpoll's poll_napi(). Only the entity which
4582 * obtains the lock and sees NAPI_STATE_SCHED set will
4583 * actually make the ->poll() call. Therefore we avoid
4584 * accidentally calling ->poll() when NAPI is not scheduled.
4585 */
4586 work = 0;
4587 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4588 work = n->poll(n, weight);
4589 trace_napi_poll(n);
4590 }
4591
4592 WARN_ON_ONCE(work > weight);
4593
4594 if (likely(work < weight))
4595 goto out_unlock;
4596
4597 /* Drivers must not modify the NAPI state if they
4598 * consume the entire weight. In such cases this code
4599 * still "owns" the NAPI instance and therefore can
4600 * move the instance around on the list at-will.
4601 */
4602 if (unlikely(napi_disable_pending(n))) {
4603 napi_complete(n);
4604 goto out_unlock;
4605 }
4606
4607 if (n->gro_list) {
4608 /* flush too old packets
4609 * If HZ < 1000, flush all packets.
4610 */
4611 napi_gro_flush(n, HZ >= 1000);
4612 }
4613
4614 /* Some drivers may have called napi_schedule
4615 * prior to exhausting their budget.
4616 */
4617 if (unlikely(!list_empty(&n->poll_list))) {
4618 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4619 n->dev ? n->dev->name : "backlog");
4620 goto out_unlock;
4621 }
4622
4623 list_add_tail(&n->poll_list, repoll);
4624
4625out_unlock:
4626 netpoll_poll_unlock(have);
4627
4628 return work;
4629}
4630
4560static void net_rx_action(struct softirq_action *h) 4631static void net_rx_action(struct softirq_action *h)
4561{ 4632{
4562 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 4633 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4564,74 +4635,34 @@ static void net_rx_action(struct softirq_action *h)
4564 int budget = netdev_budget; 4635 int budget = netdev_budget;
4565 LIST_HEAD(list); 4636 LIST_HEAD(list);
4566 LIST_HEAD(repoll); 4637 LIST_HEAD(repoll);
4567 void *have;
4568 4638
4569 local_irq_disable(); 4639 local_irq_disable();
4570 list_splice_init(&sd->poll_list, &list); 4640 list_splice_init(&sd->poll_list, &list);
4571 local_irq_enable(); 4641 local_irq_enable();
4572 4642
4573 while (!list_empty(&list)) { 4643 for (;;) {
4574 struct napi_struct *n; 4644 struct napi_struct *n;
4575 int work, weight;
4576
4577 /* If softirq window is exhausted then punt.
4578 * Allow this to run for 2 jiffies since which will allow
4579 * an average latency of 1.5/HZ.
4580 */
4581 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4582 goto softnet_break;
4583
4584 4645
4585 n = list_first_entry(&list, struct napi_struct, poll_list); 4646 if (list_empty(&list)) {
4586 list_del_init(&n->poll_list); 4647 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4587 4648 return;
4588 have = netpoll_poll_lock(n); 4649 break;
4589
4590 weight = n->weight;
4591
4592 /* This NAPI_STATE_SCHED test is for avoiding a race
4593 * with netpoll's poll_napi(). Only the entity which
4594 * obtains the lock and sees NAPI_STATE_SCHED set will
4595 * actually make the ->poll() call. Therefore we avoid
4596 * accidentally calling ->poll() when NAPI is not scheduled.
4597 */
4598 work = 0;
4599 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4600 work = n->poll(n, weight);
4601 trace_napi_poll(n);
4602 } 4650 }
4603 4651
4604 WARN_ON_ONCE(work > weight); 4652 n = list_first_entry(&list, struct napi_struct, poll_list);
4605 4653 budget -= napi_poll(n, &repoll);
4606 budget -= work;
4607 4654
4608 /* Drivers must not modify the NAPI state if they 4655 /* If softirq window is exhausted then punt.
4609 * consume the entire weight. In such cases this code 4656 * Allow this to run for 2 jiffies since which will allow
4610 * still "owns" the NAPI instance and therefore can 4657 * an average latency of 1.5/HZ.
4611 * move the instance around on the list at-will.
4612 */ 4658 */
4613 if (unlikely(work == weight)) { 4659 if (unlikely(budget <= 0 ||
4614 if (unlikely(napi_disable_pending(n))) { 4660 time_after_eq(jiffies, time_limit))) {
4615 napi_complete(n); 4661 sd->time_squeeze++;
4616 } else { 4662 break;
4617 if (n->gro_list) {
4618 /* flush too old packets
4619 * If HZ < 1000, flush all packets.
4620 */
4621 napi_gro_flush(n, HZ >= 1000);
4622 }
4623 list_add_tail(&n->poll_list, &repoll);
4624 }
4625 } 4663 }
4626
4627 netpoll_poll_unlock(have);
4628 } 4664 }
4629 4665
4630 if (!sd_has_rps_ipi_waiting(sd) &&
4631 list_empty(&list) &&
4632 list_empty(&repoll))
4633 return;
4634out:
4635 local_irq_disable(); 4666 local_irq_disable();
4636 4667
4637 list_splice_tail_init(&sd->poll_list, &list); 4668 list_splice_tail_init(&sd->poll_list, &list);
@@ -4641,12 +4672,6 @@ out:
4641 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4672 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4642 4673
4643 net_rps_action_and_irq_enable(sd); 4674 net_rps_action_and_irq_enable(sd);
4644
4645 return;
4646
4647softnet_break:
4648 sd->time_squeeze++;
4649 goto out;
4650} 4675}
4651 4676
4652struct netdev_adjacent { 4677struct netdev_adjacent {
@@ -7047,10 +7072,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
7047 oldsd->output_queue = NULL; 7072 oldsd->output_queue = NULL;
7048 oldsd->output_queue_tailp = &oldsd->output_queue; 7073 oldsd->output_queue_tailp = &oldsd->output_queue;
7049 } 7074 }
7050 /* Append NAPI poll list from offline CPU. */ 7075 /* Append NAPI poll list from offline CPU, with one exception :
7051 if (!list_empty(&oldsd->poll_list)) { 7076 * process_backlog() must be called by cpu owning percpu backlog.
7052 list_splice_init(&oldsd->poll_list, &sd->poll_list); 7077 * We properly handle process_queue & input_pkt_queue later.
7053 raise_softirq_irqoff(NET_RX_SOFTIRQ); 7078 */
7079 while (!list_empty(&oldsd->poll_list)) {
7080 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7081 struct napi_struct,
7082 poll_list);
7083
7084 list_del_init(&napi->poll_list);
7085 if (napi->poll == process_backlog)
7086 napi->state = 0;
7087 else
7088 ____napi_schedule(sd, napi);
7054 } 7089 }
7055 7090
7056 raise_softirq_irqoff(NET_TX_SOFTIRQ); 7091 raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -7061,7 +7096,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
7061 netif_rx_internal(skb); 7096 netif_rx_internal(skb);
7062 input_queue_head_incr(oldsd); 7097 input_queue_head_incr(oldsd);
7063 } 7098 }
7064 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { 7099 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
7065 netif_rx_internal(skb); 7100 netif_rx_internal(skb);
7066 input_queue_head_incr(oldsd); 7101 input_queue_head_incr(oldsd);
7067 } 7102 }
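
The large net/core/dev.c rework factors the per-instance work out into napi_poll() and turns net_rx_action() into a plain loop: each call is charged against the global budget, instances that used their full weight go onto a repoll list, and once the budget or the two-jiffy window is exhausted the loop breaks, bumps time_squeeze and re-raises the softirq for the leftovers. A toy, self-contained sketch of that budget-limited polling shape (hypothetical pending[] and poll_one(); the locking, GRO and time-limit details are deliberately left out):

#include <stdio.h>

#define NSRC    4
#define WEIGHT  64

/* Toy per-queue state: packets still pending on each source. */
static int pending[NSRC] = { 10, 200, 64, 5 };

static int total_pending(void)
{
        int i, sum = 0;

        for (i = 0; i < NSRC; i++)
                sum += pending[i];
        return sum;
}

/* Hypothetical stand-in for ->poll(): drain at most 'weight' packets. */
static int poll_one(int i, int weight)
{
        int work = pending[i] < weight ? pending[i] : weight;

        pending[i] -= work;
        return work;
}

int main(void)
{
        int budget = 150;       /* plays the role of netdev_budget; small on
                                 * purpose so the punt path triggers */
        int i = 0;

        while (total_pending() > 0) {
                if (pending[i]) {
                        budget -= poll_one(i, WEIGHT);
                        printf("source %d polled, budget left %d\n", i, budget);
                        if (budget <= 0) {
                                printf("budget gone, rest runs next round\n");
                                break;
                        }
                }
                i = (i + 1) % NSRC;
        }
        return 0;
}
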
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8e38f17288d3..8d614c93f86a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
2043 case NDTPA_BASE_REACHABLE_TIME: 2043 case NDTPA_BASE_REACHABLE_TIME:
2044 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, 2044 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2045 nla_get_msecs(tbp[i])); 2045 nla_get_msecs(tbp[i]));
2046 /* update reachable_time as well, otherwise, the change will
2047 * only be effective after the next time neigh_periodic_work
2048 * decides to recompute it (can be multiple minutes)
2049 */
2050 p->reachable_time =
2051 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2046 break; 2052 break;
2047 case NDTPA_GC_STALETIME: 2053 case NDTPA_GC_STALETIME:
2048 NEIGH_VAR_SET(p, GC_STALETIME, 2054 NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2921 return ret; 2927 return ret;
2922} 2928}
2923 2929
2930static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
2931 void __user *buffer,
2932 size_t *lenp, loff_t *ppos)
2933{
2934 struct neigh_parms *p = ctl->extra2;
2935 int ret;
2936
2937 if (strcmp(ctl->procname, "base_reachable_time") == 0)
2938 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2939 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
2940 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2941 else
2942 ret = -1;
2943
2944 if (write && ret == 0) {
2945 /* update reachable_time as well, otherwise, the change will
2946 * only be effective after the next time neigh_periodic_work
2947 * decides to recompute it
2948 */
2949 p->reachable_time =
2950 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2951 }
2952 return ret;
2953}
2954
2924#define NEIGH_PARMS_DATA_OFFSET(index) \ 2955#define NEIGH_PARMS_DATA_OFFSET(index) \
2925 (&((struct neigh_parms *) 0)->data[index]) 2956 (&((struct neigh_parms *) 0)->data[index])
2926 2957
@@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3047 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; 3078 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3048 /* ReachableTime (in milliseconds) */ 3079 /* ReachableTime (in milliseconds) */
3049 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; 3080 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3081 } else {
3082 /* Those handlers will update p->reachable_time after
3083 * base_reachable_time(_ms) is set to ensure the new timer starts being
3084 * applied after the next neighbour update instead of waiting for
3085 * neigh_periodic_work to update its value (can be multiple minutes)
3086 * So any handler that replaces them should do this as well
3087 */
3088 /* ReachableTime */
3089 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3090 neigh_proc_base_reachable_time;
3091 /* ReachableTime (in milliseconds) */
3092 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3093 neigh_proc_base_reachable_time;
3050 } 3094 }
3051 3095
3052 /* Don't export sysctls to unprivileged users */ 3096 /* Don't export sysctls to unprivileged users */
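
Both neighbour.c additions make the same point: base_reachable_time is only an input, the value the stack actually uses is the randomised reachable_time, and until now that derived value was only refreshed when neigh_periodic_work happened to run (which can take minutes). The fix recomputes it directly in the netlink and sysctl setters. A minimal sketch of the refresh-the-derived-value-in-the-setter idea (hypothetical names; the randomisation only loosely mirrors neigh_rand_reach_time()):

#include <stdio.h>
#include <stdlib.h>

static unsigned int base_reachable_time = 30000;  /* ms, the tunable */
static unsigned int reachable_time;               /* derived, cached value */

/* Pick a value in roughly [base/2, 3*base/2), like the kernel helper. */
static unsigned int rand_reach_time(unsigned int base)
{
        return base / 2 + (unsigned int)rand() % base;
}

/* The point of the fix: refresh the cached value in the setter itself,
 * instead of waiting for a periodic worker to notice the new base. */
static void set_base_reachable_time(unsigned int ms)
{
        base_reachable_time = ms;
        reachable_time = rand_reach_time(base_reachable_time);
}

int main(void)
{
        set_base_reachable_time(5000);
        printf("reachable_time is now %u ms\n", reachable_time);
        return 0;
}
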
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ae13ef6b3ea7..395c15b82087 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4148,6 +4148,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4148 skb->ignore_df = 0; 4148 skb->ignore_df = 0;
4149 skb_dst_drop(skb); 4149 skb_dst_drop(skb);
4150 skb->mark = 0; 4150 skb->mark = 0;
4151 skb_init_secmark(skb);
4151 secpath_reset(skb); 4152 secpath_reset(skb);
4152 nf_reset(skb); 4153 nf_reset(skb);
4153 nf_reset_trace(skb); 4154 nf_reset_trace(skb);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 95e47c97585e..394a200f93c1 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -122,14 +122,18 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
122 int err; 122 int err;
123 123
124 skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx); 124 skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
125 if (IS_ERR(skb))
126 return PTR_ERR(skb);
125 127
126 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 128 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
127 + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr) 129 + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
128 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); 130 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
129 131
130 err = skb_cow_head(skb, min_headroom); 132 err = skb_cow_head(skb, min_headroom);
131 if (unlikely(err)) 133 if (unlikely(err)) {
134 kfree_skb(skb);
132 return err; 135 return err;
136 }
133 137
134 skb = vlan_hwaccel_push_inside(skb); 138 skb = vlan_hwaccel_push_inside(skb);
135 if (unlikely(!skb)) 139 if (unlikely(!skb))
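
The geneve changes are error-path ownership fixes: if udp_tunnel_handle_offloads() hands back an error pointer the function now returns that error instead of carrying on with a bogus skb, and if skb_cow_head() fails the skb is freed before returning so it is not leaked. The general rule, that once a function owns the buffer it must dispose of it on every early exit, looks like this in plain C (hypothetical xmit_buffer(), illustration only):

#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Takes ownership of 'buf' and must release it on every failure path. */
static int xmit_buffer(char *buf, size_t len, size_t min_headroom)
{
        char *grown;

        if (len == 0) {
                free(buf);              /* the error path still owns it */
                return -EINVAL;
        }

        grown = realloc(buf, len + min_headroom);
        if (!grown) {
                free(buf);              /* realloc failure leaves buf alive */
                return -ENOMEM;
        }

        memmove(grown + min_headroom, grown, len);
        /* ... would hand 'grown' to the lower layer here ... */
        free(grown);
        return 0;
}

int main(void)
{
        char *buf = malloc(64);

        if (!buf)
                return 1;
        memcpy(buf, "payload", 8);
        return xmit_buffer(buf, 8, 16) ? 1 : 0;
}
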
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8a89c738b7a3..6b85adb05003 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -461,17 +461,13 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
461 461
462 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); 462 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
463 sin = &errhdr.offender; 463 sin = &errhdr.offender;
464 sin->sin_family = AF_UNSPEC; 464 memset(sin, 0, sizeof(*sin));
465 465
466 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || 466 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
467 ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) { 467 ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) {
468 struct inet_sock *inet = inet_sk(sk);
469
470 sin->sin_family = AF_INET; 468 sin->sin_family = AF_INET;
471 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 469 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
472 sin->sin_port = 0; 470 if (inet_sk(sk)->cmsg_flags)
473 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
474 if (inet->cmsg_flags)
475 ip_cmsg_recv(msg, skb); 471 ip_cmsg_recv(msg, skb);
476 } 472 }
477 473
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index ff2d23d8c87a..6ecfce63201a 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
27 27
28 memset(&mr, 0, sizeof(mr)); 28 memset(&mr, 0, sizeof(mr));
29 if (priv->sreg_proto_min) { 29 if (priv->sreg_proto_min) {
30 mr.range[0].min.all = (__force __be16) 30 mr.range[0].min.all =
31 data[priv->sreg_proto_min].data[0]; 31 *(__be16 *)&data[priv->sreg_proto_min].data[0];
32 mr.range[0].max.all = (__force __be16) 32 mr.range[0].max.all =
33 data[priv->sreg_proto_max].data[0]; 33 *(__be16 *)&data[priv->sreg_proto_max].data[0];
34 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 34 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
35 } 35 }
36 36
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7f18262e2326..65caf8b95e17 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2019 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 2019 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
2020 break; 2020 break;
2021 2021
2022 if (tso_segs == 1) { 2022 if (tso_segs == 1 || !max_segs) {
2023 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 2023 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2024 (tcp_skb_is_last(sk, skb) ? 2024 (tcp_skb_is_last(sk, skb) ?
2025 nonagle : TCP_NAGLE_PUSH)))) 2025 nonagle : TCP_NAGLE_PUSH))))
@@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2032 } 2032 }
2033 2033
2034 limit = mss_now; 2034 limit = mss_now;
2035 if (tso_segs > 1 && !tcp_urg_mode(tp)) 2035 if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
2036 limit = tcp_mss_split_point(sk, skb, mss_now, 2036 limit = tcp_mss_split_point(sk, skb, mss_now,
2037 min_t(unsigned int, 2037 min_t(unsigned int,
2038 cwnd_quota, 2038 cwnd_quota,
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 100c589a2a6c..49f5e73db122 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -393,11 +393,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
393 393
394 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); 394 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
395 sin = &errhdr.offender; 395 sin = &errhdr.offender;
396 sin->sin6_family = AF_UNSPEC; 396 memset(sin, 0, sizeof(*sin));
397
397 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { 398 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
398 sin->sin6_family = AF_INET6; 399 sin->sin6_family = AF_INET6;
399 sin->sin6_flowinfo = 0;
400 sin->sin6_port = 0;
401 if (np->rxopt.all) { 400 if (np->rxopt.all) {
402 if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP && 401 if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
403 serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6) 402 serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
@@ -412,12 +411,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
412 ipv6_iface_scope_id(&sin->sin6_addr, 411 ipv6_iface_scope_id(&sin->sin6_addr,
413 IP6CB(skb)->iif); 412 IP6CB(skb)->iif);
414 } else { 413 } else {
415 struct inet_sock *inet = inet_sk(sk);
416
417 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, 414 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
418 &sin->sin6_addr); 415 &sin->sin6_addr);
419 sin->sin6_scope_id = 0; 416 if (inet_sk(sk)->cmsg_flags)
420 if (inet->cmsg_flags)
421 ip_cmsg_recv(msg, skb); 417 ip_cmsg_recv(msg, skb);
422 } 418 }
423 } 419 }
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index 2433a6bfb191..11820b6b3613 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
27 27
28 memset(&range, 0, sizeof(range)); 28 memset(&range, 0, sizeof(range));
29 if (priv->sreg_proto_min) { 29 if (priv->sreg_proto_min) {
30 range.min_proto.all = (__force __be16) 30 range.min_proto.all =
31 data[priv->sreg_proto_min].data[0]; 31 *(__be16 *)&data[priv->sreg_proto_min].data[0];
32 range.max_proto.all = (__force __be16) 32 range.max_proto.all =
33 data[priv->sreg_proto_max].data[0]; 33 *(__be16 *)&data[priv->sreg_proto_max].data[0];
34 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 34 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
35 } 35 }
36 36
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c91083156edb..166e33bed222 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1160 struct net *net = dev_net(dst->dev); 1160 struct net *net = dev_net(dst->dev);
1161 1161
1162 rt6->rt6i_flags |= RTF_MODIFIED; 1162 rt6->rt6i_flags |= RTF_MODIFIED;
1163 if (mtu < IPV6_MIN_MTU) { 1163 if (mtu < IPV6_MIN_MTU)
1164 u32 features = dst_metric(dst, RTAX_FEATURES);
1165 mtu = IPV6_MIN_MTU; 1164 mtu = IPV6_MIN_MTU;
1166 features |= RTAX_FEATURE_ALLFRAG; 1165
1167 dst_metric_set(dst, RTAX_FEATURES, features);
1168 }
1169 dst_metric_set(dst, RTAX_MTU, mtu); 1166 dst_metric_set(dst, RTAX_MTU, mtu);
1170 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires); 1167 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1171 } 1168 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5ff87805258e..9c0b54e87b47 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1387,6 +1387,28 @@ ipv6_pktoptions:
1387 return 0; 1387 return 0;
1388} 1388}
1389 1389
1390static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1391 const struct tcphdr *th)
1392{
1393 /* This is tricky: we move IP6CB at its correct location into
1394 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1395 * _decode_session6() uses IP6CB().
1396 * barrier() makes sure compiler won't play aliasing games.
1397 */
1398 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1399 sizeof(struct inet6_skb_parm));
1400 barrier();
1401
1402 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1403 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1404 skb->len - th->doff*4);
1405 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1406 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1407 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1408 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1409 TCP_SKB_CB(skb)->sacked = 0;
1410}
1411
1390static int tcp_v6_rcv(struct sk_buff *skb) 1412static int tcp_v6_rcv(struct sk_buff *skb)
1391{ 1413{
1392 const struct tcphdr *th; 1414 const struct tcphdr *th;
@@ -1418,24 +1440,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1418 1440
1419 th = tcp_hdr(skb); 1441 th = tcp_hdr(skb);
1420 hdr = ipv6_hdr(skb); 1442 hdr = ipv6_hdr(skb);
1421 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1422 * barrier() makes sure compiler wont play fool^Waliasing games.
1423 */
1424 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1425 sizeof(struct inet6_skb_parm));
1426 barrier();
1427
1428 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1429 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1430 skb->len - th->doff*4);
1431 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1432 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1433 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1434 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1435 TCP_SKB_CB(skb)->sacked = 0;
1436 1443
1437 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest, 1444 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1438 tcp_v6_iif(skb)); 1445 inet6_iif(skb));
1439 if (!sk) 1446 if (!sk)
1440 goto no_tcp_socket; 1447 goto no_tcp_socket;
1441 1448
@@ -1451,6 +1458,8 @@ process:
1451 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 1458 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1452 goto discard_and_relse; 1459 goto discard_and_relse;
1453 1460
1461 tcp_v6_fill_cb(skb, hdr, th);
1462
1454#ifdef CONFIG_TCP_MD5SIG 1463#ifdef CONFIG_TCP_MD5SIG
1455 if (tcp_v6_inbound_md5_hash(sk, skb)) 1464 if (tcp_v6_inbound_md5_hash(sk, skb))
1456 goto discard_and_relse; 1465 goto discard_and_relse;
@@ -1482,6 +1491,8 @@ no_tcp_socket:
1482 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 1491 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1483 goto discard_it; 1492 goto discard_it;
1484 1493
1494 tcp_v6_fill_cb(skb, hdr, th);
1495
1485 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1496 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1486csum_error: 1497csum_error:
1487 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); 1498 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
@@ -1505,6 +1516,8 @@ do_time_wait:
1505 goto discard_it; 1516 goto discard_it;
1506 } 1517 }
1507 1518
1519 tcp_v6_fill_cb(skb, hdr, th);
1520
1508 if (skb->len < (th->doff<<2)) { 1521 if (skb->len < (th->doff<<2)) {
1509 inet_twsk_put(inet_twsk(sk)); 1522 inet_twsk_put(inet_twsk(sk));
1510 goto bad_packet; 1523 goto bad_packet;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0bb7038121ac..bd4e46ec32bd 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
140 if (!ret) { 140 if (!ret) {
141 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 141 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
142 142
143 if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 143 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
144 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
145 (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
144 sdata->crypto_tx_tailroom_needed_cnt--; 146 sdata->crypto_tx_tailroom_needed_cnt--;
145 147
146 WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && 148 WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
188 sta = key->sta; 190 sta = key->sta;
189 sdata = key->sdata; 191 sdata = key->sdata;
190 192
191 if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 193 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
194 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
195 (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
192 increment_tailroom_need_count(sdata); 196 increment_tailroom_need_count(sdata);
193 197
194 ret = drv_set_key(key->local, DISABLE_KEY, sdata, 198 ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
884 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 888 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
885 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 889 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
886 890
887 if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 891 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
892 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
893 (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
888 increment_tailroom_need_count(key->sdata); 894 increment_tailroom_need_count(key->sdata);
889 } 895 }
890 896
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2c36c4765f47..837a406a9dd6 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1643,7 +1643,7 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
1643{ 1643{
1644 struct ieee80211_local *local = sdata->local; 1644 struct ieee80211_local *local = sdata->local;
1645 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1645 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1646 bool ret; 1646 bool ret = false;
1647 int ac; 1647 int ac;
1648 1648
1649 if (local->hw.queues < IEEE80211_NUM_ACS) 1649 if (local->hw.queues < IEEE80211_NUM_ACS)
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index ca27837974fe..349295d21946 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -31,10 +31,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
31 SKB_GSO_TCPV6 | 31 SKB_GSO_TCPV6 |
32 SKB_GSO_UDP | 32 SKB_GSO_UDP |
33 SKB_GSO_DODGY | 33 SKB_GSO_DODGY |
34 SKB_GSO_TCP_ECN | 34 SKB_GSO_TCP_ECN)))
35 SKB_GSO_GRE |
36 SKB_GSO_GRE_CSUM |
37 SKB_GSO_IPIP)))
38 goto out; 35 goto out;
39 36
40 /* Setup inner SKB. */ 37 /* Setup inner SKB. */
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 1d5341f3761d..5d3daae98bf0 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
183 struct nf_conn *ct; 183 struct nf_conn *ct;
184 struct net *net; 184 struct net *net;
185 185
186 *diff = 0;
187
186#ifdef CONFIG_IP_VS_IPV6 188#ifdef CONFIG_IP_VS_IPV6
187 /* This application helper doesn't work with IPv6 yet, 189 /* This application helper doesn't work with IPv6 yet,
188 * so turn this into a no-op for IPv6 packets 190 * so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
191 return 1; 193 return 1;
192#endif 194#endif
193 195
194 *diff = 0;
195
196 /* Only useful for established sessions */ 196 /* Only useful for established sessions */
197 if (cp->state != IP_VS_TCP_S_ESTABLISHED) 197 if (cp->state != IP_VS_TCP_S_ESTABLISHED)
198 return 1; 198 return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
322 struct ip_vs_conn *n_cp; 322 struct ip_vs_conn *n_cp;
323 struct net *net; 323 struct net *net;
324 324
325 /* no diff required for incoming packets */
326 *diff = 0;
327
325#ifdef CONFIG_IP_VS_IPV6 328#ifdef CONFIG_IP_VS_IPV6
326 /* This application helper doesn't work with IPv6 yet, 329 /* This application helper doesn't work with IPv6 yet,
327 * so turn this into a no-op for IPv6 packets 330 * so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
330 return 1; 333 return 1;
331#endif 334#endif
332 335
333 /* no diff required for incoming packets */
334 *diff = 0;
335
336 /* Only useful for established sessions */ 336 /* Only useful for established sessions */
337 if (cp->state != IP_VS_TCP_S_ESTABLISHED) 337 if (cp->state != IP_VS_TCP_S_ESTABLISHED)
338 return 1; 338 return 1;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a11674806707..46d1b26a468e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
611 */ 611 */
612 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 612 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
613 pr_debug("Confirming conntrack %p\n", ct); 613 pr_debug("Confirming conntrack %p\n", ct);
614 /* We have to check the DYING flag inside the lock to prevent 614 /* We have to check the DYING flag after unlink to prevent
615 a race against nf_ct_get_next_corpse() possibly called from 615 * a race against nf_ct_get_next_corpse() possibly called from
616 user context, else we insert an already 'dead' hash, blocking 616 * user context, else we insert an already 'dead' hash, blocking
617 further use of that particular connection -JM */ 617 * further use of that particular connection -JM.
618 */
619 nf_ct_del_from_dying_or_unconfirmed_list(ct);
618 620
619 if (unlikely(nf_ct_is_dying(ct))) { 621 if (unlikely(nf_ct_is_dying(ct)))
620 nf_conntrack_double_unlock(hash, reply_hash); 622 goto out;
621 local_bh_enable();
622 return NF_ACCEPT;
623 }
624 623
625 /* See if there's one in the list already, including reverse: 624 /* See if there's one in the list already, including reverse:
626 NAT could have grabbed it without realizing, since we're 625 NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
636 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) 635 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
637 goto out; 636 goto out;
638 637
639 nf_ct_del_from_dying_or_unconfirmed_list(ct);
640
641 /* Timer relative to confirmation time, not original 638 /* Timer relative to confirmation time, not original
642 setting time, otherwise we'd get timer wrap in 639 setting time, otherwise we'd get timer wrap in
643 weird delay cases. */ 640 weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
673 return NF_ACCEPT; 670 return NF_ACCEPT;
674 671
675out: 672out:
673 nf_ct_add_to_dying_list(ct);
676 nf_conntrack_double_unlock(hash, reply_hash); 674 nf_conntrack_double_unlock(hash, reply_hash);
677 NF_CT_STAT_INC(net, insert_failed); 675 NF_CT_STAT_INC(net, insert_failed);
678 local_bh_enable(); 676 local_bh_enable();
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 129a8daa4abf..3b3ddb4fb9ee 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
713 struct nft_chain *chain, *nc; 713 struct nft_chain *chain, *nc;
714 struct nft_set *set, *ns; 714 struct nft_set *set, *ns;
715 715
716 list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { 716 list_for_each_entry(chain, &ctx->table->chains, list) {
717 ctx->chain = chain; 717 ctx->chain = chain;
718 718
719 err = nft_delrule_by_chain(ctx); 719 err = nft_delrule_by_chain(ctx);
720 if (err < 0) 720 if (err < 0)
721 goto out; 721 goto out;
722
723 err = nft_delchain(ctx);
724 if (err < 0)
725 goto out;
726 } 722 }
727 723
728 list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { 724 list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
735 goto out; 731 goto out;
736 } 732 }
737 733
734 list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
735 ctx->chain = chain;
736
737 err = nft_delchain(ctx);
738 if (err < 0)
739 goto out;
740 }
741
738 err = nft_deltable(ctx); 742 err = nft_deltable(ctx);
739out: 743out:
740 return err; 744 return err;
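
The nft_flush_table() reordering splits the teardown into phases: first every rule in every chain is deleted, then the sets, and only in a final pass are the chains themselves removed before the table goes away, presumably so that no chain disappears while rules elsewhere may still reference it. A toy sketch of that two-phase teardown (purely illustrative, hypothetical arrays):

#include <stdio.h>

#define NCHAINS 3

/* Toy model: rule_count[i] rules live in chain i; rules may reference other
 * chains, so no chain can go away while any rules remain anywhere. */
static int rule_count[NCHAINS] = { 2, 0, 5 };
static int chain_alive[NCHAINS] = { 1, 1, 1 };

static void flush_table(void)
{
        int i;

        /* Phase 1: drop every rule in every chain. */
        for (i = 0; i < NCHAINS; i++)
                rule_count[i] = 0;

        /* Phase 2: only now is it safe to drop the chains themselves. */
        for (i = 0; i < NCHAINS; i++)
                chain_alive[i] = 0;
}

int main(void)
{
        flush_table();
        printf("chains alive after flush: %d %d %d\n",
               chain_alive[0], chain_alive[1], chain_alive[2]);
        return 0;
}
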
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 13c2e17bbe27..c421d94c4652 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -321,7 +321,8 @@ replay:
321 nlh = nlmsg_hdr(skb); 321 nlh = nlmsg_hdr(skb);
322 err = 0; 322 err = 0;
323 323
324 if (nlh->nlmsg_len < NLMSG_HDRLEN) { 324 if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
325 skb->len < nlh->nlmsg_len) {
325 err = -EINVAL; 326 err = -EINVAL;
326 goto ack; 327 goto ack;
327 } 328 }
@@ -463,13 +464,13 @@ static void nfnetlink_rcv(struct sk_buff *skb)
463} 464}
464 465
465#ifdef CONFIG_MODULES 466#ifdef CONFIG_MODULES
466static int nfnetlink_bind(int group) 467static int nfnetlink_bind(struct net *net, int group)
467{ 468{
468 const struct nfnetlink_subsystem *ss; 469 const struct nfnetlink_subsystem *ss;
469 int type; 470 int type;
470 471
471 if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) 472 if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
472 return -EINVAL; 473 return 0;
473 474
474 type = nfnl_group2type[group]; 475 type = nfnl_group2type[group];
475 476
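
The nfnetlink change is a message-validation fix: the old test only compared nlmsg_len against the netlink header size, while the new one requires the payload to hold at least a struct nfgenmsg and the claimed length not to exceed what is actually present in the skb. The same two bounds apply whenever a length-prefixed record is parsed from an untrusted buffer; an illustrative stand-alone version (hypothetical struct hdr and PAYLOAD_MIN):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct hdr {                    /* stand-in for struct nlmsghdr */
        uint32_t msg_len;       /* total length claimed by the sender */
};

#define PAYLOAD_MIN 4           /* stand-in for sizeof(struct nfgenmsg) */

/* Accept a record only if the claimed length fits the buffer we really
 * have *and* leaves room for the minimum payload -- the two checks the
 * nfnetlink fix adds.  Returns 0 on success, -1 on malformed input. */
static int validate(const uint8_t *buf, size_t buf_len)
{
        struct hdr h;

        if (buf_len < sizeof(h))
                return -1;
        memcpy(&h, buf, sizeof(h));

        if (h.msg_len > buf_len)                 /* claims more than we have */
                return -1;
        if (h.msg_len < sizeof(h) + PAYLOAD_MIN) /* too short for the payload */
                return -1;
        return 0;
}

int main(void)
{
        uint8_t buf[16];
        struct hdr h = { .msg_len = 16 };

        memcpy(buf, &h, sizeof(h));
        return validate(buf, sizeof(buf));      /* 0: length is plausible */
}
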
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index afe2b0b45ec4..aff54fb1c8a0 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
65 } 65 }
66 66
67 if (priv->sreg_proto_min) { 67 if (priv->sreg_proto_min) {
68 range.min_proto.all = (__force __be16) 68 range.min_proto.all =
69 data[priv->sreg_proto_min].data[0]; 69 *(__be16 *)&data[priv->sreg_proto_min].data[0];
70 range.max_proto.all = (__force __be16) 70 range.max_proto.all =
71 data[priv->sreg_proto_max].data[0]; 71 *(__be16 *)&data[priv->sreg_proto_max].data[0];
72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
73 } 73 }
74 74
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 074cf3e91c6f..02fdde28dada 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -61,6 +61,7 @@
61#include <linux/rhashtable.h> 61#include <linux/rhashtable.h>
62#include <asm/cacheflush.h> 62#include <asm/cacheflush.h>
63#include <linux/hash.h> 63#include <linux/hash.h>
64#include <linux/genetlink.h>
64 65
65#include <net/net_namespace.h> 66#include <net/net_namespace.h>
66#include <net/sock.h> 67#include <net/sock.h>
@@ -1091,8 +1092,12 @@ static void netlink_remove(struct sock *sk)
1091 mutex_unlock(&nl_sk_hash_lock); 1092 mutex_unlock(&nl_sk_hash_lock);
1092 1093
1093 netlink_table_grab(); 1094 netlink_table_grab();
1094 if (nlk_sk(sk)->subscriptions) 1095 if (nlk_sk(sk)->subscriptions) {
1095 __sk_del_bind_node(sk); 1096 __sk_del_bind_node(sk);
1097 netlink_update_listeners(sk);
1098 }
1099 if (sk->sk_protocol == NETLINK_GENERIC)
1100 atomic_inc(&genl_sk_destructing_cnt);
1096 netlink_table_ungrab(); 1101 netlink_table_ungrab();
1097} 1102}
1098 1103
@@ -1139,8 +1144,8 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
1139 struct module *module = NULL; 1144 struct module *module = NULL;
1140 struct mutex *cb_mutex; 1145 struct mutex *cb_mutex;
1141 struct netlink_sock *nlk; 1146 struct netlink_sock *nlk;
1142 int (*bind)(int group); 1147 int (*bind)(struct net *net, int group);
1143 void (*unbind)(int group); 1148 void (*unbind)(struct net *net, int group);
1144 int err = 0; 1149 int err = 0;
1145 1150
1146 sock->state = SS_UNCONNECTED; 1151 sock->state = SS_UNCONNECTED;
@@ -1209,6 +1214,20 @@ static int netlink_release(struct socket *sock)
1209 * will be purged. 1214 * will be purged.
1210 */ 1215 */
1211 1216
1217 /* must not acquire netlink_table_lock in any way again before unbind
1218 * and notifying genetlink is done as otherwise it might deadlock
1219 */
1220 if (nlk->netlink_unbind) {
1221 int i;
1222
1223 for (i = 0; i < nlk->ngroups; i++)
1224 if (test_bit(i, nlk->groups))
1225 nlk->netlink_unbind(sock_net(sk), i + 1);
1226 }
1227 if (sk->sk_protocol == NETLINK_GENERIC &&
1228 atomic_dec_return(&genl_sk_destructing_cnt) == 0)
1229 wake_up(&genl_sk_destructing_waitq);
1230
1212 sock->sk = NULL; 1231 sock->sk = NULL;
1213 wake_up_interruptible_all(&nlk->wait); 1232 wake_up_interruptible_all(&nlk->wait);
1214 1233
@@ -1226,8 +1245,8 @@ static int netlink_release(struct socket *sock)
1226 1245
1227 module_put(nlk->module); 1246 module_put(nlk->module);
1228 1247
1229 netlink_table_grab();
1230 if (netlink_is_kernel(sk)) { 1248 if (netlink_is_kernel(sk)) {
1249 netlink_table_grab();
1231 BUG_ON(nl_table[sk->sk_protocol].registered == 0); 1250 BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1232 if (--nl_table[sk->sk_protocol].registered == 0) { 1251 if (--nl_table[sk->sk_protocol].registered == 0) {
1233 struct listeners *old; 1252 struct listeners *old;
@@ -1241,10 +1260,8 @@ static int netlink_release(struct socket *sock)
1241 nl_table[sk->sk_protocol].flags = 0; 1260 nl_table[sk->sk_protocol].flags = 0;
1242 nl_table[sk->sk_protocol].registered = 0; 1261 nl_table[sk->sk_protocol].registered = 0;
1243 } 1262 }
1244 } else if (nlk->subscriptions) { 1263 netlink_table_ungrab();
1245 netlink_update_listeners(sk);
1246 } 1264 }
1247 netlink_table_ungrab();
1248 1265
1249 kfree(nlk->groups); 1266 kfree(nlk->groups);
1250 nlk->groups = NULL; 1267 nlk->groups = NULL;
@@ -1410,9 +1427,10 @@ static int netlink_realloc_groups(struct sock *sk)
1410 return err; 1427 return err;
1411} 1428}
1412 1429
1413static void netlink_unbind(int group, long unsigned int groups, 1430static void netlink_undo_bind(int group, long unsigned int groups,
1414 struct netlink_sock *nlk) 1431 struct sock *sk)
1415{ 1432{
1433 struct netlink_sock *nlk = nlk_sk(sk);
1416 int undo; 1434 int undo;
1417 1435
1418 if (!nlk->netlink_unbind) 1436 if (!nlk->netlink_unbind)
@@ -1420,7 +1438,7 @@ static void netlink_unbind(int group, long unsigned int groups,
1420 1438
1421 for (undo = 0; undo < group; undo++) 1439 for (undo = 0; undo < group; undo++)
1422 if (test_bit(undo, &groups)) 1440 if (test_bit(undo, &groups))
1423 nlk->netlink_unbind(undo); 1441 nlk->netlink_unbind(sock_net(sk), undo);
1424} 1442}
1425 1443
1426static int netlink_bind(struct socket *sock, struct sockaddr *addr, 1444static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1458,10 +1476,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1458 for (group = 0; group < nlk->ngroups; group++) { 1476 for (group = 0; group < nlk->ngroups; group++) {
1459 if (!test_bit(group, &groups)) 1477 if (!test_bit(group, &groups))
1460 continue; 1478 continue;
1461 err = nlk->netlink_bind(group); 1479 err = nlk->netlink_bind(net, group);
1462 if (!err) 1480 if (!err)
1463 continue; 1481 continue;
1464 netlink_unbind(group, groups, nlk); 1482 netlink_undo_bind(group, groups, sk);
1465 return err; 1483 return err;
1466 } 1484 }
1467 } 1485 }
@@ -1471,7 +1489,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1471 netlink_insert(sk, net, nladdr->nl_pid) : 1489 netlink_insert(sk, net, nladdr->nl_pid) :
1472 netlink_autobind(sock); 1490 netlink_autobind(sock);
1473 if (err) { 1491 if (err) {
1474 netlink_unbind(nlk->ngroups, groups, nlk); 1492 netlink_undo_bind(nlk->ngroups, groups, sk);
1475 return err; 1493 return err;
1476 } 1494 }
1477 } 1495 }
@@ -2122,7 +2140,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
2122 if (!val || val - 1 >= nlk->ngroups) 2140 if (!val || val - 1 >= nlk->ngroups)
2123 return -EINVAL; 2141 return -EINVAL;
2124 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) { 2142 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
2125 err = nlk->netlink_bind(val); 2143 err = nlk->netlink_bind(sock_net(sk), val);
2126 if (err) 2144 if (err)
2127 return err; 2145 return err;
2128 } 2146 }
@@ -2131,7 +2149,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
2131 optname == NETLINK_ADD_MEMBERSHIP); 2149 optname == NETLINK_ADD_MEMBERSHIP);
2132 netlink_table_ungrab(); 2150 netlink_table_ungrab();
2133 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind) 2151 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
2134 nlk->netlink_unbind(val); 2152 nlk->netlink_unbind(sock_net(sk), val);
2135 2153
2136 err = 0; 2154 err = 0;
2137 break; 2155 break;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index b20a1731759b..f1c31b39aa3e 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -2,6 +2,7 @@
2#define _AF_NETLINK_H 2#define _AF_NETLINK_H
3 3
4#include <linux/rhashtable.h> 4#include <linux/rhashtable.h>
5#include <linux/atomic.h>
5#include <net/sock.h> 6#include <net/sock.h>
6 7
7#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) 8#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -39,8 +40,8 @@ struct netlink_sock {
39 struct mutex *cb_mutex; 40 struct mutex *cb_mutex;
40 struct mutex cb_def_mutex; 41 struct mutex cb_def_mutex;
41 void (*netlink_rcv)(struct sk_buff *skb); 42 void (*netlink_rcv)(struct sk_buff *skb);
42 int (*netlink_bind)(int group); 43 int (*netlink_bind)(struct net *net, int group);
43 void (*netlink_unbind)(int group); 44 void (*netlink_unbind)(struct net *net, int group);
44 struct module *module; 45 struct module *module;
45#ifdef CONFIG_NETLINK_MMAP 46#ifdef CONFIG_NETLINK_MMAP
46 struct mutex pg_vec_lock; 47 struct mutex pg_vec_lock;
@@ -65,8 +66,8 @@ struct netlink_table {
65 unsigned int groups; 66 unsigned int groups;
66 struct mutex *cb_mutex; 67 struct mutex *cb_mutex;
67 struct module *module; 68 struct module *module;
68 int (*bind)(int group); 69 int (*bind)(struct net *net, int group);
69 void (*unbind)(int group); 70 void (*unbind)(struct net *net, int group);
70 bool (*compare)(struct net *net, struct sock *sock); 71 bool (*compare)(struct net *net, struct sock *sock);
71 int registered; 72 int registered;
72}; 73};
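
[Annotation] The af_netlink.c and af_netlink.h hunks above widen the per-group bind/unbind callbacks so they receive the joining socket's network namespace, and rename the error-unwind helper to netlink_undo_bind(). Below is a minimal sketch, not taken from the patch, of a kernel-side netlink user filling in the new callback signatures; the unit (NETLINK_USERSOCK), the group count and the namespace policy are illustrative assumptions only.

#include <linux/module.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static struct sock *demo_nl_sk;

/* Called once per multicast group a socket joins; under the new
 * signature 'net' is the joining socket's namespace. */
static int demo_nl_bind(struct net *net, int group)
{
        if (!net_eq(net, &init_net))    /* assumed policy, not from the patch */
                return -EPERM;
        return 0;
}

static void demo_nl_unbind(struct net *net, int group)
{
        /* per-namespace teardown would go here */
}

static int __init demo_nl_init(void)
{
        struct netlink_kernel_cfg cfg = {
                .groups = 4,            /* assumed group count */
                .bind   = demo_nl_bind,
                .unbind = demo_nl_unbind,
        };

        demo_nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
        return demo_nl_sk ? 0 : -ENOMEM;
}

static void __exit demo_nl_exit(void)
{
        netlink_kernel_release(demo_nl_sk);
}

module_init(demo_nl_init);
module_exit(demo_nl_exit);
MODULE_LICENSE("GPL");
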
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 76393f2f4b22..ee57459fc258 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -23,6 +23,9 @@
23static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ 23static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
24static DECLARE_RWSEM(cb_lock); 24static DECLARE_RWSEM(cb_lock);
25 25
26atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
27DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
28
26void genl_lock(void) 29void genl_lock(void)
27{ 30{
28 mutex_lock(&genl_mutex); 31 mutex_lock(&genl_mutex);
@@ -435,15 +438,18 @@ int genl_unregister_family(struct genl_family *family)
435 438
436 genl_lock_all(); 439 genl_lock_all();
437 440
438 genl_unregister_mc_groups(family);
439
440 list_for_each_entry(rc, genl_family_chain(family->id), family_list) { 441 list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
441 if (family->id != rc->id || strcmp(rc->name, family->name)) 442 if (family->id != rc->id || strcmp(rc->name, family->name))
442 continue; 443 continue;
443 444
445 genl_unregister_mc_groups(family);
446
444 list_del(&rc->family_list); 447 list_del(&rc->family_list);
445 family->n_ops = 0; 448 family->n_ops = 0;
446 genl_unlock_all(); 449 up_write(&cb_lock);
450 wait_event(genl_sk_destructing_waitq,
451 atomic_read(&genl_sk_destructing_cnt) == 0);
452 genl_unlock();
447 453
448 kfree(family->attrbuf); 454 kfree(family->attrbuf);
449 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0); 455 genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
@@ -983,11 +989,63 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
983 { .name = "notify", }, 989 { .name = "notify", },
984}; 990};
985 991
992static int genl_bind(struct net *net, int group)
993{
994 int i, err = -ENOENT;
995
996 down_read(&cb_lock);
997 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
998 struct genl_family *f;
999
1000 list_for_each_entry(f, genl_family_chain(i), family_list) {
1001 if (group >= f->mcgrp_offset &&
1002 group < f->mcgrp_offset + f->n_mcgrps) {
1003 int fam_grp = group - f->mcgrp_offset;
1004
1005 if (!f->netnsok && net != &init_net)
1006 err = -ENOENT;
1007 else if (f->mcast_bind)
1008 err = f->mcast_bind(net, fam_grp);
1009 else
1010 err = 0;
1011 break;
1012 }
1013 }
1014 }
1015 up_read(&cb_lock);
1016
1017 return err;
1018}
1019
1020static void genl_unbind(struct net *net, int group)
1021{
1022 int i;
1023
1024 down_read(&cb_lock);
1025 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
1026 struct genl_family *f;
1027
1028 list_for_each_entry(f, genl_family_chain(i), family_list) {
1029 if (group >= f->mcgrp_offset &&
1030 group < f->mcgrp_offset + f->n_mcgrps) {
1031 int fam_grp = group - f->mcgrp_offset;
1032
1033 if (f->mcast_unbind)
1034 f->mcast_unbind(net, fam_grp);
1035 break;
1036 }
1037 }
1038 }
1039 up_read(&cb_lock);
1040}
1041
986static int __net_init genl_pernet_init(struct net *net) 1042static int __net_init genl_pernet_init(struct net *net)
987{ 1043{
988 struct netlink_kernel_cfg cfg = { 1044 struct netlink_kernel_cfg cfg = {
989 .input = genl_rcv, 1045 .input = genl_rcv,
990 .flags = NL_CFG_F_NONROOT_RECV, 1046 .flags = NL_CFG_F_NONROOT_RECV,
1047 .bind = genl_bind,
1048 .unbind = genl_unbind,
991 }; 1049 };
992 1050
993 /* we'll bump the group number right afterwards */ 1051 /* we'll bump the group number right afterwards */
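
[Annotation] The new genl_bind()/genl_unbind() above translate a netlink-wide multicast group number into a family-relative index (group - f->mcgrp_offset) and forward it to the owning family's mcast_bind/mcast_unbind, rejecting joins from outside the initial namespace for families that have not set netnsok. A rough sketch of a family opting into the hook follows; the family name, group name and permission check are invented, and registration (the ops table and genl_register_family_with_ops_groups()) is omitted for brevity.

#include <net/genetlink.h>
#include <net/net_namespace.h>

/* The group index arriving here is family-relative: 0 == first group. */
static int demo_mcast_bind(struct net *net, int group)
{
        if (group != 0)
                return -EINVAL;
        return net_eq(net, &init_net) ? 0 : -EOPNOTSUPP;  /* assumed policy */
}

static void demo_mcast_unbind(struct net *net, int group)
{
        /* drop any per-namespace listener state here */
}

static const struct genl_multicast_group demo_mcgrps[] = {
        { .name = "events", },
};

static struct genl_family demo_family = {
        .id           = GENL_ID_GENERATE,
        .name         = "demo",
        .version      = 1,
        .netnsok      = true,
        .mcast_bind   = demo_mcast_bind,
        .mcast_unbind = demo_mcast_unbind,
};
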
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 764fdc39c63b..770064c83711 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -147,7 +147,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
147 hdr = eth_hdr(skb); 147 hdr = eth_hdr(skb);
148 hdr->h_proto = mpls->mpls_ethertype; 148 hdr->h_proto = mpls->mpls_ethertype;
149 149
150 skb_set_inner_protocol(skb, skb->protocol); 150 if (!skb->inner_protocol)
151 skb_set_inner_protocol(skb, skb->protocol);
151 skb->protocol = mpls->mpls_ethertype; 152 skb->protocol = mpls->mpls_ethertype;
152 153
153 invalidate_flow_key(key); 154 invalidate_flow_key(key);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 332b5a031739..b07349e82d78 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -83,8 +83,7 @@ static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
83 unsigned int group) 83 unsigned int group)
84{ 84{
85 return info->nlhdr->nlmsg_flags & NLM_F_ECHO || 85 return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
86 genl_has_listeners(family, genl_info_net(info)->genl_sock, 86 genl_has_listeners(family, genl_info_net(info), group);
87 group);
88} 87}
89 88
90static void ovs_notify(struct genl_family *family, 89static void ovs_notify(struct genl_family *family,
@@ -525,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
525 struct vport *input_vport; 524 struct vport *input_vport;
526 int len; 525 int len;
527 int err; 526 int err;
528 bool log = !a[OVS_FLOW_ATTR_PROBE]; 527 bool log = !a[OVS_PACKET_ATTR_PROBE];
529 528
530 err = -EINVAL; 529 err = -EINVAL;
531 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || 530 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -611,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
611 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, 610 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
612 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, 611 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
613 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, 612 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
613 [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
614}; 614};
615 615
616static const struct genl_ops dp_packet_genl_ops[] = { 616static const struct genl_ops dp_packet_genl_ops[] = {
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 70bef2ab7f2b..da2fae0873a5 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
70{ 70{
71 struct flow_stats *stats; 71 struct flow_stats *stats;
72 int node = numa_node_id(); 72 int node = numa_node_id();
73 int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
73 74
74 stats = rcu_dereference(flow->stats[node]); 75 stats = rcu_dereference(flow->stats[node]);
75 76
@@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
105 if (likely(new_stats)) { 106 if (likely(new_stats)) {
106 new_stats->used = jiffies; 107 new_stats->used = jiffies;
107 new_stats->packet_count = 1; 108 new_stats->packet_count = 1;
108 new_stats->byte_count = skb->len; 109 new_stats->byte_count = len;
109 new_stats->tcp_flags = tcp_flags; 110 new_stats->tcp_flags = tcp_flags;
110 spin_lock_init(&new_stats->lock); 111 spin_lock_init(&new_stats->lock);
111 112
@@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
120 121
121 stats->used = jiffies; 122 stats->used = jiffies;
122 stats->packet_count++; 123 stats->packet_count++;
123 stats->byte_count += skb->len; 124 stats->byte_count += len;
124 stats->tcp_flags |= tcp_flags; 125 stats->tcp_flags |= tcp_flags;
125unlock: 126unlock:
126 spin_unlock(&stats->lock); 127 spin_unlock(&stats->lock);
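
[Annotation] The flow.c hunk above (and the matching vport.c hunk further down) count the 4-byte 802.1Q tag for packets whose VLAN TCI sits in the accelerated skb field, since skb->len does not include it in that case. The rule can be expressed as a one-line helper, assuming the 3.19-era vlan_tx_tag_present() name (later kernels spell it skb_vlan_tag_present()); the helper name itself is invented.

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* On-wire length of an skb whose VLAN tag may be held out-of-band. */
static inline unsigned int demo_skb_wire_len(const struct sk_buff *skb)
{
        return skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
}
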
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 9645a21d9eaa..d1eecf707613 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1753,7 +1753,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
1753 __be16 eth_type, __be16 vlan_tci, bool log) 1753 __be16 eth_type, __be16 vlan_tci, bool log)
1754{ 1754{
1755 const struct nlattr *a; 1755 const struct nlattr *a;
1756 bool out_tnl_port = false;
1757 int rem, err; 1756 int rem, err;
1758 1757
1759 if (depth >= SAMPLE_ACTION_DEPTH) 1758 if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1796,8 +1795,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
1796 case OVS_ACTION_ATTR_OUTPUT: 1795 case OVS_ACTION_ATTR_OUTPUT:
1797 if (nla_get_u32(a) >= DP_MAX_PORTS) 1796 if (nla_get_u32(a) >= DP_MAX_PORTS)
1798 return -EINVAL; 1797 return -EINVAL;
1799 out_tnl_port = false;
1800
1801 break; 1798 break;
1802 1799
1803 case OVS_ACTION_ATTR_HASH: { 1800 case OVS_ACTION_ATTR_HASH: {
@@ -1832,12 +1829,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
1832 case OVS_ACTION_ATTR_PUSH_MPLS: { 1829 case OVS_ACTION_ATTR_PUSH_MPLS: {
1833 const struct ovs_action_push_mpls *mpls = nla_data(a); 1830 const struct ovs_action_push_mpls *mpls = nla_data(a);
1834 1831
1835 /* Networking stack do not allow simultaneous Tunnel
1836 * and MPLS GSO.
1837 */
1838 if (out_tnl_port)
1839 return -EINVAL;
1840
1841 if (!eth_p_mpls(mpls->mpls_ethertype)) 1832 if (!eth_p_mpls(mpls->mpls_ethertype))
1842 return -EINVAL; 1833 return -EINVAL;
1843 /* Prohibit push MPLS other than to a white list 1834 /* Prohibit push MPLS other than to a white list
@@ -1873,11 +1864,9 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
1873 1864
1874 case OVS_ACTION_ATTR_SET: 1865 case OVS_ACTION_ATTR_SET:
1875 err = validate_set(a, key, sfa, 1866 err = validate_set(a, key, sfa,
1876 &out_tnl_port, eth_type, log); 1867 &skip_copy, eth_type, log);
1877 if (err) 1868 if (err)
1878 return err; 1869 return err;
1879
1880 skip_copy = out_tnl_port;
1881 break; 1870 break;
1882 1871
1883 case OVS_ACTION_ATTR_SAMPLE: 1872 case OVS_ACTION_ATTR_SAMPLE:
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 347fa2325b22..484864dd0e68 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -219,7 +219,10 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
219 false); 219 false);
220 if (err < 0) 220 if (err < 0)
221 ip_rt_put(rt); 221 ip_rt_put(rt);
222 return err;
223
222error: 224error:
225 kfree_skb(skb);
223 return err; 226 return err;
224} 227}
225 228
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 6b69df545b1d..d4168c442db5 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -73,7 +73,7 @@ static struct sk_buff *__build_header(struct sk_buff *skb,
73 73
74 skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM)); 74 skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
75 if (IS_ERR(skb)) 75 if (IS_ERR(skb))
76 return NULL; 76 return skb;
77 77
78 tpi.flags = filter_tnl_flags(tun_key->tun_flags); 78 tpi.flags = filter_tnl_flags(tun_key->tun_flags);
79 tpi.proto = htons(ETH_P_TEB); 79 tpi.proto = htons(ETH_P_TEB);
@@ -144,7 +144,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
144 144
145 if (unlikely(!OVS_CB(skb)->egress_tun_info)) { 145 if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
146 err = -EINVAL; 146 err = -EINVAL;
147 goto error; 147 goto err_free_skb;
148 } 148 }
149 149
150 tun_key = &OVS_CB(skb)->egress_tun_info->tunnel; 150 tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
@@ -157,8 +157,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
157 fl.flowi4_proto = IPPROTO_GRE; 157 fl.flowi4_proto = IPPROTO_GRE;
158 158
159 rt = ip_route_output_key(net, &fl); 159 rt = ip_route_output_key(net, &fl);
160 if (IS_ERR(rt)) 160 if (IS_ERR(rt)) {
161 return PTR_ERR(rt); 161 err = PTR_ERR(rt);
162 goto err_free_skb;
163 }
162 164
163 tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags); 165 tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
164 166
@@ -183,8 +185,9 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
183 185
184 /* Push Tunnel header. */ 186 /* Push Tunnel header. */
185 skb = __build_header(skb, tunnel_hlen); 187 skb = __build_header(skb, tunnel_hlen);
186 if (unlikely(!skb)) { 188 if (IS_ERR(skb)) {
187 err = 0; 189 err = PTR_ERR(skb);
190 skb = NULL;
188 goto err_free_rt; 191 goto err_free_rt;
189 } 192 }
190 193
@@ -198,7 +201,8 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
198 tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false); 201 tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
199err_free_rt: 202err_free_rt:
200 ip_rt_put(rt); 203 ip_rt_put(rt);
201error: 204err_free_skb:
205 kfree_skb(skb);
202 return err; 206 return err;
203} 207}
204 208
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 38f95a52241b..d7c46b301024 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -187,7 +187,9 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
187 false); 187 false);
188 if (err < 0) 188 if (err < 0)
189 ip_rt_put(rt); 189 ip_rt_put(rt);
190 return err;
190error: 191error:
192 kfree_skb(skb);
191 return err; 193 return err;
192} 194}
193 195
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 9584526c0778..2034c6d9cb5a 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
480 stats = this_cpu_ptr(vport->percpu_stats); 480 stats = this_cpu_ptr(vport->percpu_stats);
481 u64_stats_update_begin(&stats->syncp); 481 u64_stats_update_begin(&stats->syncp);
482 stats->rx_packets++; 482 stats->rx_packets++;
483 stats->rx_bytes += skb->len; 483 stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
484 u64_stats_update_end(&stats->syncp); 484 u64_stats_update_end(&stats->syncp);
485 485
486 OVS_CB(skb)->input_vport = vport; 486 OVS_CB(skb)->input_vport = vport;
@@ -519,10 +519,9 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
519 u64_stats_update_end(&stats->syncp); 519 u64_stats_update_end(&stats->syncp);
520 } else if (sent < 0) { 520 } else if (sent < 0) {
521 ovs_vport_record_error(vport, VPORT_E_TX_ERROR); 521 ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
522 kfree_skb(skb); 522 } else {
523 } else
524 ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); 523 ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
525 524 }
526 return sent; 525 return sent;
527} 526}
528 527
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e52a44785681..9cfe2e1dd8b5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -785,6 +785,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
785 785
786 struct tpacket3_hdr *last_pkt; 786 struct tpacket3_hdr *last_pkt;
787 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 787 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
788 struct sock *sk = &po->sk;
788 789
789 if (po->stats.stats3.tp_drops) 790 if (po->stats.stats3.tp_drops)
790 status |= TP_STATUS_LOSING; 791 status |= TP_STATUS_LOSING;
@@ -809,6 +810,8 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
809 /* Flush the block */ 810 /* Flush the block */
810 prb_flush_block(pkc1, pbd1, status); 811 prb_flush_block(pkc1, pbd1, status);
811 812
813 sk->sk_data_ready(sk);
814
812 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); 815 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
813} 816}
814 817
@@ -2052,12 +2055,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2052 smp_wmb(); 2055 smp_wmb();
2053#endif 2056#endif
2054 2057
2055 if (po->tp_version <= TPACKET_V2) 2058 if (po->tp_version <= TPACKET_V2) {
2056 __packet_set_status(po, h.raw, status); 2059 __packet_set_status(po, h.raw, status);
2057 else 2060 sk->sk_data_ready(sk);
2061 } else {
2058 prb_clear_blk_fill_status(&po->rx_ring); 2062 prb_clear_blk_fill_status(&po->rx_ring);
2059 2063 }
2060 sk->sk_data_ready(sk);
2061 2064
2062drop_n_restore: 2065drop_n_restore:
2063 if (skb_head != skb->data && skb_shared(skb)) { 2066 if (skb_head != skb->data && skb_shared(skb)) {
@@ -2514,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2514 err = -EINVAL; 2517 err = -EINVAL;
2515 if (sock->type == SOCK_DGRAM) { 2518 if (sock->type == SOCK_DGRAM) {
2516 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2519 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2517 if (unlikely(offset) < 0) 2520 if (unlikely(offset < 0))
2518 goto out_free; 2521 goto out_free;
2519 } else { 2522 } else {
2520 if (ll_header_truncated(dev, len)) 2523 if (ll_header_truncated(dev, len))
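
[Annotation] Besides moving the sk_data_ready() wakeups so TPACKET_V3 readers are woken when a block is closed rather than on every packet (V1/V2 keep the per-packet wakeup), the packet_snd() hunk fixes a misplaced parenthesis: unlikely(offset) < 0 compares the 0/1 result of __builtin_expect() with 0 and can never be true, so a failing dev_hard_header() went undetected. A stand-alone userspace illustration, not kernel code:

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
        int offset = -22;               /* pretend dev_hard_header() failed */
        int buggy = unlikely(offset) < 0;   /* !!(offset) is 1, 1 < 0 is 0 */
        int fixed = unlikely(offset < 0);   /* hint only, value is still 1 */

        printf("buggy check: %d\n", buggy); /* always 0 */
        printf("fixed check: %d\n", fixed); /* 1 here */
        return 0;
}
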
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2625eccb77d5..aafe94bf292e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1603,7 +1603,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1603 sctp_assoc_t associd = 0; 1603 sctp_assoc_t associd = 0;
1604 sctp_cmsgs_t cmsgs = { NULL }; 1604 sctp_cmsgs_t cmsgs = { NULL };
1605 sctp_scope_t scope; 1605 sctp_scope_t scope;
1606 bool fill_sinfo_ttl = false; 1606 bool fill_sinfo_ttl = false, wait_connect = false;
1607 struct sctp_datamsg *datamsg; 1607 struct sctp_datamsg *datamsg;
1608 int msg_flags = msg->msg_flags; 1608 int msg_flags = msg->msg_flags;
1609 __u16 sinfo_flags = 0; 1609 __u16 sinfo_flags = 0;
@@ -1943,6 +1943,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1943 if (err < 0) 1943 if (err < 0)
1944 goto out_free; 1944 goto out_free;
1945 1945
1946 wait_connect = true;
1946 pr_debug("%s: we associated primitively\n", __func__); 1947 pr_debug("%s: we associated primitively\n", __func__);
1947 } 1948 }
1948 1949
@@ -1980,6 +1981,11 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1980 sctp_datamsg_put(datamsg); 1981 sctp_datamsg_put(datamsg);
1981 err = msg_len; 1982 err = msg_len;
1982 1983
1984 if (unlikely(wait_connect)) {
1985 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
1986 sctp_wait_for_connect(asoc, &timeo);
1987 }
1988
1983 /* If we are already past ASSOCIATE, the lower 1989 /* If we are already past ASSOCIATE, the lower
1984 * layers are responsible for association cleanup. 1990 * layers are responsible for association cleanup.
1985 */ 1991 */
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 1cb61242e55e..4439ac4c1b53 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
606 struct kvec *head = buf->head; 606 struct kvec *head = buf->head;
607 struct kvec *tail = buf->tail; 607 struct kvec *tail = buf->tail;
608 int fraglen; 608 int fraglen;
609 int new, old; 609 int new;
610 610
611 if (len > buf->len) { 611 if (len > buf->len) {
612 WARN_ON_ONCE(1); 612 WARN_ON_ONCE(1);
@@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
629 buf->len -= fraglen; 629 buf->len -= fraglen;
630 630
631 new = buf->page_base + buf->page_len; 631 new = buf->page_base + buf->page_len;
632 old = new + fraglen; 632
633 xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT); 633 xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
634 634
635 if (buf->page_len) { 635 if (buf->page_len) {
636 xdr->p = page_address(*xdr->page_ptr); 636 xdr->p = page_address(*xdr->page_ptr);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 96ceefeb9daf..a9e174fc0f91 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
220 struct sk_buff *skb; 220 struct sk_buff *skb;
221 221
222 skb_queue_walk(&bcl->outqueue, skb) { 222 skb_queue_walk(&bcl->outqueue, skb) {
223 if (more(buf_seqno(skb), after)) 223 if (more(buf_seqno(skb), after)) {
224 tipc_link_retransmit(bcl, skb, mod(to - after));
224 break; 225 break;
226 }
225 } 227 }
226 tipc_link_retransmit(bcl, skb, mod(to - after));
227} 228}
228 229
229/** 230/**
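
[Annotation] The bcast.c fix moves tipc_link_retransmit() inside the skb_queue_walk() loop: if the walk completes without hitting the break, the cursor ends up pointing at the queue head rather than at a real buffer, so the old code could hand the link layer a bogus skb. A userspace sketch of that iterator pitfall on a circular list:

#include <stdio.h>

struct node {
        struct node *next;
        int seqno;
};

int main(void)
{
        struct node head = { 0 }, a = { 0, 1 }, b = { 0, 2 };
        struct node *skb;

        head.next = &a;         /* head is a sentinel, like sk_buff_head */
        a.next = &b;
        b.next = &head;         /* circular, as kernel skb queues are */

        for (skb = head.next; skb != &head; skb = skb->next)
                if (skb->seqno > 100)   /* never matches: loop runs out */
                        break;

        /* After a complete walk the cursor is the sentinel, not a node. */
        printf("cursor == sentinel: %s\n", skb == &head ? "yes" : "no");
        return 0;
}
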
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 22ba971741e5..29c8675f9a11 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -175,7 +175,7 @@ config CFG80211_INTERNAL_REGDB
175 Most distributions have a CRDA package. So if unsure, say N. 175 Most distributions have a CRDA package. So if unsure, say N.
176 176
177config CFG80211_WEXT 177config CFG80211_WEXT
178 bool 178 bool "cfg80211 wireless extensions compatibility"
179 depends on CFG80211 179 depends on CFG80211
180 select WEXT_CORE 180 select WEXT_CORE
181 help 181 help
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7b8309840d4e..d39d1cbc86b1 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1530,45 +1530,40 @@ static void reg_call_notifier(struct wiphy *wiphy,
1530 1530
1531static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) 1531static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
1532{ 1532{
1533 struct ieee80211_channel *ch;
1534 struct cfg80211_chan_def chandef; 1533 struct cfg80211_chan_def chandef;
1535 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); 1534 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
1536 bool ret = true; 1535 enum nl80211_iftype iftype;
1537 1536
1538 wdev_lock(wdev); 1537 wdev_lock(wdev);
1538 iftype = wdev->iftype;
1539 1539
1540 /* make sure the interface is active */
1540 if (!wdev->netdev || !netif_running(wdev->netdev)) 1541 if (!wdev->netdev || !netif_running(wdev->netdev))
1541 goto out; 1542 goto wdev_inactive_unlock;
1542 1543
1543 switch (wdev->iftype) { 1544 switch (iftype) {
1544 case NL80211_IFTYPE_AP: 1545 case NL80211_IFTYPE_AP:
1545 case NL80211_IFTYPE_P2P_GO: 1546 case NL80211_IFTYPE_P2P_GO:
1546 if (!wdev->beacon_interval) 1547 if (!wdev->beacon_interval)
1547 goto out; 1548 goto wdev_inactive_unlock;
1548 1549 chandef = wdev->chandef;
1549 ret = cfg80211_reg_can_beacon(wiphy,
1550 &wdev->chandef, wdev->iftype);
1551 break; 1550 break;
1552 case NL80211_IFTYPE_ADHOC: 1551 case NL80211_IFTYPE_ADHOC:
1553 if (!wdev->ssid_len) 1552 if (!wdev->ssid_len)
1554 goto out; 1553 goto wdev_inactive_unlock;
1555 1554 chandef = wdev->chandef;
1556 ret = cfg80211_reg_can_beacon(wiphy,
1557 &wdev->chandef, wdev->iftype);
1558 break; 1555 break;
1559 case NL80211_IFTYPE_STATION: 1556 case NL80211_IFTYPE_STATION:
1560 case NL80211_IFTYPE_P2P_CLIENT: 1557 case NL80211_IFTYPE_P2P_CLIENT:
1561 if (!wdev->current_bss || 1558 if (!wdev->current_bss ||
1562 !wdev->current_bss->pub.channel) 1559 !wdev->current_bss->pub.channel)
1563 goto out; 1560 goto wdev_inactive_unlock;
1564 1561
1565 ch = wdev->current_bss->pub.channel; 1562 if (!rdev->ops->get_channel ||
1566 if (rdev->ops->get_channel && 1563 rdev_get_channel(rdev, wdev, &chandef))
1567 !rdev_get_channel(rdev, wdev, &chandef)) 1564 cfg80211_chandef_create(&chandef,
1568 ret = cfg80211_chandef_usable(wiphy, &chandef, 1565 wdev->current_bss->pub.channel,
1569 IEEE80211_CHAN_DISABLED); 1566 NL80211_CHAN_NO_HT);
1570 else
1571 ret = !(ch->flags & IEEE80211_CHAN_DISABLED);
1572 break; 1567 break;
1573 case NL80211_IFTYPE_MONITOR: 1568 case NL80211_IFTYPE_MONITOR:
1574 case NL80211_IFTYPE_AP_VLAN: 1569 case NL80211_IFTYPE_AP_VLAN:
@@ -1581,9 +1576,26 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
1581 break; 1576 break;
1582 } 1577 }
1583 1578
1584out:
1585 wdev_unlock(wdev); 1579 wdev_unlock(wdev);
1586 return ret; 1580
1581 switch (iftype) {
1582 case NL80211_IFTYPE_AP:
1583 case NL80211_IFTYPE_P2P_GO:
1584 case NL80211_IFTYPE_ADHOC:
1585 return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
1586 case NL80211_IFTYPE_STATION:
1587 case NL80211_IFTYPE_P2P_CLIENT:
1588 return cfg80211_chandef_usable(wiphy, &chandef,
1589 IEEE80211_CHAN_DISABLED);
1590 default:
1591 break;
1592 }
1593
1594 return true;
1595
1596wdev_inactive_unlock:
1597 wdev_unlock(wdev);
1598 return true;
1587} 1599}
1588 1600
1589static void reg_leave_invalid_chans(struct wiphy *wiphy) 1601static void reg_leave_invalid_chans(struct wiphy *wiphy)
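
[Annotation] reg_wdev_chan_valid() is restructured so that iftype and the channel definition are copied while wdev_lock() is held, and cfg80211_reg_can_beacon()/cfg80211_chandef_usable() run only after the lock has been dropped. A generic userspace sketch of that snapshot-then-check pattern; the struct, the pthread mutex and channel_allowed() are stand-ins, not cfg80211 API (build with cc -pthread).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
        pthread_mutex_t lock;
        int iftype;
        int channel;
};

static bool channel_allowed(int iftype, int channel)
{
        return channel != 13;   /* stand-in for the regulatory check */
}

static bool dev_chan_valid(struct dev_state *d)
{
        int iftype, channel;

        pthread_mutex_lock(&d->lock);
        iftype  = d->iftype;            /* snapshot under the lock */
        channel = d->channel;
        pthread_mutex_unlock(&d->lock);

        return channel_allowed(iftype, channel);    /* no lock held here */
}

int main(void)
{
        struct dev_state d = { PTHREAD_MUTEX_INITIALIZER, 0, 13 };

        printf("valid: %d\n", dev_chan_valid(&d));
        return 0;
}
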
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 1bca180db8ad..627f8cbbedb8 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -42,19 +42,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \
42 42
43__clean-files := $(filter-out $(no-clean-files), $(__clean-files)) 43__clean-files := $(filter-out $(no-clean-files), $(__clean-files))
44 44
45# as clean-files is given relative to the current directory, this adds 45# clean-files is given relative to the current directory, unless it
46# a $(obj) prefix, except for absolute paths 46# starts with $(objtree)/ (which means "./", so do not add "./" unless
47# you want to delete a file from the toplevel object directory).
47 48
48__clean-files := $(wildcard \ 49__clean-files := $(wildcard \
49 $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \ 50 $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
50 $(filter /%, $(__clean-files))) 51 $(filter $(objtree)/%, $(__clean-files)))
51 52
52# as clean-dirs is given relative to the current directory, this adds 53# same as clean-files
53# a $(obj) prefix, except for absolute paths
54 54
55__clean-dirs := $(wildcard \ 55__clean-dirs := $(wildcard \
56 $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \ 56 $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \
57 $(filter /%, $(clean-dirs))) 57 $(filter $(objtree)/%, $(clean-dirs)))
58 58
59# ========================================================================== 59# ==========================================================================
60 60
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 9609a7f0faea..c7952375ac53 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
148 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 148 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
149 atomic_dec(&key->user->nikeys); 149 atomic_dec(&key->user->nikeys);
150 150
151 key_user_put(key->user);
152
153 /* now throw away the key memory */ 151 /* now throw away the key memory */
154 if (key->type->destroy) 152 if (key->type->destroy)
155 key->type->destroy(key); 153 key->type->destroy(key);
156 154
155 key_user_put(key->user);
156
157 kfree(key->description); 157 kfree(key->description);
158 158
159#ifdef KEY_DEBUGGING 159#ifdef KEY_DEBUGGING
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index 255dabc6fc33..2a85e4209f0b 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
124 spin_lock_irq(&efw->lock); 124 spin_lock_irq(&efw->lock);
125 125
126 t = (struct snd_efw_transaction *)data; 126 t = (struct snd_efw_transaction *)data;
127 length = min_t(size_t, t->length * sizeof(t->length), length); 127 length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
128 128
129 if (efw->push_ptr < efw->pull_ptr) 129 if (efw->push_ptr < efw->pull_ptr)
130 capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); 130 capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
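
[Annotation] The one-line fireworks fix treats the response length as the big-endian quadlet count it is on the wire (be32_to_cpu()) instead of using the raw value, which on little-endian hosts could defeat the min_t() clamp; sizeof(u32) also states the intent more clearly than sizeof(t->length). A stand-alone byte-order illustration in plain C, with ntohl() playing the role of be32_to_cpu():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t wire = htonl(4);       /* device sends "4" big-endian */

        /* On a little-endian host the raw read is 0x04000000. */
        printf("raw value      : %u\n", (unsigned)wire);
        printf("after byteswap : %u\n", (unsigned)ntohl(wire));
        return 0;
}
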
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 8276a743e22e..0cfc9c8c4b4e 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1922,10 +1922,18 @@ int azx_mixer_create(struct azx *chip)
1922EXPORT_SYMBOL_GPL(azx_mixer_create); 1922EXPORT_SYMBOL_GPL(azx_mixer_create);
1923 1923
1924 1924
1925static bool is_input_stream(struct azx *chip, unsigned char index)
1926{
1927 return (index >= chip->capture_index_offset &&
1928 index < chip->capture_index_offset + chip->capture_streams);
1929}
1930
1925/* initialize SD streams */ 1931/* initialize SD streams */
1926int azx_init_stream(struct azx *chip) 1932int azx_init_stream(struct azx *chip)
1927{ 1933{
1928 int i; 1934 int i;
1935 int in_stream_tag = 0;
1936 int out_stream_tag = 0;
1929 1937
1930 /* initialize each stream (aka device) 1938 /* initialize each stream (aka device)
1931 * assign the starting bdl address to each stream (device) 1939 * assign the starting bdl address to each stream (device)
@@ -1938,9 +1946,21 @@ int azx_init_stream(struct azx *chip)
1938 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80); 1946 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
1939 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */ 1947 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
1940 azx_dev->sd_int_sta_mask = 1 << i; 1948 azx_dev->sd_int_sta_mask = 1 << i;
1941 /* stream tag: must be non-zero and unique */
1942 azx_dev->index = i; 1949 azx_dev->index = i;
1943 azx_dev->stream_tag = i + 1; 1950
1951 /* stream tag must be unique throughout
1952 * the stream direction group,
1953 * valid values 1...15
1954 * use separate stream tag if the flag
1955 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
1956 */
1957 if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
1958 azx_dev->stream_tag =
1959 is_input_stream(chip, i) ?
1960 ++in_stream_tag :
1961 ++out_stream_tag;
1962 else
1963 azx_dev->stream_tag = i + 1;
1944 } 1964 }
1945 1965
1946 return 0; 1966 return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2bf0b568e3de..d426a0bd6a5f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -299,6 +299,9 @@ enum {
299 AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\ 299 AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
300 AZX_DCAPS_SNOOP_TYPE(SCH)) 300 AZX_DCAPS_SNOOP_TYPE(SCH))
301 301
302#define AZX_DCAPS_INTEL_SKYLAKE \
303 (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG)
304
302/* quirks for ATI SB / AMD Hudson */ 305/* quirks for ATI SB / AMD Hudson */
303#define AZX_DCAPS_PRESET_ATI_SB \ 306#define AZX_DCAPS_PRESET_ATI_SB \
304 (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\ 307 (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
@@ -2027,7 +2030,7 @@ static const struct pci_device_id azx_ids[] = {
2027 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 2030 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
2028 /* Sunrise Point-LP */ 2031 /* Sunrise Point-LP */
2029 { PCI_DEVICE(0x8086, 0x9d70), 2032 { PCI_DEVICE(0x8086, 0x9d70),
2030 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 2033 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
2031 /* Haswell */ 2034 /* Haswell */
2032 { PCI_DEVICE(0x8086, 0x0a0c), 2035 { PCI_DEVICE(0x8086, 0x0a0c),
2033 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, 2036 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index aa484fdf4338..166e3e84b963 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
171#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ 171#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
172#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ 172#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
173#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ 173#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
174#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
174 175
175enum { 176enum {
176 AZX_SNOOP_TYPE_NONE , 177 AZX_SNOOP_TYPE_NONE ,
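
[Annotation] The HDA hunks add AZX_DCAPS_SEPARATE_STREAM_TAG and enable it for the Sunrise Point-LP entry: on such controllers capture and playback streams each get their own 1..N stream-tag sequence instead of the shared i + 1 numbering. A small userspace sketch of that numbering rule; the stream counts are made up.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        const int capture_streams = 4, playback_streams = 4;   /* assumed */
        const bool separate_tags = true;    /* AZX_DCAPS_SEPARATE_STREAM_TAG set */
        int in_tag = 0, out_tag = 0;

        for (int i = 0; i < capture_streams + playback_streams; i++) {
                /* capture streams first, as with capture_index_offset == 0 */
                bool is_input = i < capture_streams;
                int tag = separate_tags ? (is_input ? ++in_tag : ++out_tag)
                                        : i + 1;

                printf("stream %2d (%s): tag %d\n",
                       i, is_input ? "in " : "out", tag);
        }
        return 0;
}
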
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5f13d2d18079..b422e406a9cb 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3353{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 3353{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
3354{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, 3354{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
3355{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3355{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
3356{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
3356{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3357{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
3357{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3358{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
3358{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3359{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060");
3413MODULE_ALIAS("snd-hda-codec-id:10de0067"); 3414MODULE_ALIAS("snd-hda-codec-id:10de0067");
3414MODULE_ALIAS("snd-hda-codec-id:10de0070"); 3415MODULE_ALIAS("snd-hda-codec-id:10de0070");
3415MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3416MODULE_ALIAS("snd-hda-codec-id:10de0071");
3417MODULE_ALIAS("snd-hda-codec-id:10de0072");
3416MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3418MODULE_ALIAS("snd-hda-codec-id:10de8001");
3417MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3419MODULE_ALIAS("snd-hda-codec-id:11069f80");
3418MODULE_ALIAS("snd-hda-codec-id:11069f81"); 3420MODULE_ALIAS("snd-hda-codec-id:11069f81");
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 4f6413e01c13..605d14003d25 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec)
568 spec->gpio_mask; 568 spec->gpio_mask;
569 } 569 }
570 if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) 570 if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
571 spec->gpio_mask &= spec->gpio_mask;
572 if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
573 spec->gpio_dir &= spec->gpio_mask; 571 spec->gpio_dir &= spec->gpio_mask;
572 if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
573 spec->gpio_data &= spec->gpio_mask;
574 if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask)) 574 if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
575 spec->eapd_mask &= spec->gpio_mask; 575 spec->eapd_mask &= spec->gpio_mask;
576 if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute)) 576 if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 81fe1464d268..c0fbe1881439 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -784,8 +784,8 @@ static unsigned int bst_tlv[] = {
784static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol, 784static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
785 struct snd_ctl_elem_value *ucontrol) 785 struct snd_ctl_elem_value *ucontrol)
786{ 786{
787 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 787 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
788 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 788 struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
789 789
790 ucontrol->value.integer.value[0] = rt5677->dsp_vad_en; 790 ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
791 791
@@ -795,8 +795,9 @@ static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
795static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol, 795static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
796 struct snd_ctl_elem_value *ucontrol) 796 struct snd_ctl_elem_value *ucontrol)
797{ 797{
798 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 798 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
799 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 799 struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
800 struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
800 801
801 rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0]; 802 rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
802 803
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index b93168d4f648..8d18bbda661b 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -209,16 +209,9 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
209 209
210 switch (config->chan_nr) { 210 switch (config->chan_nr) {
211 case EIGHT_CHANNEL_SUPPORT: 211 case EIGHT_CHANNEL_SUPPORT:
212 ch_reg = 3;
213 break;
214 case SIX_CHANNEL_SUPPORT: 212 case SIX_CHANNEL_SUPPORT:
215 ch_reg = 2;
216 break;
217 case FOUR_CHANNEL_SUPPORT: 213 case FOUR_CHANNEL_SUPPORT:
218 ch_reg = 1;
219 break;
220 case TWO_CHANNEL_SUPPORT: 214 case TWO_CHANNEL_SUPPORT:
221 ch_reg = 0;
222 break; 215 break;
223 default: 216 default:
224 dev_err(dev->dev, "channel not supported\n"); 217 dev_err(dev->dev, "channel not supported\n");
@@ -227,18 +220,22 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
227 220
228 i2s_disable_channels(dev, substream->stream); 221 i2s_disable_channels(dev, substream->stream);
229 222
230 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 223 for (ch_reg = 0; ch_reg < (config->chan_nr / 2); ch_reg++) {
231 i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution); 224 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
232 i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02); 225 i2s_write_reg(dev->i2s_base, TCR(ch_reg),
233 irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); 226 xfer_resolution);
234 i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30); 227 i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
235 i2s_write_reg(dev->i2s_base, TER(ch_reg), 1); 228 irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
236 } else { 229 i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
237 i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution); 230 i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
238 i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07); 231 } else {
239 irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); 232 i2s_write_reg(dev->i2s_base, RCR(ch_reg),
240 i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03); 233 xfer_resolution);
241 i2s_write_reg(dev->i2s_base, RER(ch_reg), 1); 234 i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
235 irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
236 i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
237 i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
238 }
242 } 239 }
243 240
244 i2s_write_reg(dev->i2s_base, CCR, ccr); 241 i2s_write_reg(dev->i2s_base, CCR, ccr);
@@ -263,6 +260,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
263 snd_soc_dai_set_dma_data(dai, substream, NULL); 260 snd_soc_dai_set_dma_data(dai, substream, NULL);
264} 261}
265 262
263static int dw_i2s_prepare(struct snd_pcm_substream *substream,
264 struct snd_soc_dai *dai)
265{
266 struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
267
268 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
269 i2s_write_reg(dev->i2s_base, TXFFR, 1);
270 else
271 i2s_write_reg(dev->i2s_base, RXFFR, 1);
272
273 return 0;
274}
275
266static int dw_i2s_trigger(struct snd_pcm_substream *substream, 276static int dw_i2s_trigger(struct snd_pcm_substream *substream,
267 int cmd, struct snd_soc_dai *dai) 277 int cmd, struct snd_soc_dai *dai)
268{ 278{
@@ -294,6 +304,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
294 .startup = dw_i2s_startup, 304 .startup = dw_i2s_startup,
295 .shutdown = dw_i2s_shutdown, 305 .shutdown = dw_i2s_shutdown,
296 .hw_params = dw_i2s_hw_params, 306 .hw_params = dw_i2s_hw_params,
307 .prepare = dw_i2s_prepare,
297 .trigger = dw_i2s_trigger, 308 .trigger = dw_i2s_trigger,
298}; 309};
299 310
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index e989ecf046c9..f86de1211b96 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -89,7 +89,7 @@ config SND_SOC_INTEL_BROADWELL_MACH
89 89
90config SND_SOC_INTEL_BYTCR_RT5640_MACH 90config SND_SOC_INTEL_BYTCR_RT5640_MACH
91 tristate "ASoC Audio DSP Support for MID BYT Platform" 91 tristate "ASoC Audio DSP Support for MID BYT Platform"
92 depends on X86 92 depends on X86 && I2C
93 select SND_SOC_RT5640 93 select SND_SOC_RT5640
94 select SND_SST_MFLD_PLATFORM 94 select SND_SST_MFLD_PLATFORM
95 select SND_SST_IPC_ACPI 95 select SND_SST_IPC_ACPI
@@ -101,7 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
101 101
102config SND_SOC_INTEL_CHT_BSW_RT5672_MACH 102config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
103 tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec" 103 tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
104 depends on X86_INTEL_LPSS 104 depends on X86_INTEL_LPSS && I2C
105 select SND_SOC_RT5670 105 select SND_SOC_RT5670
106 select SND_SST_MFLD_PLATFORM 106 select SND_SST_MFLD_PLATFORM
107 select SND_SST_IPC_ACPI 107 select SND_SST_IPC_ACPI
diff --git a/sound/soc/intel/bytcr_dpcm_rt5640.c b/sound/soc/intel/bytcr_dpcm_rt5640.c
index f5d0fc1ab10c..eef0c56ec32e 100644
--- a/sound/soc/intel/bytcr_dpcm_rt5640.c
+++ b/sound/soc/intel/bytcr_dpcm_rt5640.c
@@ -227,4 +227,4 @@ module_platform_driver(snd_byt_mc_driver);
227MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver"); 227MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver");
228MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>"); 228MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
229MODULE_LICENSE("GPL v2"); 229MODULE_LICENSE("GPL v2");
230MODULE_ALIAS("platform:bytrt5640-audio"); 230MODULE_ALIAS("platform:bytt100_rt5640");
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index 4a5bde9c686b..ef2e8b5766a1 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -763,8 +763,12 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
763 /* does block span more than 1 section */ 763 /* does block span more than 1 section */
764 if (ba->offset >= block->offset && ba->offset < block_end) { 764 if (ba->offset >= block->offset && ba->offset < block_end) {
765 765
766 /* add block */
767 list_move(&block->list, &dsp->used_block_list);
768 list_add(&block->module_list, block_list);
766 /* align ba to block boundary */ 769 /* align ba to block boundary */
767 ba->offset = block->offset; 770 ba->size -= block_end - ba->offset;
771 ba->offset = block_end;
768 772
769 err = block_alloc_contiguous(dsp, ba, block_list); 773 err = block_alloc_contiguous(dsp, ba, block_list);
770 if (err < 0) 774 if (err < 0)
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 3abc29e8a928..2ac72eb5e75d 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -343,7 +343,7 @@ int sst_acpi_remove(struct platform_device *pdev)
343} 343}
344 344
345static struct sst_machines sst_acpi_bytcr[] = { 345static struct sst_machines sst_acpi_bytcr[] = {
346 {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin", 346 {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin",
347 &byt_rvp_platform_data }, 347 &byt_rvp_platform_data },
348 {}, 348 {},
349}; 349};
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 26ec5117b35c..13d8507333b8 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -454,11 +454,11 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
454 454
455 i2s->playback_dma_data.addr = res->start + I2S_TXDR; 455 i2s->playback_dma_data.addr = res->start + I2S_TXDR;
456 i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 456 i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
457 i2s->playback_dma_data.maxburst = 16; 457 i2s->playback_dma_data.maxburst = 4;
458 458
459 i2s->capture_dma_data.addr = res->start + I2S_RXDR; 459 i2s->capture_dma_data.addr = res->start + I2S_RXDR;
460 i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 460 i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
461 i2s->capture_dma_data.maxburst = 16; 461 i2s->capture_dma_data.maxburst = 4;
462 462
463 i2s->dev = &pdev->dev; 463 i2s->dev = &pdev->dev;
464 dev_set_drvdata(&pdev->dev, i2s); 464 dev_set_drvdata(&pdev->dev, i2s);
diff --git a/sound/soc/rockchip/rockchip_i2s.h b/sound/soc/rockchip/rockchip_i2s.h
index 89a5d8bc6ee7..93f456f518a9 100644
--- a/sound/soc/rockchip/rockchip_i2s.h
+++ b/sound/soc/rockchip/rockchip_i2s.h
@@ -127,7 +127,7 @@
127#define I2S_DMACR_TDE_DISABLE (0 << I2S_DMACR_TDE_SHIFT) 127#define I2S_DMACR_TDE_DISABLE (0 << I2S_DMACR_TDE_SHIFT)
128#define I2S_DMACR_TDE_ENABLE (1 << I2S_DMACR_TDE_SHIFT) 128#define I2S_DMACR_TDE_ENABLE (1 << I2S_DMACR_TDE_SHIFT)
129#define I2S_DMACR_TDL_SHIFT 0 129#define I2S_DMACR_TDL_SHIFT 0
130#define I2S_DMACR_TDL(x) ((x - 1) << I2S_DMACR_TDL_SHIFT) 130#define I2S_DMACR_TDL(x) ((x) << I2S_DMACR_TDL_SHIFT)
131#define I2S_DMACR_TDL_MASK (0x1f << I2S_DMACR_TDL_SHIFT) 131#define I2S_DMACR_TDL_MASK (0x1f << I2S_DMACR_TDL_SHIFT)
132 132
133/* 133/*
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 985052b3fbed..2c62620abca6 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3230,7 +3230,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
3230 const char *propname) 3230 const char *propname)
3231{ 3231{
3232 struct device_node *np = card->dev->of_node; 3232 struct device_node *np = card->dev->of_node;
3233 int num_routes, old_routes; 3233 int num_routes;
3234 struct snd_soc_dapm_route *routes; 3234 struct snd_soc_dapm_route *routes;
3235 int i, ret; 3235 int i, ret;
3236 3236
@@ -3248,9 +3248,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
3248 return -EINVAL; 3248 return -EINVAL;
3249 } 3249 }
3250 3250
3251 old_routes = card->num_dapm_routes; 3251 routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes),
3252 routes = devm_kzalloc(card->dev,
3253 (old_routes + num_routes) * sizeof(*routes),
3254 GFP_KERNEL); 3252 GFP_KERNEL);
3255 if (!routes) { 3253 if (!routes) {
3256 dev_err(card->dev, 3254 dev_err(card->dev,
@@ -3258,11 +3256,9 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
3258 return -EINVAL; 3256 return -EINVAL;
3259 } 3257 }
3260 3258
3261 memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes));
3262
3263 for (i = 0; i < num_routes; i++) { 3259 for (i = 0; i < num_routes; i++) {
3264 ret = of_property_read_string_index(np, propname, 3260 ret = of_property_read_string_index(np, propname,
3265 2 * i, &routes[old_routes + i].sink); 3261 2 * i, &routes[i].sink);
3266 if (ret) { 3262 if (ret) {
3267 dev_err(card->dev, 3263 dev_err(card->dev,
3268 "ASoC: Property '%s' index %d could not be read: %d\n", 3264 "ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3270,7 +3266,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
3270 return -EINVAL; 3266 return -EINVAL;
3271 } 3267 }
3272 ret = of_property_read_string_index(np, propname, 3268 ret = of_property_read_string_index(np, propname,
3273 (2 * i) + 1, &routes[old_routes + i].source); 3269 (2 * i) + 1, &routes[i].source);
3274 if (ret) { 3270 if (ret) {
3275 dev_err(card->dev, 3271 dev_err(card->dev,
3276 "ASoC: Property '%s' index %d could not be read: %d\n", 3272 "ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3279,7 +3275,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
3279 } 3275 }
3280 } 3276 }
3281 3277
3282 card->num_dapm_routes += num_routes; 3278 card->num_dapm_routes = num_routes;
3283 card->dapm_routes = routes; 3279 card->dapm_routes = routes;
3284 3280
3285 return 0; 3281 return 0;
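
[Annotation] snd_soc_of_parse_audio_routing() no longer merges the parsed routes with routes already attached to the card; it allocates a table sized for the DT property alone and points card->dapm_routes/num_dapm_routes at that. The property itself remains a flat string list read in sink/source pairs, as in this userspace sketch (property contents invented):

#include <stdio.h>

struct route { const char *sink, *source; };

int main(void)
{
        /* stand-in for the "audio-routing"-style string property */
        const char *prop[] = { "Headphone", "HPOUT", "Speaker", "SPKOUT" };
        int num_routes = (int)(sizeof(prop) / sizeof(prop[0])) / 2;
        struct route routes[2];

        for (int i = 0; i < num_routes; i++) {
                routes[i].sink   = prop[2 * i];         /* even index: sink */
                routes[i].source = prop[2 * i + 1];     /* odd index: source */
                printf("%s <- %s\n", routes[i].sink, routes[i].source);
        }
        return 0;
}
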
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 272844746135..327f8642ca80 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
816 return -EINVAL; 816 return -EINVAL;
817 } 817 }
818 818
819 if (cdev->n_streams < 2) { 819 if (cdev->n_streams < 1) {
820 dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams); 820 dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams);
821 return -EINVAL; 821 return -EINVAL;
822 } 822 }
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 6eedba1f7732..653d1bad77de 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -22,6 +22,8 @@
22#error only <linux/bitops.h> can be included directly 22#error only <linux/bitops.h> can be included directly
23#endif 23#endif
24 24
25#include <asm-generic/bitops/hweight.h>
26
25#include <asm-generic/bitops/atomic.h> 27#include <asm-generic/bitops/atomic.h>
26 28
27#endif /* __TOOLS_ASM_GENERIC_BITOPS_H */ 29#endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
diff --git a/tools/include/asm-generic/bitops/arch_hweight.h b/tools/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000000..318bb2b202b0
--- /dev/null
+++ b/tools/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1 @@
#include "../../../../include/asm-generic/bitops/arch_hweight.h"
diff --git a/tools/include/asm-generic/bitops/const_hweight.h b/tools/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000000..0afd644aff83
--- /dev/null
+++ b/tools/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1 @@
#include "../../../../include/asm-generic/bitops/const_hweight.h"
diff --git a/tools/include/asm-generic/bitops/hweight.h b/tools/include/asm-generic/bitops/hweight.h
new file mode 100644
index 000000000000..290120c01a8e
--- /dev/null
+++ b/tools/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,7 @@
1#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
2#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
3
4#include <asm-generic/bitops/arch_hweight.h>
5#include <asm-generic/bitops/const_hweight.h>
6
7#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 26005a15e7e2..5ad9ee1dd7f6 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -1,9 +1,9 @@
1#ifndef _TOOLS_LINUX_BITOPS_H_ 1#ifndef _TOOLS_LINUX_BITOPS_H_
2#define _TOOLS_LINUX_BITOPS_H_ 2#define _TOOLS_LINUX_BITOPS_H_
3 3
4#include <asm/types.h>
4#include <linux/kernel.h> 5#include <linux/kernel.h>
5#include <linux/compiler.h> 6#include <linux/compiler.h>
6#include <asm/hweight.h>
7 7
8#ifndef __WORDSIZE 8#ifndef __WORDSIZE
9#define __WORDSIZE (__SIZEOF_LONG__ * 8) 9#define __WORDSIZE (__SIZEOF_LONG__ * 8)
@@ -19,6 +19,11 @@
19#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) 19#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
20#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE) 20#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
21 21
22extern unsigned int __sw_hweight8(unsigned int w);
23extern unsigned int __sw_hweight16(unsigned int w);
24extern unsigned int __sw_hweight32(unsigned int w);
25extern unsigned long __sw_hweight64(__u64 w);
26
22/* 27/*
23 * Include this here because some architectures need generic_ffs/fls in 28 * Include this here because some architectures need generic_ffs/fls in
24 * scope 29 * scope
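
[Annotation] The tools/ changes stop perf from carrying its own asm/hweight.h: tools/include/linux/bitops.h now declares the __sw_hweight*() software fallbacks, the asm-generic hweight headers are wrapped under tools/include, and the Makefile.perf hunk further down builds lib/hweight.c directly. For reference, a stand-alone version of the 32-bit population count those fallbacks compute, using essentially the same multiply-based reduction lib/hweight.c uses when a fast multiplier is available:

#include <stdio.h>

static unsigned int sw_hweight32(unsigned int w)
{
        w -= (w >> 1) & 0x55555555;                      /* pairs of bits */
        w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* nibbles */
        w  = (w + (w >> 4)) & 0x0f0f0f0f;                /* bytes */
        return (w * 0x01010101) >> 24;                   /* sum the bytes */
}

int main(void)
{
        printf("hweight32(0xf0f0)     = %u\n", sw_hweight32(0xf0f0));      /* 8 */
        printf("hweight32(0xffffffff) = %u\n", sw_hweight32(0xffffffffu)); /* 32 */
        return 0;
}
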
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
index a74fba6d7743..86ea2d7b8845 100644
--- a/tools/lib/api/fs/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
@@ -67,7 +67,7 @@ int debugfs_valid_mountpoint(const char *debugfs)
67 67
68 if (statfs(debugfs, &st_fs) < 0) 68 if (statfs(debugfs, &st_fs) < 0)
69 return -ENOENT; 69 return -ENOENT;
70 else if (st_fs.f_type != (long) DEBUGFS_MAGIC) 70 else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC)
71 return -ENOENT; 71 return -ENOENT;
72 72
73 return 0; 73 return 0;
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 65d9be3f9887..128ef6332a6b 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -79,7 +79,7 @@ static int fs__valid_mount(const char *fs, long magic)
79 79
80 if (statfs(fs, &st_fs) < 0) 80 if (statfs(fs, &st_fs) < 0)
81 return -ENOENT; 81 return -ENOENT;
82 else if (st_fs.f_type != magic) 82 else if ((long)st_fs.f_type != magic)
83 return -ENOENT; 83 return -ENOENT;
84 84
85 return 0; 85 return 0;
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 6f803609e498..0b0112c80f22 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
317 * 317 *
318 * TODO: Hook into free() and add that check there as well. 318 * TODO: Hook into free() and add that check there as well.
319 */ 319 */
320 debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex)); 320 debug_check_no_locks_freed(mutex, sizeof(*mutex));
321 __del_lock(__get_lock(mutex)); 321 __del_lock(__get_lock(mutex));
322 return ll_pthread_mutex_destroy(mutex); 322 return ll_pthread_mutex_destroy(mutex);
323} 323}
@@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
341{ 341{
342 try_init_preload(); 342 try_init_preload();
343 343
344 debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock)); 344 debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
345 __del_lock(__get_lock(rwlock)); 345 __del_lock(__get_lock(rwlock));
346 return ll_pthread_rwlock_destroy(rwlock); 346 return ll_pthread_rwlock_destroy(rwlock);
347} 347}
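
[Annotation] debug_check_no_locks_freed() takes a start address and a length in bytes, but the preload wrappers were passing mutex + sizeof(*mutex) as the second argument; pointer arithmetic scales by the element size, so that expression is neither a byte length nor the intended end pointer. A stand-alone illustration:

#include <pthread.h>
#include <stdio.h>

int main(void)
{
        /* array keeps the (wrong) pointer arithmetic within bounds */
        static pthread_mutex_t arr[4096];
        pthread_mutex_t *mutex = arr;

        printf("intended length       : %zu bytes\n", sizeof(*mutex));
        printf("mutex + sizeof(*mutex): %zu bytes past the start\n",
               (size_t)((char *)(mutex + sizeof(*mutex)) - (char *)mutex));
        return 0;
}
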
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 83e2887f91a3..fbbfdc39271d 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -6,12 +6,15 @@ tools/lib/symbol/kallsyms.c
6tools/lib/symbol/kallsyms.h 6tools/lib/symbol/kallsyms.h
7tools/lib/util/find_next_bit.c 7tools/lib/util/find_next_bit.c
8tools/include/asm/bug.h 8tools/include/asm/bug.h
9tools/include/asm-generic/bitops/arch_hweight.h
9tools/include/asm-generic/bitops/atomic.h 10tools/include/asm-generic/bitops/atomic.h
11tools/include/asm-generic/bitops/const_hweight.h
10tools/include/asm-generic/bitops/__ffs.h 12tools/include/asm-generic/bitops/__ffs.h
11tools/include/asm-generic/bitops/__fls.h 13tools/include/asm-generic/bitops/__fls.h
12tools/include/asm-generic/bitops/find.h 14tools/include/asm-generic/bitops/find.h
13tools/include/asm-generic/bitops/fls64.h 15tools/include/asm-generic/bitops/fls64.h
14tools/include/asm-generic/bitops/fls.h 16tools/include/asm-generic/bitops/fls.h
17tools/include/asm-generic/bitops/hweight.h
15tools/include/asm-generic/bitops.h 18tools/include/asm-generic/bitops.h
16tools/include/linux/bitops.h 19tools/include/linux/bitops.h
17tools/include/linux/compiler.h 20tools/include/linux/compiler.h
@@ -19,6 +22,8 @@ tools/include/linux/export.h
19tools/include/linux/hash.h 22tools/include/linux/hash.h
20tools/include/linux/log2.h 23tools/include/linux/log2.h
21tools/include/linux/types.h 24tools/include/linux/types.h
25include/asm-generic/bitops/arch_hweight.h
26include/asm-generic/bitops/const_hweight.h
22include/asm-generic/bitops/fls64.h 27include/asm-generic/bitops/fls64.h
23include/asm-generic/bitops/__fls.h 28include/asm-generic/bitops/__fls.h
24include/asm-generic/bitops/fls.h 29include/asm-generic/bitops/fls.h
@@ -29,6 +34,7 @@ include/linux/list.h
29include/linux/hash.h 34include/linux/hash.h
30include/linux/stringify.h 35include/linux/stringify.h
31lib/find_next_bit.c 36lib/find_next_bit.c
37lib/hweight.c
32lib/rbtree.c 38lib/rbtree.c
33include/linux/swab.h 39include/linux/swab.h
34arch/*/include/asm/unistd*.h 40arch/*/include/asm/unistd*.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 67a03a825b3c..aa6a50447c32 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -232,12 +232,15 @@ LIB_H += ../include/linux/hash.h
232LIB_H += ../../include/linux/stringify.h 232LIB_H += ../../include/linux/stringify.h
233LIB_H += util/include/linux/bitmap.h 233LIB_H += util/include/linux/bitmap.h
234LIB_H += ../include/linux/bitops.h 234LIB_H += ../include/linux/bitops.h
235LIB_H += ../include/asm-generic/bitops/arch_hweight.h
235LIB_H += ../include/asm-generic/bitops/atomic.h 236LIB_H += ../include/asm-generic/bitops/atomic.h
237LIB_H += ../include/asm-generic/bitops/const_hweight.h
236LIB_H += ../include/asm-generic/bitops/find.h 238LIB_H += ../include/asm-generic/bitops/find.h
237LIB_H += ../include/asm-generic/bitops/fls64.h 239LIB_H += ../include/asm-generic/bitops/fls64.h
238LIB_H += ../include/asm-generic/bitops/fls.h 240LIB_H += ../include/asm-generic/bitops/fls.h
239LIB_H += ../include/asm-generic/bitops/__ffs.h 241LIB_H += ../include/asm-generic/bitops/__ffs.h
240LIB_H += ../include/asm-generic/bitops/__fls.h 242LIB_H += ../include/asm-generic/bitops/__fls.h
243LIB_H += ../include/asm-generic/bitops/hweight.h
241LIB_H += ../include/asm-generic/bitops.h 244LIB_H += ../include/asm-generic/bitops.h
242LIB_H += ../include/linux/compiler.h 245LIB_H += ../include/linux/compiler.h
243LIB_H += ../include/linux/log2.h 246LIB_H += ../include/linux/log2.h
@@ -255,7 +258,6 @@ LIB_H += util/include/linux/linkage.h
255LIB_H += util/include/asm/asm-offsets.h 258LIB_H += util/include/asm/asm-offsets.h
256LIB_H += ../include/asm/bug.h 259LIB_H += ../include/asm/bug.h
257LIB_H += util/include/asm/byteorder.h 260LIB_H += util/include/asm/byteorder.h
258LIB_H += util/include/asm/hweight.h
259LIB_H += util/include/asm/swab.h 261LIB_H += util/include/asm/swab.h
260LIB_H += util/include/asm/system.h 262LIB_H += util/include/asm/system.h
261LIB_H += util/include/asm/uaccess.h 263LIB_H += util/include/asm/uaccess.h
@@ -462,10 +464,12 @@ BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
462# Benchmark modules 464# Benchmark modules
463BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o 465BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
464BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o 466BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
465ifeq ($(RAW_ARCH),x86_64) 467ifeq ($(ARCH), x86)
468ifeq ($(IS_64_BIT), 1)
466BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o 469BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
467BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o 470BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
468endif 471endif
472endif
469BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o 473BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
470BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o 474BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
471BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o 475BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
@@ -743,6 +747,9 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
743$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS 747$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
744 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 748 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
745 749
750$(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS
751 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
752
746$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS 753$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
747 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 754 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
748 755
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 3bb50eac5542..0c370f81e002 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -103,7 +103,7 @@ static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc)
103 return NULL; 103 return NULL;
104 } 104 }
105 105
106 result = dwarf_cfi_addrframe(cfi, pc, &frame); 106 result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
107 if (result) { 107 if (result) {
108 pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1)); 108 pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
109 return NULL; 109 return NULL;
@@ -128,7 +128,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
128 return NULL; 128 return NULL;
129 } 129 }
130 130
131 result = dwarf_cfi_addrframe(cfi, pc, &frame); 131 result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
132 if (result) { 132 if (result) {
133 pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1)); 133 pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
134 return NULL; 134 return NULL;
@@ -145,7 +145,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
145 * yet used) 145 * yet used)
146 * -1 in case of errors 146 * -1 in case of errors
147 */ 147 */
148static int check_return_addr(struct dso *dso, Dwarf_Addr pc) 148static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
149{ 149{
150 int rc = -1; 150 int rc = -1;
151 Dwfl *dwfl; 151 Dwfl *dwfl;
@@ -155,6 +155,7 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
155 Dwarf_Addr start = pc; 155 Dwarf_Addr start = pc;
156 Dwarf_Addr end = pc; 156 Dwarf_Addr end = pc;
157 bool signalp; 157 bool signalp;
158 const char *exec_file = dso->long_name;
158 159
159 dwfl = dso->dwfl; 160 dwfl = dso->dwfl;
160 161
@@ -165,8 +166,10 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
165 return -1; 166 return -1;
166 } 167 }
167 168
168 if (dwfl_report_offline(dwfl, "", dso->long_name, -1) == NULL) { 169 mod = dwfl_report_elf(dwfl, exec_file, exec_file, -1,
169 pr_debug("dwfl_report_offline() failed %s\n", 170 map_start, false);
171 if (!mod) {
172 pr_debug("dwfl_report_elf() failed %s\n",
170 dwarf_errmsg(-1)); 173 dwarf_errmsg(-1));
171 /* 174 /*
172 * We normally cache the DWARF debug info and never 175 * We normally cache the DWARF debug info and never
@@ -256,10 +259,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
256 return skip_slot; 259 return skip_slot;
257 } 260 }
258 261
259 rc = check_return_addr(dso, ip); 262 rc = check_return_addr(dso, al.map->start, ip);
260 263
261 pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n", 264 pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n",
262 dso->long_name, chain->nr, ip, rc); 265 dso->long_name, al.sym->name, ip, rc);
263 266
264 if (rc == 0) { 267 if (rc == 0) {
265 /* 268 /*
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 07a8d7646a15..005cc283790c 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -19,12 +19,12 @@
19#include <stdlib.h> 19#include <stdlib.h>
20#include <signal.h> 20#include <signal.h>
21#include <sys/wait.h> 21#include <sys/wait.h>
22#include <linux/unistd.h>
23#include <string.h> 22#include <string.h>
24#include <errno.h> 23#include <errno.h>
25#include <assert.h> 24#include <assert.h>
26#include <sys/time.h> 25#include <sys/time.h>
27#include <sys/types.h> 26#include <sys/types.h>
27#include <sys/syscall.h>
28 28
29#include <pthread.h> 29#include <pthread.h>
30 30
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e7417fe97a97..747f86103599 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -232,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
232 if (nr_samples > 0) { 232 if (nr_samples > 0) {
233 total_nr_samples += nr_samples; 233 total_nr_samples += nr_samples;
234 hists__collapse_resort(hists, NULL); 234 hists__collapse_resort(hists, NULL);
235 hists__output_resort(hists); 235 hists__output_resort(hists, NULL);
236 236
237 if (symbol_conf.event_group && 237 if (symbol_conf.event_group &&
238 !perf_evsel__is_group_leader(pos)) 238 !perf_evsel__is_group_leader(pos))
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1ce425d101a9..1fd96c13f199 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
545 return __hist_entry__cmp_compute(p_left, p_right, c); 545 return __hist_entry__cmp_compute(p_left, p_right, c);
546} 546}
547 547
548static int64_t
549hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
550 struct hist_entry *right __maybe_unused)
551{
552 return 0;
553}
554
555static int64_t
556hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
557{
558 if (sort_compute)
559 return 0;
560
561 if (left->stat.period == right->stat.period)
562 return 0;
563 return left->stat.period > right->stat.period ? 1 : -1;
564}
565
566static int64_t
567hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
568{
569 return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
570}
571
572static int64_t
573hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
574{
575 return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
576}
577
578static int64_t
579hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
580{
581 return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
582}
583
548static void insert_hist_entry_by_compute(struct rb_root *root, 584static void insert_hist_entry_by_compute(struct rb_root *root,
549 struct hist_entry *he, 585 struct hist_entry *he,
550 int c) 586 int c)
@@ -605,7 +641,7 @@ static void hists__process(struct hists *hists)
605 hists__precompute(hists); 641 hists__precompute(hists);
606 hists__compute_resort(hists); 642 hists__compute_resort(hists);
607 } else { 643 } else {
608 hists__output_resort(hists); 644 hists__output_resort(hists, NULL);
609 } 645 }
610 646
611 hists__fprintf(hists, true, 0, 0, 0, stdout); 647 hists__fprintf(hists, true, 0, 0, 0, stdout);
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
1038 fmt->header = hpp__header; 1074 fmt->header = hpp__header;
1039 fmt->width = hpp__width; 1075 fmt->width = hpp__width;
1040 fmt->entry = hpp__entry_global; 1076 fmt->entry = hpp__entry_global;
1077 fmt->cmp = hist_entry__cmp_nop;
1078 fmt->collapse = hist_entry__cmp_nop;
1041 1079
1042 /* TODO more colors */ 1080 /* TODO more colors */
1043 switch (idx) { 1081 switch (idx) {
1044 case PERF_HPP_DIFF__BASELINE: 1082 case PERF_HPP_DIFF__BASELINE:
1045 fmt->color = hpp__color_baseline; 1083 fmt->color = hpp__color_baseline;
1084 fmt->sort = hist_entry__cmp_baseline;
1046 break; 1085 break;
1047 case PERF_HPP_DIFF__DELTA: 1086 case PERF_HPP_DIFF__DELTA:
1048 fmt->color = hpp__color_delta; 1087 fmt->color = hpp__color_delta;
1088 fmt->sort = hist_entry__cmp_delta;
1049 break; 1089 break;
1050 case PERF_HPP_DIFF__RATIO: 1090 case PERF_HPP_DIFF__RATIO:
1051 fmt->color = hpp__color_ratio; 1091 fmt->color = hpp__color_ratio;
1092 fmt->sort = hist_entry__cmp_ratio;
1052 break; 1093 break;
1053 case PERF_HPP_DIFF__WEIGHTED_DIFF: 1094 case PERF_HPP_DIFF__WEIGHTED_DIFF:
1054 fmt->color = hpp__color_wdiff; 1095 fmt->color = hpp__color_wdiff;
1096 fmt->sort = hist_entry__cmp_wdiff;
1055 break; 1097 break;
1056 default: 1098 default:
1099 fmt->sort = hist_entry__cmp_nop;
1057 break; 1100 break;
1058 } 1101 }
1059 1102
1060 init_header(d, dfmt); 1103 init_header(d, dfmt);
1061 perf_hpp__column_register(fmt); 1104 perf_hpp__column_register(fmt);
1105 perf_hpp__register_sort_field(fmt);
1062} 1106}
1063 1107
1064static void ui_init(void) 1108static void ui_init(void)
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 011195e38f21..198f3c3aff95 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -19,7 +19,9 @@
19int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) 19int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
20{ 20{
21 int i; 21 int i;
22 const struct option list_options[] = { 22 bool raw_dump = false;
23 struct option list_options[] = {
24 OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
23 OPT_END() 25 OPT_END()
24 }; 26 };
25 const char * const list_usage[] = { 27 const char * const list_usage[] = {
@@ -27,11 +29,18 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
27 NULL 29 NULL
28 }; 30 };
29 31
32 set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
33
30 argc = parse_options(argc, argv, list_options, list_usage, 34 argc = parse_options(argc, argv, list_options, list_usage,
31 PARSE_OPT_STOP_AT_NON_OPTION); 35 PARSE_OPT_STOP_AT_NON_OPTION);
32 36
33 setup_pager(); 37 setup_pager();
34 38
39 if (raw_dump) {
40 print_events(NULL, true);
41 return 0;
42 }
43
35 if (argc == 0) { 44 if (argc == 0) {
36 print_events(NULL, false); 45 print_events(NULL, false);
37 return 0; 46 return 0;
@@ -53,8 +62,6 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
53 print_hwcache_events(NULL, false); 62 print_hwcache_events(NULL, false);
54 else if (strcmp(argv[i], "pmu") == 0) 63 else if (strcmp(argv[i], "pmu") == 0)
55 print_pmu_events(NULL, false); 64 print_pmu_events(NULL, false);
56 else if (strcmp(argv[i], "--raw-dump") == 0)
57 print_events(NULL, true);
58 else { 65 else {
59 char *sep = strchr(argv[i], ':'), *s; 66 char *sep = strchr(argv[i], ':'), *s;
60 int sep_idx; 67 int sep_idx;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 39367609c707..072ae8ad67fc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -457,6 +457,19 @@ static void report__collapse_hists(struct report *rep)
457 ui_progress__finish(); 457 ui_progress__finish();
458} 458}
459 459
460static void report__output_resort(struct report *rep)
461{
462 struct ui_progress prog;
463 struct perf_evsel *pos;
464
465 ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
466
467 evlist__for_each(rep->session->evlist, pos)
468 hists__output_resort(evsel__hists(pos), &prog);
469
470 ui_progress__finish();
471}
472
460static int __cmd_report(struct report *rep) 473static int __cmd_report(struct report *rep)
461{ 474{
462 int ret; 475 int ret;
@@ -505,13 +518,20 @@ static int __cmd_report(struct report *rep)
505 if (session_done()) 518 if (session_done())
506 return 0; 519 return 0;
507 520
521 /*
522 * recalculate number of entries after collapsing since it
523 * might be changed during the collapse phase.
524 */
525 rep->nr_entries = 0;
526 evlist__for_each(session->evlist, pos)
527 rep->nr_entries += evsel__hists(pos)->nr_entries;
528
508 if (rep->nr_entries == 0) { 529 if (rep->nr_entries == 0) {
509 ui__error("The %s file has no samples!\n", file->path); 530 ui__error("The %s file has no samples!\n", file->path);
510 return 0; 531 return 0;
511 } 532 }
512 533
513 evlist__for_each(session->evlist, pos) 534 report__output_resort(rep);
514 hists__output_resort(evsel__hists(pos));
515 535
516 return report__browse_hists(rep); 536 return report__browse_hists(rep);
517} 537}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 0aa7747ff139..616f0fcb4701 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -66,7 +66,6 @@
66#include <sys/utsname.h> 66#include <sys/utsname.h>
67#include <sys/mman.h> 67#include <sys/mman.h>
68 68
69#include <linux/unistd.h>
70#include <linux/types.h> 69#include <linux/types.h>
71 70
72static volatile int done; 71static volatile int done;
@@ -285,7 +284,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
285 } 284 }
286 285
287 hists__collapse_resort(hists, NULL); 286 hists__collapse_resort(hists, NULL);
288 hists__output_resort(hists); 287 hists__output_resort(hists, NULL);
289 288
290 hists__output_recalc_col_len(hists, top->print_entries - printed); 289 hists__output_recalc_col_len(hists, top->print_entries - printed);
291 putchar('\n'); 290 putchar('\n');
@@ -554,7 +553,7 @@ static void perf_top__sort_new_samples(void *arg)
554 } 553 }
555 554
556 hists__collapse_resort(hists, NULL); 555 hists__collapse_resort(hists, NULL);
557 hists__output_resort(hists); 556 hists__output_resort(hists, NULL);
558} 557}
559 558
560static void *display_thread_tui(void *arg) 559static void *display_thread_tui(void *arg)
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 5d4b039fe1ed..648e31ff4021 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -20,7 +20,7 @@ NO_PERF_REGS := 1
20 20
21# Additional ARCH settings for x86 21# Additional ARCH settings for x86
22ifeq ($(ARCH),x86) 22ifeq ($(ARCH),x86)
23 ifeq (${IS_X86_64}, 1) 23 ifeq (${IS_64_BIT}, 1)
24 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT 24 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
25 ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S 25 ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
26 LIBUNWIND_LIBS = -lunwind -lunwind-x86_64 26 LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch
index 851cd0172a76..ff95a68741d1 100644
--- a/tools/perf/config/Makefile.arch
+++ b/tools/perf/config/Makefile.arch
@@ -1,7 +1,7 @@
1 1
2uname_M := $(shell uname -m 2>/dev/null || echo not) 2uname_M := $(shell uname -m 2>/dev/null || echo not)
3 3
4ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ 4RAW_ARCH := $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
5 -e s/arm.*/arm/ -e s/sa110/arm/ \ 5 -e s/arm.*/arm/ -e s/sa110/arm/ \
6 -e s/s390x/s390/ -e s/parisc64/parisc/ \ 6 -e s/s390x/s390/ -e s/parisc64/parisc/ \
7 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ 7 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
@@ -9,23 +9,23 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
9 -e s/tile.*/tile/ ) 9 -e s/tile.*/tile/ )
10 10
11# Additional ARCH settings for x86 11# Additional ARCH settings for x86
12ifeq ($(ARCH),i386) 12ifeq ($(RAW_ARCH),i386)
13 override ARCH := x86 13 ARCH ?= x86
14endif 14endif
15 15
16ifeq ($(ARCH),x86_64) 16ifeq ($(RAW_ARCH),x86_64)
17 override ARCH := x86 17 ARCH ?= x86
18 IS_X86_64 := 0 18
19 ifeq (, $(findstring m32,$(CFLAGS))) 19 ifneq (, $(findstring m32,$(CFLAGS)))
20 IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1) 20 RAW_ARCH := x86_32
21 RAW_ARCH := x86_64
22 endif 21 endif
23endif 22endif
24 23
25ifeq (${IS_X86_64}, 1) 24ARCH ?= $(RAW_ARCH)
25
26LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
27ifeq ($(LP64), 1)
26 IS_64_BIT := 1 28 IS_64_BIT := 1
27else ifeq ($(ARCH),x86)
28 IS_64_BIT := 0
29else 29else
30 IS_64_BIT := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) 30 IS_64_BIT := 0
31endif 31endif
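
Side note (not part of the patch): the new IS_64_BIT probe works because GCC and Clang predefine __LP64__ (and _LP64) whenever the target ABI uses 64-bit longs and pointers, so piping the bare token through "$(CC) -E" prints 1 on such targets and leaves it unexpanded otherwise. The minimal C sketch below illustrates the same check at compile time; the file name and output strings are inventions of this note, not perf code.

/* lp64-probe.c: illustration of the __LP64__ check used by Makefile.arch.
 * Build with the toolchain under test; on a 64-bit ABI the macro is
 * predefined to 1, with -m32 it is left undefined.
 */
#include <stdio.h>

int main(void)
{
#ifdef __LP64__
        puts("IS_64_BIT := 1");  /* 64-bit ABI: __LP64__ expands to 1 */
#else
        puts("IS_64_BIT := 0");  /* 32-bit ABI (e.g. -m32): macro undefined */
#endif
        return 0;
}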
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index a3b13d7dc1d4..6ef68165c9db 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -6,7 +6,6 @@
6#include <sys/syscall.h> 6#include <sys/syscall.h>
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/perf_event.h> 8#include <linux/perf_event.h>
9#include <asm/unistd.h>
10 9
11#if defined(__i386__) 10#if defined(__i386__)
12#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") 11#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index ab28cca2cb97..0bf06bec68c7 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -11,6 +11,9 @@
11#include "thread.h" 11#include "thread.h"
12#include "callchain.h" 12#include "callchain.h"
13 13
14/* For bsearch. We try to unwind functions in shared object. */
15#include <stdlib.h>
16
14static int mmap_handler(struct perf_tool *tool __maybe_unused, 17static int mmap_handler(struct perf_tool *tool __maybe_unused,
15 union perf_event *event, 18 union perf_event *event,
16 struct perf_sample *sample __maybe_unused, 19 struct perf_sample *sample __maybe_unused,
@@ -28,7 +31,7 @@ static int init_live_machine(struct machine *machine)
28 mmap_handler, machine, true); 31 mmap_handler, machine, true);
29} 32}
30 33
31#define MAX_STACK 6 34#define MAX_STACK 8
32 35
33static int unwind_entry(struct unwind_entry *entry, void *arg) 36static int unwind_entry(struct unwind_entry *entry, void *arg)
34{ 37{
@@ -37,6 +40,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
37 static const char *funcs[MAX_STACK] = { 40 static const char *funcs[MAX_STACK] = {
38 "test__arch_unwind_sample", 41 "test__arch_unwind_sample",
39 "unwind_thread", 42 "unwind_thread",
43 "compare",
44 "bsearch",
40 "krava_3", 45 "krava_3",
41 "krava_2", 46 "krava_2",
42 "krava_1", 47 "krava_1",
@@ -88,10 +93,37 @@ static int unwind_thread(struct thread *thread)
88 return err; 93 return err;
89} 94}
90 95
96static int global_unwind_retval = -INT_MAX;
97
98__attribute__ ((noinline))
99static int compare(void *p1, void *p2)
100{
101 /* Any possible value should be 'thread' */
102 struct thread *thread = *(struct thread **)p1;
103
104 if (global_unwind_retval == -INT_MAX)
105 global_unwind_retval = unwind_thread(thread);
106
107 return p1 - p2;
108}
109
91__attribute__ ((noinline)) 110__attribute__ ((noinline))
92static int krava_3(struct thread *thread) 111static int krava_3(struct thread *thread)
93{ 112{
94 return unwind_thread(thread); 113 struct thread *array[2] = {thread, thread};
114 void *fp = &bsearch;
115 /*
116 * make _bsearch a volatile function pointer to
117 * prevent potential optimization, which may expand
118 * bsearch and call compare directly from this function,
119 * instead of libc shared object.
120 */
121 void *(*volatile _bsearch)(void *, void *, size_t,
122 size_t, int (*)(void *, void *));
123
124 _bsearch = fp;
125 _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
126 return global_unwind_retval;
95} 127}
96 128
97__attribute__ ((noinline)) 129__attribute__ ((noinline))
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 614d5c4978ab..8d110dec393e 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -187,7 +187,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
187 * function since TEST_ASSERT_VAL() returns in case of failure. 187 * function since TEST_ASSERT_VAL() returns in case of failure.
188 */ 188 */
189 hists__collapse_resort(hists, NULL); 189 hists__collapse_resort(hists, NULL);
190 hists__output_resort(hists); 190 hists__output_resort(hists, NULL);
191 191
192 if (verbose > 2) { 192 if (verbose > 2) {
193 pr_info("use callchain: %d, cumulate callchain: %d\n", 193 pr_info("use callchain: %d, cumulate callchain: %d\n",
@@ -454,12 +454,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
454 * 30.00% 10.00% perf perf [.] cmd_record 454 * 30.00% 10.00% perf perf [.] cmd_record
455 * 20.00% 0.00% bash libc [.] malloc 455 * 20.00% 0.00% bash libc [.] malloc
456 * 10.00% 10.00% bash [kernel] [k] page_fault 456 * 10.00% 10.00% bash [kernel] [k] page_fault
457 * 10.00% 10.00% perf [kernel] [k] schedule 457 * 10.00% 10.00% bash bash [.] xmalloc
458 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
459 * 10.00% 10.00% perf [kernel] [k] page_fault 458 * 10.00% 10.00% perf [kernel] [k] page_fault
460 * 10.00% 10.00% perf libc [.] free
461 * 10.00% 10.00% perf libc [.] malloc 459 * 10.00% 10.00% perf libc [.] malloc
462 * 10.00% 10.00% bash bash [.] xmalloc 460 * 10.00% 10.00% perf [kernel] [k] schedule
461 * 10.00% 10.00% perf libc [.] free
462 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
463 */ 463 */
464 struct result expected[] = { 464 struct result expected[] = {
465 { 7000, 2000, "perf", "perf", "main" }, 465 { 7000, 2000, "perf", "perf", "main" },
@@ -468,12 +468,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
468 { 3000, 1000, "perf", "perf", "cmd_record" }, 468 { 3000, 1000, "perf", "perf", "cmd_record" },
469 { 2000, 0, "bash", "libc", "malloc" }, 469 { 2000, 0, "bash", "libc", "malloc" },
470 { 1000, 1000, "bash", "[kernel]", "page_fault" }, 470 { 1000, 1000, "bash", "[kernel]", "page_fault" },
471 { 1000, 1000, "perf", "[kernel]", "schedule" }, 471 { 1000, 1000, "bash", "bash", "xmalloc" },
472 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
473 { 1000, 1000, "perf", "[kernel]", "page_fault" }, 472 { 1000, 1000, "perf", "[kernel]", "page_fault" },
473 { 1000, 1000, "perf", "[kernel]", "schedule" },
474 { 1000, 1000, "perf", "libc", "free" }, 474 { 1000, 1000, "perf", "libc", "free" },
475 { 1000, 1000, "perf", "libc", "malloc" }, 475 { 1000, 1000, "perf", "libc", "malloc" },
476 { 1000, 1000, "bash", "bash", "xmalloc" }, 476 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
477 }; 477 };
478 478
479 symbol_conf.use_callchain = false; 479 symbol_conf.use_callchain = false;
@@ -537,10 +537,13 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
537 * malloc 537 * malloc
538 * main 538 * main
539 * 539 *
540 * 10.00% 10.00% perf [kernel] [k] schedule 540 * 10.00% 10.00% bash bash [.] xmalloc
541 * | 541 * |
542 * --- schedule 542 * --- xmalloc
543 * run_command 543 * malloc
544 * xmalloc <--- NOTE: there's a cycle
545 * malloc
546 * xmalloc
544 * main 547 * main
545 * 548 *
546 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open 549 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
@@ -556,6 +559,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
556 * run_command 559 * run_command
557 * main 560 * main
558 * 561 *
562 * 10.00% 10.00% perf [kernel] [k] schedule
563 * |
564 * --- schedule
565 * run_command
566 * main
567 *
559 * 10.00% 10.00% perf libc [.] free 568 * 10.00% 10.00% perf libc [.] free
560 * | 569 * |
561 * --- free 570 * --- free
@@ -570,15 +579,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
570 * run_command 579 * run_command
571 * main 580 * main
572 * 581 *
573 * 10.00% 10.00% bash bash [.] xmalloc
574 * |
575 * --- xmalloc
576 * malloc
577 * xmalloc <--- NOTE: there's a cycle
578 * malloc
579 * xmalloc
580 * main
581 *
582 */ 582 */
583 struct result expected[] = { 583 struct result expected[] = {
584 { 7000, 2000, "perf", "perf", "main" }, 584 { 7000, 2000, "perf", "perf", "main" },
@@ -587,12 +587,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
587 { 3000, 1000, "perf", "perf", "cmd_record" }, 587 { 3000, 1000, "perf", "perf", "cmd_record" },
588 { 2000, 0, "bash", "libc", "malloc" }, 588 { 2000, 0, "bash", "libc", "malloc" },
589 { 1000, 1000, "bash", "[kernel]", "page_fault" }, 589 { 1000, 1000, "bash", "[kernel]", "page_fault" },
590 { 1000, 1000, "perf", "[kernel]", "schedule" }, 590 { 1000, 1000, "bash", "bash", "xmalloc" },
591 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 591 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
592 { 1000, 1000, "perf", "[kernel]", "page_fault" }, 592 { 1000, 1000, "perf", "[kernel]", "page_fault" },
593 { 1000, 1000, "perf", "[kernel]", "schedule" },
593 { 1000, 1000, "perf", "libc", "free" }, 594 { 1000, 1000, "perf", "libc", "free" },
594 { 1000, 1000, "perf", "libc", "malloc" }, 595 { 1000, 1000, "perf", "libc", "malloc" },
595 { 1000, 1000, "bash", "bash", "xmalloc" },
596 }; 596 };
597 struct callchain_result expected_callchain[] = { 597 struct callchain_result expected_callchain[] = {
598 { 598 {
@@ -622,9 +622,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
622 { "bash", "main" }, }, 622 { "bash", "main" }, },
623 }, 623 },
624 { 624 {
625 3, { { "[kernel]", "schedule" }, 625 6, { { "bash", "xmalloc" },
626 { "perf", "run_command" }, 626 { "libc", "malloc" },
627 { "perf", "main" }, }, 627 { "bash", "xmalloc" },
628 { "libc", "malloc" },
629 { "bash", "xmalloc" },
630 { "bash", "main" }, },
628 }, 631 },
629 { 632 {
630 3, { { "[kernel]", "sys_perf_event_open" }, 633 3, { { "[kernel]", "sys_perf_event_open" },
@@ -638,6 +641,11 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
638 { "perf", "main" }, }, 641 { "perf", "main" }, },
639 }, 642 },
640 { 643 {
644 3, { { "[kernel]", "schedule" },
645 { "perf", "run_command" },
646 { "perf", "main" }, },
647 },
648 {
641 4, { { "libc", "free" }, 649 4, { { "libc", "free" },
642 { "perf", "cmd_record" }, 650 { "perf", "cmd_record" },
643 { "perf", "run_command" }, 651 { "perf", "run_command" },
@@ -649,14 +657,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
649 { "perf", "run_command" }, 657 { "perf", "run_command" },
650 { "perf", "main" }, }, 658 { "perf", "main" }, },
651 }, 659 },
652 {
653 6, { { "bash", "xmalloc" },
654 { "libc", "malloc" },
655 { "bash", "xmalloc" },
656 { "libc", "malloc" },
657 { "bash", "xmalloc" },
658 { "bash", "main" }, },
659 },
660 }; 660 };
661 661
662 symbol_conf.use_callchain = true; 662 symbol_conf.use_callchain = true;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 74f257a81265..59e53db7914c 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -138,7 +138,7 @@ int test__hists_filter(void)
138 struct hists *hists = evsel__hists(evsel); 138 struct hists *hists = evsel__hists(evsel);
139 139
140 hists__collapse_resort(hists, NULL); 140 hists__collapse_resort(hists, NULL);
141 hists__output_resort(hists); 141 hists__output_resort(hists, NULL);
142 142
143 if (verbose > 2) { 143 if (verbose > 2) {
144 pr_info("Normal histogram\n"); 144 pr_info("Normal histogram\n");
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index a748f2be1222..f5547610da02 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -152,7 +152,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
152 goto out; 152 goto out;
153 153
154 hists__collapse_resort(hists, NULL); 154 hists__collapse_resort(hists, NULL);
155 hists__output_resort(hists); 155 hists__output_resort(hists, NULL);
156 156
157 if (verbose > 2) { 157 if (verbose > 2) {
158 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); 158 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -252,7 +252,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
252 goto out; 252 goto out;
253 253
254 hists__collapse_resort(hists, NULL); 254 hists__collapse_resort(hists, NULL);
255 hists__output_resort(hists); 255 hists__output_resort(hists, NULL);
256 256
257 if (verbose > 2) { 257 if (verbose > 2) {
258 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); 258 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -306,7 +306,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
306 goto out; 306 goto out;
307 307
308 hists__collapse_resort(hists, NULL); 308 hists__collapse_resort(hists, NULL);
309 hists__output_resort(hists); 309 hists__output_resort(hists, NULL);
310 310
311 if (verbose > 2) { 311 if (verbose > 2) {
312 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); 312 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -384,7 +384,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
384 goto out; 384 goto out;
385 385
386 hists__collapse_resort(hists, NULL); 386 hists__collapse_resort(hists, NULL);
387 hists__output_resort(hists); 387 hists__output_resort(hists, NULL);
388 388
389 if (verbose > 2) { 389 if (verbose > 2) {
390 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); 390 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -487,7 +487,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
487 goto out; 487 goto out;
488 488
489 hists__collapse_resort(hists, NULL); 489 hists__collapse_resort(hists, NULL);
490 hists__output_resort(hists); 490 hists__output_resort(hists, NULL);
491 491
492 if (verbose > 2) { 492 if (verbose > 2) {
493 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); 493 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e6bb04b5b09b..788506eef567 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -550,7 +550,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
550 bool need_percent; 550 bool need_percent;
551 551
552 node = rb_first(root); 552 node = rb_first(root);
553 need_percent = !!rb_next(node); 553 need_percent = node && rb_next(node);
554 554
555 while (node) { 555 while (node) {
556 struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); 556 struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index dc0d095f318c..482adae3cc44 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -204,6 +204,9 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
204 if (ret) 204 if (ret)
205 return ret; 205 return ret;
206 206
207 if (a->thread != b->thread || !symbol_conf.use_callchain)
208 return 0;
209
207 ret = b->callchain->max_depth - a->callchain->max_depth; 210 ret = b->callchain->max_depth - a->callchain->max_depth;
208 } 211 }
209 return ret; 212 return ret;
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 2f612562978c..3c38f25b1695 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -1,5 +1,8 @@
1#include <signal.h> 1#include <signal.h>
2#include <stdbool.h> 2#include <stdbool.h>
3#ifdef HAVE_BACKTRACE_SUPPORT
4#include <execinfo.h>
5#endif
3 6
4#include "../../util/cache.h" 7#include "../../util/cache.h"
5#include "../../util/debug.h" 8#include "../../util/debug.h"
@@ -88,6 +91,25 @@ int ui__getch(int delay_secs)
88 return SLkp_getkey(); 91 return SLkp_getkey();
89} 92}
90 93
94#ifdef HAVE_BACKTRACE_SUPPORT
95static void ui__signal_backtrace(int sig)
96{
97 void *stackdump[32];
98 size_t size;
99
100 ui__exit(false);
101 psignal(sig, "perf");
102
103 printf("-------- backtrace --------\n");
104 size = backtrace(stackdump, ARRAY_SIZE(stackdump));
105 backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
106
107 exit(0);
108}
109#else
110# define ui__signal_backtrace ui__signal
111#endif
112
91static void ui__signal(int sig) 113static void ui__signal(int sig)
92{ 114{
93 ui__exit(false); 115 ui__exit(false);
@@ -122,8 +144,8 @@ int ui__init(void)
122 ui_browser__init(); 144 ui_browser__init();
123 tui_progress__init(); 145 tui_progress__init();
124 146
125 signal(SIGSEGV, ui__signal); 147 signal(SIGSEGV, ui__signal_backtrace);
126 signal(SIGFPE, ui__signal); 148 signal(SIGFPE, ui__signal_backtrace);
127 signal(SIGINT, ui__signal); 149 signal(SIGINT, ui__signal);
128 signal(SIGQUIT, ui__signal); 150 signal(SIGQUIT, ui__signal);
129 signal(SIGTERM, ui__signal); 151 signal(SIGTERM, ui__signal);
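
For reference only (not part of the patch): the handler added above can be exercised outside the perf TUI with plain glibc. The sketch below is illustrative; crash_handler and the raise() call are inventions of this note. Like perf's debug-only handler, it calls non-async-signal-safe functions from a signal handler, acceptable here only because the process exits immediately afterwards.

/* backtrace-demo.c: standalone sketch of a backtrace-on-fatal-signal handler,
 * mirroring the HAVE_BACKTRACE_SUPPORT path added to ui/tui/setup.c above.
 * Link with -rdynamic so backtrace_symbols_fd() can resolve symbol names.
 */
#include <execinfo.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void crash_handler(int sig)
{
        void *stackdump[32];
        int size;

        fprintf(stderr, "fatal signal %d\n", sig);
        size = backtrace(stackdump, 32);
        fprintf(stderr, "-------- backtrace --------\n");
        backtrace_symbols_fd(stackdump, size, STDERR_FILENO);
        _exit(1);
}

int main(void)
{
        signal(SIGSEGV, crash_handler);
        signal(SIGFPE, crash_handler);

        raise(SIGSEGV);         /* trigger the handler for demonstration */
        return 0;
}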
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 0784a9420528..cadbdc90a5cb 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -116,11 +116,6 @@ struct annotation {
116 struct annotated_source *src; 116 struct annotated_source *src;
117}; 117};
118 118
119struct sannotation {
120 struct annotation annotation;
121 struct symbol symbol;
122};
123
124static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx) 119static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
125{ 120{
126 return (((void *)&notes->src->histograms) + 121 return (((void *)&notes->src->histograms) +
@@ -129,8 +124,7 @@ static inline struct sym_hist *annotation__histogram(struct annotation *notes, i
129 124
130static inline struct annotation *symbol__annotation(struct symbol *sym) 125static inline struct annotation *symbol__annotation(struct symbol *sym)
131{ 126{
132 struct sannotation *a = container_of(sym, struct sannotation, symbol); 127 return (void *)sym - symbol_conf.priv_size;
133 return &a->annotation;
134} 128}
135 129
136int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx); 130int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 5cf9e1b5989d..d04d770d90f6 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -71,7 +71,9 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
71extern char *perf_pathdup(const char *fmt, ...) 71extern char *perf_pathdup(const char *fmt, ...)
72 __attribute__((format (printf, 1, 2))); 72 __attribute__((format (printf, 1, 2)));
73 73
74#ifndef __UCLIBC__
74/* Matches the libc/libbsd function attribute so we declare this unconditionally: */ 75/* Matches the libc/libbsd function attribute so we declare this unconditionally: */
75extern size_t strlcpy(char *dest, const char *src, size_t size); 76extern size_t strlcpy(char *dest, const char *src, size_t size);
77#endif
76 78
77#endif /* __PERF_CACHE_H */ 79#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 64b377e591e4..14e7a123d43b 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -841,3 +841,33 @@ char *callchain_list__sym_name(struct callchain_list *cl,
841 841
842 return bf; 842 return bf;
843} 843}
844
845static void free_callchain_node(struct callchain_node *node)
846{
847 struct callchain_list *list, *tmp;
848 struct callchain_node *child;
849 struct rb_node *n;
850
851 list_for_each_entry_safe(list, tmp, &node->val, list) {
852 list_del(&list->list);
853 free(list);
854 }
855
856 n = rb_first(&node->rb_root_in);
857 while (n) {
858 child = container_of(n, struct callchain_node, rb_node_in);
859 n = rb_next(n);
860 rb_erase(&child->rb_node_in, &node->rb_root_in);
861
862 free_callchain_node(child);
863 free(child);
864 }
865}
866
867void free_callchain(struct callchain_root *root)
868{
869 if (!symbol_conf.use_callchain)
870 return;
871
872 free_callchain_node(&root->node);
873}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index dbc08cf5f970..c0ec1acc38e4 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -198,4 +198,6 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
198char *callchain_list__sym_name(struct callchain_list *cl, 198char *callchain_list__sym_name(struct callchain_list *cl,
199 char *bf, size_t bfsize, bool show_dso); 199 char *bf, size_t bfsize, bool show_dso);
200 200
201void free_callchain(struct callchain_root *root);
202
201#endif /* __PERF_CALLCHAIN_H */ 203#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 6e88b9e395df..182395546ddc 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -6,6 +6,7 @@
6#include "evlist.h" 6#include "evlist.h"
7#include "evsel.h" 7#include "evsel.h"
8#include "annotate.h" 8#include "annotate.h"
9#include "ui/progress.h"
9#include <math.h> 10#include <math.h>
10 11
11static bool hists__filter_entry_by_dso(struct hists *hists, 12static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -303,7 +304,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
303 size_t callchain_size = 0; 304 size_t callchain_size = 0;
304 struct hist_entry *he; 305 struct hist_entry *he;
305 306
306 if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) 307 if (symbol_conf.use_callchain)
307 callchain_size = sizeof(struct callchain_root); 308 callchain_size = sizeof(struct callchain_root);
308 309
309 he = zalloc(sizeof(*he) + callchain_size); 310 he = zalloc(sizeof(*he) + callchain_size);
@@ -736,7 +737,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
736 iter->he = he; 737 iter->he = he;
737 he_cache[iter->curr++] = he; 738 he_cache[iter->curr++] = he;
738 739
739 callchain_append(he->callchain, &callchain_cursor, sample->period); 740 hist_entry__append_callchain(he, sample);
740 741
741 /* 742 /*
742 * We need to re-initialize the cursor since callchain_append() 743 * We need to re-initialize the cursor since callchain_append()
@@ -809,7 +810,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
809 iter->he = he; 810 iter->he = he;
810 he_cache[iter->curr++] = he; 811 he_cache[iter->curr++] = he;
811 812
812 callchain_append(he->callchain, &cursor, sample->period); 813 if (symbol_conf.use_callchain)
814 callchain_append(he->callchain, &cursor, sample->period);
813 return 0; 815 return 0;
814} 816}
815 817
@@ -945,6 +947,7 @@ void hist_entry__free(struct hist_entry *he)
945 zfree(&he->mem_info); 947 zfree(&he->mem_info);
946 zfree(&he->stat_acc); 948 zfree(&he->stat_acc);
947 free_srcline(he->srcline); 949 free_srcline(he->srcline);
950 free_callchain(he->callchain);
948 free(he); 951 free(he);
949} 952}
950 953
@@ -987,6 +990,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
987 else 990 else
988 p = &(*p)->rb_right; 991 p = &(*p)->rb_right;
989 } 992 }
993 hists->nr_entries++;
990 994
991 rb_link_node(&he->rb_node_in, parent, p); 995 rb_link_node(&he->rb_node_in, parent, p);
992 rb_insert_color(&he->rb_node_in, root); 996 rb_insert_color(&he->rb_node_in, root);
@@ -1024,7 +1028,10 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1024 if (!sort__need_collapse) 1028 if (!sort__need_collapse)
1025 return; 1029 return;
1026 1030
1031 hists->nr_entries = 0;
1032
1027 root = hists__get_rotate_entries_in(hists); 1033 root = hists__get_rotate_entries_in(hists);
1034
1028 next = rb_first(root); 1035 next = rb_first(root);
1029 1036
1030 while (next) { 1037 while (next) {
@@ -1119,7 +1126,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
1119 rb_insert_color(&he->rb_node, entries); 1126 rb_insert_color(&he->rb_node, entries);
1120} 1127}
1121 1128
1122void hists__output_resort(struct hists *hists) 1129void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1123{ 1130{
1124 struct rb_root *root; 1131 struct rb_root *root;
1125 struct rb_node *next; 1132 struct rb_node *next;
@@ -1148,6 +1155,9 @@ void hists__output_resort(struct hists *hists)
1148 1155
1149 if (!n->filtered) 1156 if (!n->filtered)
1150 hists__calc_col_len(hists, n); 1157 hists__calc_col_len(hists, n);
1158
1159 if (prog)
1160 ui_progress__update(prog, 1);
1151 } 1161 }
1152} 1162}
1153 1163
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index d0ef9a19a744..46bd50344f85 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -121,7 +121,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
121 struct hists *hists); 121 struct hists *hists);
122void hist_entry__free(struct hist_entry *); 122void hist_entry__free(struct hist_entry *);
123 123
124void hists__output_resort(struct hists *hists); 124void hists__output_resort(struct hists *hists, struct ui_progress *prog);
125void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); 125void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
126 126
127void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel); 127void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
deleted file mode 100644
index 5c1d0d099f0d..000000000000
--- a/tools/perf/util/hweight.c
+++ /dev/null
@@ -1,31 +0,0 @@
1#include <linux/bitops.h>
2
3/**
4 * hweightN - returns the hamming weight of a N-bit word
5 * @x: the word to weigh
6 *
7 * The Hamming Weight of a number is the total number of bits set in it.
8 */
9
10unsigned int hweight32(unsigned int w)
11{
12 unsigned int res = w - ((w >> 1) & 0x55555555);
13 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
14 res = (res + (res >> 4)) & 0x0F0F0F0F;
15 res = res + (res >> 8);
16 return (res + (res >> 16)) & 0x000000FF;
17}
18
19unsigned long hweight64(__u64 w)
20{
21#if BITS_PER_LONG == 32
22 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
23#elif BITS_PER_LONG == 64
24 __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
25 res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
26 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
27 res = res + (res >> 8);
28 res = res + (res >> 16);
29 return (res + (res >> 32)) & 0x00000000000000FFul;
30#endif
31}
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
deleted file mode 100644
index 36cf26d434a5..000000000000
--- a/tools/perf/util/include/asm/hweight.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef PERF_HWEIGHT_H
2#define PERF_HWEIGHT_H
3
4#include <linux/types.h>
5unsigned int hweight32(unsigned int w);
6unsigned long hweight64(__u64 w);
7
8#endif /* PERF_HWEIGHT_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 94de3e48b490..1bca3a9f2b16 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -389,7 +389,6 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
389 if (th != NULL) { 389 if (th != NULL) {
390 rb_link_node(&th->rb_node, parent, p); 390 rb_link_node(&th->rb_node, parent, p);
391 rb_insert_color(&th->rb_node, &machine->threads); 391 rb_insert_color(&th->rb_node, &machine->threads);
392 machine->last_match = th;
393 392
394 /* 393 /*
395 * We have to initialize map_groups separately 394 * We have to initialize map_groups separately
@@ -400,9 +399,12 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
400 * leader and that would screwed the rb tree. 399 * leader and that would screwed the rb tree.
401 */ 400 */
402 if (thread__init_map_groups(th, machine)) { 401 if (thread__init_map_groups(th, machine)) {
402 rb_erase(&th->rb_node, &machine->threads);
403 thread__delete(th); 403 thread__delete(th);
404 return NULL; 404 return NULL;
405 } 405 }
406
407 machine->last_match = th;
406 } 408 }
407 409
408 return th; 410 return th;
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 28eb1417cb2a..94a717bf007d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -495,9 +495,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
495 } 495 }
496 496
497 if (ntevs == 0) { /* No error but failed to find probe point. */ 497 if (ntevs == 0) { /* No error but failed to find probe point. */
498 pr_warning("Probe point '%s' not found.\n", 498 pr_warning("Probe point '%s' not found in debuginfo.\n",
499 synthesize_perf_probe_point(&pev->point)); 499 synthesize_perf_probe_point(&pev->point));
500 return -ENOENT; 500 if (need_dwarf)
501 return -ENOENT;
502 return 0;
501 } 503 }
502 /* Error path : ntevs < 0 */ 504 /* Error path : ntevs < 0 */
503 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); 505 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
@@ -2050,9 +2052,11 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
2050 pr_debug("Writing event: %s\n", buf); 2052 pr_debug("Writing event: %s\n", buf);
2051 if (!probe_event_dry_run) { 2053 if (!probe_event_dry_run) {
2052 ret = write(fd, buf, strlen(buf)); 2054 ret = write(fd, buf, strlen(buf));
2053 if (ret <= 0) 2055 if (ret <= 0) {
2056 ret = -errno;
2054 pr_warning("Failed to write event: %s\n", 2057 pr_warning("Failed to write event: %s\n",
2055 strerror_r(errno, sbuf, sizeof(sbuf))); 2058 strerror_r(errno, sbuf, sizeof(sbuf)));
2059 }
2056 } 2060 }
2057 free(buf); 2061 free(buf);
2058 return ret; 2062 return ret;
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c7918f83b300..b5247d777f0e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -989,8 +989,24 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
989 int ret = 0; 989 int ret = 0;
990 990
991#if _ELFUTILS_PREREQ(0, 142) 991#if _ELFUTILS_PREREQ(0, 142)
992 Elf *elf;
993 GElf_Ehdr ehdr;
994 GElf_Shdr shdr;
995
992 /* Get the call frame information from this dwarf */ 996 /* Get the call frame information from this dwarf */
993 pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg)); 997 elf = dwarf_getelf(dbg->dbg);
998 if (elf == NULL)
999 return -EINVAL;
1000
1001 if (gelf_getehdr(elf, &ehdr) == NULL)
1002 return -EINVAL;
1003
1004 if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
1005 shdr.sh_type == SHT_PROGBITS) {
1006 pf->cfi = dwarf_getcfi_elf(elf);
1007 } else {
1008 pf->cfi = dwarf_getcfi(dbg->dbg);
1009 }
994#endif 1010#endif
995 1011
996 off = 0; 1012 off = 0;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 16a475a7d492..6c6a6953fa93 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,7 +10,7 @@ util/ctype.c
10util/evlist.c 10util/evlist.c
11util/evsel.c 11util/evsel.c
12util/cpumap.c 12util/cpumap.c
13util/hweight.c 13../../lib/hweight.c
14util/thread_map.c 14util/thread_map.c
15util/util.c 15util/util.c
16util/xyarray.c 16util/xyarray.c
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 371219a6daf1..6edf535f65c2 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -185,6 +185,28 @@ static u64 elf_section_offset(int fd, const char *name)
185 return offset; 185 return offset;
186} 186}
187 187
188#ifndef NO_LIBUNWIND_DEBUG_FRAME
189static int elf_is_exec(int fd, const char *name)
190{
191 Elf *elf;
192 GElf_Ehdr ehdr;
193 int retval = 0;
194
195 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
196 if (elf == NULL)
197 return 0;
198 if (gelf_getehdr(elf, &ehdr) == NULL)
199 goto out;
200
201 retval = (ehdr.e_type == ET_EXEC);
202
203out:
204 elf_end(elf);
205 pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval);
206 return retval;
207}
208#endif
209
188struct table_entry { 210struct table_entry {
189 u32 start_ip_offset; 211 u32 start_ip_offset;
190 u32 fde_offset; 212 u32 fde_offset;
@@ -322,8 +344,12 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
322#ifndef NO_LIBUNWIND_DEBUG_FRAME 344#ifndef NO_LIBUNWIND_DEBUG_FRAME
323 /* Check the .debug_frame section for unwinding info */ 345 /* Check the .debug_frame section for unwinding info */
324 if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { 346 if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
347 int fd = dso__data_fd(map->dso, ui->machine);
348 int is_exec = elf_is_exec(fd, map->dso->name);
349 unw_word_t base = is_exec ? 0 : map->start;
350
325 memset(&di, 0, sizeof(di)); 351 memset(&di, 0, sizeof(di));
326 if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name, 352 if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
327 map->start, map->end)) 353 map->start, map->end))
328 return dwarf_search_unwind_table(as, ip, &di, pi, 354 return dwarf_search_unwind_table(as, ip, &di, pi,
329 need_unwind_info, arg); 355 need_unwind_info, arg);
diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c
index 7cdcf88659c7..9ea914378985 100644
--- a/tools/power/cpupower/utils/cpupower.c
+++ b/tools/power/cpupower/utils/cpupower.c
@@ -199,7 +199,7 @@ int main(int argc, const char *argv[])
199 } 199 }
200 200
201 get_cpu_info(0, &cpupower_cpu_info); 201 get_cpu_info(0, &cpupower_cpu_info);
202 run_as_root = !getuid(); 202 run_as_root = !geteuid();
203 if (run_as_root) { 203 if (run_as_root) {
204 ret = uname(&uts); 204 ret = uname(&uts);
205 if (!ret && !strcmp(uts.machine, "x86_64") && 205 if (!ret && !strcmp(uts.machine, "x86_64") &&
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 09afe5d87f2b..4e8fe2c7b054 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -361,7 +361,7 @@ unsigned int sysfs_get_idlestate_count(unsigned int cpu)
361 361
362 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle"); 362 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
363 if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) 363 if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
364 return -ENODEV; 364 return 0;
365 365
366 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu); 366 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
367 if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) 367 if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 33a5c06d95ca..e238c9559caf 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags,
62} 62}
63 63
64static int check_execveat_invoked_rc(int fd, const char *path, int flags, 64static int check_execveat_invoked_rc(int fd, const char *path, int flags,
65 int expected_rc) 65 int expected_rc, int expected_rc2)
66{ 66{
67 int status; 67 int status;
68 int rc; 68 int rc;
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
98 child, status); 98 child, status);
99 return 1; 99 return 1;
100 } 100 }
101 if (WEXITSTATUS(status) != expected_rc) { 101 if ((WEXITSTATUS(status) != expected_rc) &&
102 printf("[FAIL] (child %d exited with %d not %d)\n", 102 (WEXITSTATUS(status) != expected_rc2)) {
103 child, WEXITSTATUS(status), expected_rc); 103 printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
104 child, WEXITSTATUS(status), expected_rc, expected_rc2);
104 return 1; 105 return 1;
105 } 106 }
106 printf("[OK]\n"); 107 printf("[OK]\n");
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
109 110
110static int check_execveat(int fd, const char *path, int flags) 111static int check_execveat(int fd, const char *path, int flags)
111{ 112{
112 return check_execveat_invoked_rc(fd, path, flags, 99); 113 return check_execveat_invoked_rc(fd, path, flags, 99, 99);
113} 114}
114 115
115static char *concat(const char *left, const char *right) 116static char *concat(const char *left, const char *right)
@@ -179,11 +180,11 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
179 */ 180 */
180 fd = open(longpath, O_RDONLY); 181 fd = open(longpath, O_RDONLY);
181 if (fd > 0) { 182 if (fd > 0) {
182 printf("Invoke copy of '%s' via filename of length %lu:\n", 183 printf("Invoke copy of '%s' via filename of length %zu:\n",
183 src, strlen(longpath)); 184 src, strlen(longpath));
184 fail += check_execveat(fd, "", AT_EMPTY_PATH); 185 fail += check_execveat(fd, "", AT_EMPTY_PATH);
185 } else { 186 } else {
186 printf("Failed to open length %lu filename, errno=%d (%s)\n", 187 printf("Failed to open length %zu filename, errno=%d (%s)\n",
187 strlen(longpath), errno, strerror(errno)); 188 strlen(longpath), errno, strerror(errno));
188 fail++; 189 fail++;
189 } 190 }
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
192 * Execute as a long pathname relative to ".". If this is a script, 193 * Execute as a long pathname relative to ".". If this is a script,
193 * the interpreter will launch but fail to open the script because its 194 * the interpreter will launch but fail to open the script because its
194 * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX. 195 * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
196 *
197 * The failure code is usually 127 (POSIX: "If a command is not found,
198 * the exit status shall be 127."), but some systems give 126 (POSIX:
199 * "If the command name is found, but it is not an executable utility,
200 * the exit status shall be 126."), so allow either.
195 */ 201 */
196 if (is_script) 202 if (is_script)
197 fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127); 203 fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
204 127, 126);
198 else 205 else
199 fail += check_execveat(dot_dfd, longpath, 0); 206 fail += check_execveat(dot_dfd, longpath, 0);
200 207
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index 94dae65eea41..8519e9ee97e3 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -536,10 +536,9 @@ int main(int argc, char *argv[])
536{ 536{
537 struct mq_attr attr; 537 struct mq_attr attr;
538 char *option, *next_option; 538 char *option, *next_option;
539 int i, cpu; 539 int i, cpu, rc;
540 struct sigaction sa; 540 struct sigaction sa;
541 poptContext popt_context; 541 poptContext popt_context;
542 char rc;
543 void *retval; 542 void *retval;
544 543
545 main_thread = pthread_self(); 544 main_thread = pthread_self();
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 4c4b1f631ecf..077828c889f1 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress
7 7
8all: $(BINARIES) 8all: $(BINARIES)
9%: %.c 9%: %.c
10 $(CC) $(CFLAGS) -o $@ $^ 10 $(CC) $(CFLAGS) -o $@ $^ -lrt
11 11
12run_tests: all 12run_tests: all
13 @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1) 13 @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f5283438ee05..1cc6e2e19982 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -671,6 +671,7 @@ static void update_memslots(struct kvm_memslots *slots,
671 671
672 WARN_ON(mslots[i].id != id); 672 WARN_ON(mslots[i].id != id);
673 if (!new->npages) { 673 if (!new->npages) {
674 WARN_ON(!mslots[i].npages);
674 new->base_gfn = 0; 675 new->base_gfn = 0;
675 if (mslots[i].npages) 676 if (mslots[i].npages)
676 slots->used_slots--; 677 slots->used_slots--;
@@ -687,12 +688,25 @@ static void update_memslots(struct kvm_memslots *slots,
687 slots->id_to_index[mslots[i].id] = i; 688 slots->id_to_index[mslots[i].id] = i;
688 i++; 689 i++;
689 } 690 }
690 while (i > 0 && 691
691 new->base_gfn > mslots[i - 1].base_gfn) { 692 /*
692 mslots[i] = mslots[i - 1]; 693 * The ">=" is needed when creating a slot with base_gfn == 0,
693 slots->id_to_index[mslots[i].id] = i; 694 * so that it moves before all those with base_gfn == npages == 0.
694 i--; 695 *
695 } 696 * On the other hand, if new->npages is zero, the above loop has
697 * already left i pointing to the beginning of the empty part of
698 * mslots, and the ">=" would move the hole backwards in this
699 * case---which is wrong. So skip the loop when deleting a slot.
700 */
701 if (new->npages) {
702 while (i > 0 &&
703 new->base_gfn >= mslots[i - 1].base_gfn) {
704 mslots[i] = mslots[i - 1];
705 slots->id_to_index[mslots[i].id] = i;
706 i--;
707 }
708 } else
709 WARN_ON_ONCE(i != slots->used_slots);
696 710
697 mslots[i] = *new; 711 mslots[i] = *new;
698 slots->id_to_index[mslots[i].id] = i; 712 slots->id_to_index[mslots[i].id] = i;