-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 9
-rw-r--r-- Documentation/arm64/silicon-errata.txt | 1
-rw-r--r-- Documentation/cgroup-v2.txt | 11
-rw-r--r-- Documentation/dev-tools/kcov.rst | 2
-rw-r--r-- Documentation/devicetree/bindings/powerpc/4xx/emac.txt | 62
-rw-r--r-- Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/usb/usb251xb.txt | 53
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 3
-rw-r--r-- Documentation/trace/kprobetrace.txt | 2
-rw-r--r-- Documentation/trace/uprobetracer.txt | 2
-rw-r--r-- Documentation/virtual/kvm/api.txt | 4
-rw-r--r-- Documentation/vm/userfaultfd.txt | 4
-rw-r--r-- MAINTAINERS | 1
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arc/include/asm/hugepage.h | 1
-rw-r--r-- arch/arc/include/asm/pgtable.h | 1
-rw-r--r-- arch/arm/include/asm/kvm_arm.h | 1
-rw-r--r-- arch/arm/include/asm/kvm_host.h | 1
-rw-r--r-- arch/arm/include/asm/pgtable.h | 1
-rw-r--r-- arch/arm/kvm/arm.c | 3
-rw-r--r-- arch/arm/kvm/handle_exit.c | 19
-rw-r--r-- arch/arm/xen/mm.c | 2
-rw-r--r-- arch/arm64/Kconfig | 14
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 3
-rw-r--r-- arch/arm64/include/asm/pgtable-types.h | 4
-rw-r--r-- arch/arm64/kernel/cpuidle.c | 2
-rw-r--r-- arch/arm64/kernel/probes/kprobes.c | 6
-rw-r--r-- arch/arm64/kvm/handle_exit.c | 19
-rw-r--r-- arch/arm64/kvm/hyp/tlb.c | 64
-rw-r--r-- arch/arm64/mm/kasan_init.c | 2
-rw-r--r-- arch/avr32/include/asm/pgtable-2level.h | 1
-rw-r--r-- arch/avr32/oprofile/backtrace.c | 2
-rw-r--r-- arch/cris/arch-v32/drivers/cryptocop.c | 2
-rw-r--r-- arch/cris/include/asm/pgtable.h | 1
-rw-r--r-- arch/frv/include/asm/pgtable.h | 1
-rw-r--r-- arch/h8300/include/asm/pgtable.h | 1
-rw-r--r-- arch/h8300/kernel/ptrace_h.c | 2
-rw-r--r-- arch/hexagon/include/asm/pgtable.h | 1
-rw-r--r-- arch/ia64/include/asm/pgtable.h | 2
-rw-r--r-- arch/metag/include/asm/pgtable.h | 1
-rw-r--r-- arch/microblaze/include/asm/page.h | 3
-rw-r--r-- arch/mips/cavium-octeon/cpu.c | 2
-rw-r--r-- arch/mips/cavium-octeon/crypto/octeon-crypto.c | 1
-rw-r--r-- arch/mips/cavium-octeon/smp.c | 1
-rw-r--r-- arch/mips/include/asm/fpu.h | 1
-rw-r--r-- arch/mips/include/asm/pgtable-32.h | 1
-rw-r--r-- arch/mips/include/asm/pgtable-64.h | 1
-rw-r--r-- arch/mips/kernel/smp-bmips.c | 1
-rw-r--r-- arch/mips/kernel/smp-mt.c | 1
-rw-r--r-- arch/mips/loongson64/loongson-3/cop2-ex.c | 1
-rw-r--r-- arch/mips/netlogic/common/smp.c | 1
-rw-r--r-- arch/mips/netlogic/xlp/cop2-ex.c | 3
-rw-r--r-- arch/mips/sgi-ip22/ip28-berr.c | 1
-rw-r--r-- arch/mips/sgi-ip27/ip27-berr.c | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-smp.c | 3
-rw-r--r-- arch/mips/sgi-ip32/ip32-berr.c | 1
-rw-r--r-- arch/mips/sgi-ip32/ip32-reset.c | 1
-rw-r--r-- arch/mn10300/include/asm/page.h | 1
-rw-r--r-- arch/nios2/include/asm/pgtable.h | 1
-rw-r--r-- arch/openrisc/include/asm/pgtable.h | 1
-rw-r--r-- arch/powerpc/Kconfig | 138
-rw-r--r-- arch/powerpc/Makefile | 11
-rw-r--r-- arch/powerpc/boot/zImage.lds.S | 1
-rw-r--r-- arch/powerpc/crypto/crc32c-vpmsum_glue.c | 2
-rw-r--r-- arch/powerpc/include/asm/bitops.h | 4
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgtable.h | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 88
-rw-r--r-- arch/powerpc/include/asm/checksum.h | 2
-rw-r--r-- arch/powerpc/include/asm/cpuidle.h | 4
-rw-r--r-- arch/powerpc/include/asm/elf.h | 4
-rw-r--r-- arch/powerpc/include/asm/mce.h | 108
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pgtable.h | 1
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable-4k.h | 3
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable-64k.h | 1
-rw-r--r-- arch/powerpc/include/asm/nohash/pgtable.h | 2
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 7
-rw-r--r-- arch/powerpc/include/asm/prom.h | 18
-rw-r--r-- arch/powerpc/kernel/cputable.c | 3
-rw-r--r-- arch/powerpc/kernel/idle_book3s.S | 10
-rw-r--r-- arch/powerpc/kernel/mce.c | 88
-rw-r--r-- arch/powerpc/kernel/mce_power.c | 237
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 120
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 5
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2
-rw-r--r-- arch/powerpc/lib/Makefile | 1
-rw-r--r-- arch/powerpc/lib/sstep.c | 20
-rw-r--r-- arch/powerpc/lib/test_emulate_step.c | 434
-rw-r--r-- arch/powerpc/mm/init_64.c | 36
-rw-r--r-- arch/powerpc/mm/pgtable-radix.c | 4
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 2
-rw-r--r-- arch/powerpc/perf/isa207-common.c | 43
-rw-r--r-- arch/powerpc/perf/isa207-common.h | 1
-rw-r--r-- arch/powerpc/platforms/powernv/opal-wrappers.S | 4
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 21
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 20
-rw-r--r-- arch/powerpc/purgatory/trampoline.S | 12
-rw-r--r-- arch/powerpc/sysdev/axonram.c | 5
-rw-r--r-- arch/powerpc/sysdev/xics/icp-opal.c | 10
-rw-r--r-- arch/powerpc/sysdev/xics/xics-common.c | 17
-rw-r--r-- arch/s390/configs/default_defconfig | 2
-rw-r--r-- arch/s390/configs/gcov_defconfig | 2
-rw-r--r-- arch/s390/configs/performance_defconfig | 2
-rw-r--r-- arch/s390/crypto/paes_s390.c | 5
-rw-r--r-- arch/s390/defconfig | 2
-rw-r--r-- arch/s390/include/asm/cputime.h | 20
-rw-r--r-- arch/s390/include/asm/pgtable.h | 1
-rw-r--r-- arch/s390/include/asm/timex.h | 12
-rw-r--r-- arch/s390/include/uapi/asm/unistd.h | 4
-rw-r--r-- arch/s390/kernel/compat_wrapper.c | 1
-rw-r--r-- arch/s390/kernel/entry.S | 10
-rw-r--r-- arch/s390/kernel/ipl.c | 2
-rw-r--r-- arch/s390/kernel/process.c | 3
-rw-r--r-- arch/s390/kernel/syscalls.S | 2
-rw-r--r-- arch/s390/kernel/vtime.c | 2
-rw-r--r-- arch/s390/mm/pgtable.c | 19
-rw-r--r-- arch/score/include/asm/pgtable.h | 1
-rw-r--r-- arch/score/kernel/traps.c | 1
-rw-r--r-- arch/score/mm/extable.c | 2
-rw-r--r-- arch/sh/boards/mach-cayman/setup.c | 2
-rw-r--r-- arch/sh/include/asm/pgtable-2level.h | 1
-rw-r--r-- arch/sh/include/asm/pgtable-3level.h | 1
-rw-r--r-- arch/sparc/include/asm/pgtable_64.h | 1
-rw-r--r-- arch/tile/include/asm/pgtable_32.h | 1
-rw-r--r-- arch/tile/include/asm/pgtable_64.h | 1
-rw-r--r-- arch/um/include/asm/pgtable-2level.h | 1
-rw-r--r-- arch/um/include/asm/pgtable-3level.h | 1
-rw-r--r-- arch/unicore32/include/asm/pgtable.h | 1
-rw-r--r-- arch/x86/configs/x86_64_defconfig | 1
-rw-r--r-- arch/x86/events/amd/core.c | 2
-rw-r--r-- arch/x86/events/intel/cstate.c | 2
-rw-r--r-- arch/x86/events/intel/rapl.c | 2
-rw-r--r-- arch/x86/events/intel/uncore.h | 6
-rw-r--r-- arch/x86/hyperv/hv_init.c | 2
-rw-r--r-- arch/x86/include/asm/cpufeatures.h | 3
-rw-r--r-- arch/x86/include/asm/pgtable-3level.h | 3
-rw-r--r-- arch/x86/include/asm/pgtable.h | 2
-rw-r--r-- arch/x86/include/asm/pgtable_types.h | 4
-rw-r--r-- arch/x86/include/asm/pkeys.h | 15
-rw-r--r-- arch/x86/include/asm/purgatory.h | 20
-rw-r--r-- arch/x86/include/asm/tlbflush.h | 2
-rw-r--r-- arch/x86/include/uapi/asm/bootparam.h | 2
-rw-r--r-- arch/x86/kernel/apic/apic.c | 23
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 4
-rw-r--r-- arch/x86/kernel/cpu/centaur.c | 2
-rw-r--r-- arch/x86/kernel/cpu/common.c | 3
-rw-r--r-- arch/x86/kernel/cpu/cyrix.c | 1
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 4
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 1
-rw-r--r-- arch/x86/kernel/cpu/transmeta.c | 2
-rw-r--r-- arch/x86/kernel/cpu/vmware.c | 1
-rw-r--r-- arch/x86/kernel/ftrace.c | 2
-rw-r--r-- arch/x86/kernel/hpet.c | 2
-rw-r--r-- arch/x86/kernel/kdebugfs.c | 2
-rw-r--r-- arch/x86/kernel/kprobes/common.h | 2
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 6
-rw-r--r-- arch/x86/kernel/kprobes/opt.c | 2
-rw-r--r-- arch/x86/kernel/machine_kexec_64.c | 9
-rw-r--r-- arch/x86/kernel/reboot.c | 16
-rw-r--r-- arch/x86/kernel/tsc.c | 35
-rw-r--r-- arch/x86/kvm/vmx.c | 30
-rw-r--r-- arch/x86/mm/gup.c | 37
-rw-r--r-- arch/x86/pci/common.c | 9
-rw-r--r-- arch/x86/pci/xen.c | 23
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 1
-rw-r--r-- arch/x86/purgatory/purgatory.c | 36
-rw-r--r-- arch/x86/purgatory/setup-x86_64.S | 1
-rw-r--r-- arch/x86/purgatory/sha256.h | 1
-rw-r--r-- arch/xtensa/include/asm/pgtable.h | 1
-rw-r--r-- block/bio.c | 12
-rw-r--r-- block/blk-core.c | 41
-rw-r--r-- block/blk-mq-sysfs.c | 40
-rw-r--r-- block/blk-mq-tag.c | 3
-rw-r--r-- block/blk-mq.c | 37
-rw-r--r-- block/blk-mq.h | 2
-rw-r--r-- block/genhd.c | 37
-rw-r--r-- block/sed-opal.c | 10
-rw-r--r-- crypto/af_alg.c | 9
-rw-r--r-- crypto/algif_hash.c | 9
-rw-r--r-- drivers/acpi/internal.h | 2
-rw-r--r-- drivers/acpi/ioapic.c | 22
-rw-r--r-- drivers/acpi/pci_root.c | 4
-rw-r--r-- drivers/ata/ahci_qoriq.c | 6
-rw-r--r-- drivers/ata/libata-sff.c | 1
-rw-r--r-- drivers/ata/libata-transport.c | 9
-rw-r--r-- drivers/base/core.c | 5
-rw-r--r-- drivers/block/paride/pcd.c | 2
-rw-r--r-- drivers/block/paride/pd.c | 2
-rw-r--r-- drivers/block/paride/pf.c | 2
-rw-r--r-- drivers/block/paride/pg.c | 2
-rw-r--r-- drivers/block/paride/pt.c | 2
-rw-r--r-- drivers/block/rbd.c | 16
-rw-r--r-- drivers/block/zram/zram_drv.c | 2
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 16
-rw-r--r-- drivers/char/nwbutton.c | 2
-rw-r--r-- drivers/char/random.c | 129
-rw-r--r-- drivers/cpufreq/cpufreq.c | 1
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 67
-rw-r--r-- drivers/crypto/s5p-sss.c | 132
-rw-r--r-- drivers/crypto/ux500/cryp/cryp.c | 2
-rw-r--r-- drivers/firmware/efi/arm-runtime.c | 1
-rw-r--r-- drivers/firmware/efi/libstub/secureboot.c | 4
-rw-r--r-- drivers/gpu/drm/amd/acp/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 2
-rw-r--r-- drivers/gpu/drm/arm/malidp_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/arm/malidp_hw.c | 2
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c | 18
-rw-r--r-- drivers/gpu/drm/arm/malidp_regs.h | 1
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/cfg_space.c | 57
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 139
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.h | 20
-rw-r--r-- drivers/gpu/drm/i915/gvt/firmware.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 40
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 12
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 439
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio.c | 66
-rw-r--r-- drivers/gpu/drm/i915/gvt/opregion.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gvt/render.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 52
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 72
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 97
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_object.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 57
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 58
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 13
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 49
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 4
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_out.c | 4
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_regs.h | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 8
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 37
-rw-r--r-- drivers/hv/channel.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-brcmstb.c | 27
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.h | 1
-rw-r--r-- drivers/i2c/busses/i2c-designware-platdrv.c | 28
-rw-r--r-- drivers/i2c/busses/i2c-exynos5.c | 3
-rw-r--r-- drivers/i2c/busses/i2c-meson.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-mt65xx.c | 9
-rw-r--r-- drivers/i2c/busses/i2c-riic.c | 6
-rw-r--r-- drivers/i2c/i2c-mux.c | 2
-rw-r--r-- drivers/irqchip/irq-crossbar.c | 9
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 16
-rw-r--r-- drivers/isdn/gigaset/bas-gigaset.c | 3
-rw-r--r-- drivers/isdn/hisax/st5481_b.c | 2
-rw-r--r-- drivers/macintosh/macio_asic.c | 1
-rw-r--r-- drivers/md/bcache/util.h | 1
-rw-r--r-- drivers/md/dm.c | 29
-rw-r--r-- drivers/md/md-cluster.c | 2
-rw-r--r-- drivers/md/md.c | 27
-rw-r--r-- drivers/md/md.h | 6
-rw-r--r-- drivers/md/raid1.c | 29
-rw-r--r-- drivers/md/raid10.c | 44
-rw-r--r-- drivers/md/raid5.c | 5
-rw-r--r-- drivers/media/dvb-frontends/drx39xyj/drx_driver.h | 8
-rw-r--r-- drivers/media/platform/vsp1/vsp1_drm.c | 33
-rw-r--r-- drivers/media/rc/lirc_dev.c | 4
-rw-r--r-- drivers/media/rc/nuvoton-cir.c | 5
-rw-r--r-- drivers/media/rc/rc-main.c | 26
-rw-r--r-- drivers/media/rc/serial_ir.c | 123
-rw-r--r-- drivers/media/usb/dvb-usb/dw2102.c | 244
-rw-r--r-- drivers/misc/sgi-gru/grufault.c | 9
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 36
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 24
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 40
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 206
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 16
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 110
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 104
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_config.h | 6
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 17
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_main.h | 42
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 43
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 12
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 184
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 4
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 64
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 1
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 25
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 43
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge.h | 4
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 47
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 3
-rw-r--r-- drivers/net/hyperv/netvsc.c | 8
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 11
-rw-r--r-- drivers/net/phy/marvell.c | 15
-rw-r--r-- drivers/net/phy/phy_device.c | 2
-rw-r--r-- drivers/net/phy/spi_ks8995.c | 3
-rw-r--r-- drivers/net/team/team.c | 1
-rw-r--r-- drivers/net/tun.c | 19
-rw-r--r-- drivers/net/vrf.c | 3
-rw-r--r-- drivers/net/vxlan.c | 73
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.c | 4
-rw-r--r-- drivers/net/wimax/i2400m/usb.c | 3
-rw-r--r-- drivers/net/xen-netback/interface.c | 26
-rw-r--r-- drivers/net/xen-netback/netback.c | 2
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 20
-rw-r--r-- drivers/pci/dwc/pci-exynos.c | 8
-rw-r--r-- drivers/pci/pcie/aspm.c | 5
-rw-r--r-- drivers/pci/quirks.c | 1
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.c | 15
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c | 12
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 49
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 22
-rw-r--r-- drivers/platform/x86/asus-wmi.h | 1
-rw-r--r-- drivers/platform/x86/fujitsu-laptop.c | 451
-rw-r--r-- drivers/scsi/Kconfig | 19
-rw-r--r-- drivers/scsi/aacraid/src.c | 2
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_core.c | 2
-rw-r--r-- drivers/scsi/libiscsi.c | 26
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 9
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 22
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 22
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 24
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 128
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 107
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 43
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 68
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 3
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 19
-rw-r--r-- drivers/scsi/qedf/qedf_dbg.h | 13
-rw-r--r-- drivers/scsi/qedf/qedf_fip.c | 2
-rw-r--r-- drivers/scsi/qedf/qedf_io.c | 4
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 4
-rw-r--r-- drivers/scsi/qedi/qedi_debugfs.c | 16
-rw-r--r-- drivers/scsi/qedi/qedi_fw.c | 4
-rw-r--r-- drivers/scsi/qedi/qedi_gbl.h | 8
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.c | 8
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 12
-rw-r--r-- drivers/scsi/scsi_lib.c | 14
-rw-r--r-- drivers/scsi/scsi_priv.h | 3
-rw-r--r-- drivers/scsi/sd.c | 58
-rw-r--r-- drivers/scsi/storvsc_drv.c | 27
-rw-r--r-- drivers/scsi/ufs/ufs.h | 22
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 231
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 15
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 2
-rw-r--r-- drivers/staging/lustre/lnet/lnet/lib-socket.c | 4
-rw-r--r-- drivers/staging/octeon/ethernet-rx.c | 1
-rw-r--r-- drivers/staging/vc04_services/Kconfig | 1
-rw-r--r-- drivers/tty/n_hdlc.c | 132
-rw-r--r-- drivers/tty/serial/samsung.c | 6
-rw-r--r-- drivers/usb/dwc3/dwc3-omap.c | 3
-rw-r--r-- drivers/usb/dwc3/gadget.c | 76
-rw-r--r-- drivers/usb/dwc3/gadget.h | 14
-rw-r--r-- drivers/usb/gadget/configfs.c | 1
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 17
-rw-r--r-- drivers/usb/gadget/function/f_uvc.c | 7
-rw-r--r-- drivers/usb/gadget/legacy/inode.c | 7
-rw-r--r-- drivers/usb/gadget/udc/atmel_usba_udc.c | 4
-rw-r--r-- drivers/usb/gadget/udc/dummy_hcd.c | 2
-rw-r--r-- drivers/usb/gadget/udc/net2280.c | 25
-rw-r--r-- drivers/usb/gadget/udc/pxa27x_udc.c | 5
-rw-r--r-- drivers/usb/host/ohci-at91.c | 4
-rw-r--r-- drivers/usb/host/xhci-dbg.c | 2
-rw-r--r-- drivers/usb/host/xhci-mtk.c | 7
-rw-r--r-- drivers/usb/host/xhci-plat.c | 2
-rw-r--r-- drivers/usb/host/xhci-tegra.c | 1
-rw-r--r-- drivers/usb/host/xhci.c | 4
-rw-r--r-- drivers/usb/misc/iowarrior.c | 21
-rw-r--r-- drivers/usb/misc/usb251xb.c | 59
-rw-r--r-- drivers/usb/phy/phy-isp1301.c | 7
-rw-r--r-- drivers/usb/serial/digi_acceleport.c | 2
-rw-r--r-- drivers/usb/serial/io_ti.c | 8
-rw-r--r-- drivers/usb/serial/omninet.c | 13
-rw-r--r-- drivers/usb/serial/safe_serial.c | 5
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 14
-rw-r--r-- drivers/xen/gntdev.c | 11
-rw-r--r-- drivers/xen/swiotlb-xen.c | 47
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 1
-rw-r--r-- fs/dlm/lowcomms.c | 2
-rw-r--r-- fs/fat/inode.c | 13
-rw-r--r-- fs/fs-writeback.c | 35
-rw-r--r-- fs/gfs2/incore.h | 2
-rw-r--r-- fs/iomap.c | 17
-rw-r--r-- fs/ocfs2/cluster/tcp.c | 2
-rw-r--r-- fs/overlayfs/util.c | 1
-rw-r--r-- fs/timerfd.c | 8
-rw-r--r-- fs/userfaultfd.c | 75
-rw-r--r-- fs/xfs/kmem.c | 18
-rw-r--r-- fs/xfs/kmem.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 34
-rw-r--r-- fs/xfs/libxfs/xfs_bmap_btree.c | 6
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_priv.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_sf.c | 87
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.c | 26
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.h | 2
-rw-r--r-- fs/xfs/xfs_aops.c | 59
-rw-r--r-- fs/xfs/xfs_dir2_readdir.c | 11
-rw-r--r-- fs/xfs/xfs_icache.c | 2
-rw-r--r-- fs/xfs/xfs_inode.c | 14
-rw-r--r-- fs/xfs/xfs_iomap.c | 25
-rw-r--r-- fs/xfs/xfs_itable.c | 6
-rw-r--r-- fs/xfs/xfs_mount.c | 3
-rw-r--r-- fs/xfs/xfs_reflink.c | 23
-rw-r--r-- fs/xfs/xfs_reflink.h | 4
-rw-r--r-- fs/xfs/xfs_super.c | 2
-rw-r--r-- include/asm-generic/4level-fixup.h | 3
-rw-r--r-- include/asm-generic/5level-fixup.h | 41
-rw-r--r-- include/asm-generic/pgtable-nop4d-hack.h | 62
-rw-r--r-- include/asm-generic/pgtable-nop4d.h | 56
-rw-r--r-- include/asm-generic/pgtable-nopud.h | 48
-rw-r--r-- include/asm-generic/pgtable.h | 48
-rw-r--r-- include/asm-generic/tlb.h | 14
-rw-r--r-- include/crypto/if_alg.h | 2
-rw-r--r-- include/dt-bindings/sound/cs42l42.h | 2
-rw-r--r-- include/linux/blkdev.h | 1
-rw-r--r-- include/linux/ceph/libceph.h | 2
-rw-r--r-- include/linux/ceph/osd_client.h | 1
-rw-r--r-- include/linux/dccp.h | 1
-rw-r--r-- include/linux/device.h | 1
-rw-r--r-- include/linux/filter.h | 16
-rw-r--r-- include/linux/fs.h | 2
-rw-r--r-- include/linux/genhd.h | 8
-rw-r--r-- include/linux/hugetlb.h | 5
-rw-r--r-- include/linux/irqchip/arm-gic-v3.h | 2
-rw-r--r-- include/linux/irqdomain.h | 4
-rw-r--r-- include/linux/jump_label.h | 11
-rw-r--r-- include/linux/kasan.h | 2
-rw-r--r-- include/linux/list_nulls.h | 5
-rw-r--r-- include/linux/mm.h | 34
-rw-r--r-- include/linux/net.h | 2
-rw-r--r-- include/linux/phy.h | 4
-rw-r--r-- include/linux/purgatory.h | 23
-rw-r--r-- include/linux/random.h | 18
-rw-r--r-- include/linux/rculist_nulls.h | 14
-rw-r--r-- include/linux/regulator/machine.h | 2
-rw-r--r-- include/linux/user_namespace.h | 2
-rw-r--r-- include/linux/userfaultfd_k.h | 13
-rw-r--r-- include/linux/vm_event_item.h | 3
-rw-r--r-- include/linux/wait.h | 31
-rw-r--r-- include/media/vsp1.h | 13
-rw-r--r-- include/net/inet_common.h | 3
-rw-r--r-- include/net/inet_connection_sock.h | 2
-rw-r--r-- include/net/irda/timer.h | 2
-rw-r--r-- include/net/sctp/structs.h | 3
-rw-r--r-- include/net/sock.h | 9
-rw-r--r-- include/scsi/libiscsi.h | 1
-rw-r--r-- include/scsi/scsi_device.h | 4
-rw-r--r-- include/trace/events/syscalls.h | 1
-rw-r--r-- include/uapi/drm/omap_drm.h | 38
-rw-r--r-- include/uapi/linux/packet_diag.h | 2
-rw-r--r-- include/uapi/linux/userfaultfd.h | 5
-rw-r--r-- include/xen/swiotlb-xen.h | 11
-rw-r--r-- init/main.c | 1
-rw-r--r-- kernel/bpf/hashtab.c | 119
-rw-r--r-- kernel/bpf/lpm_trie.c | 6
-rw-r--r-- kernel/cgroup/cgroup-v1.c | 2
-rw-r--r-- kernel/cgroup/cgroup.c | 2
-rw-r--r-- kernel/cgroup/pids.c | 2
-rw-r--r-- kernel/events/core.c | 2
-rw-r--r-- kernel/exit.c | 1
-rw-r--r-- kernel/kexec_file.c | 8
-rw-r--r-- kernel/kexec_internal.h | 6
-rw-r--r-- kernel/locking/lockdep.c | 11
-rw-r--r-- kernel/locking/test-ww_mutex.c | 6
-rw-r--r-- kernel/memremap.c | 4
-rw-r--r-- kernel/sched/core.c | 11
-rw-r--r-- kernel/sched/cpufreq_schedutil.c | 19
-rw-r--r-- kernel/sched/fair.c | 2
-rw-r--r-- kernel/sched/features.h | 5
-rw-r--r-- kernel/sched/wait.c | 39
-rw-r--r-- kernel/time/jiffies.c | 2
-rw-r--r-- kernel/trace/Kconfig | 6
-rw-r--r-- kernel/trace/Makefile | 4
-rw-r--r-- kernel/trace/ftrace.c | 23
-rw-r--r-- kernel/trace/trace.c | 10
-rw-r--r-- kernel/trace/trace_probe.h | 4
-rw-r--r-- kernel/trace/trace_stack.c | 2
-rw-r--r-- kernel/ucount.c | 18
-rw-r--r-- kernel/workqueue.c | 1
-rw-r--r-- lib/ioremap.c | 39
-rw-r--r-- lib/radix-tree.c | 4
-rw-r--r-- lib/refcount.c | 14
-rw-r--r-- mm/backing-dev.c | 43
-rw-r--r-- mm/gup.c | 46
-rw-r--r-- mm/huge_memory.c | 9
-rw-r--r-- mm/hugetlb.c | 29
-rw-r--r-- mm/kasan/kasan_init.c | 44
-rw-r--r-- mm/kasan/quarantine.c | 51
-rw-r--r-- mm/madvise.c | 44
-rw-r--r-- mm/memblock.c | 5
-rw-r--r-- mm/memcontrol.c | 18
-rw-r--r-- mm/memory.c | 230
-rw-r--r-- mm/memory_hotplug.c | 6
-rw-r--r-- mm/mlock.c | 10
-rw-r--r-- mm/mprotect.c | 26
-rw-r--r-- mm/mremap.c | 13
-rw-r--r-- mm/page_alloc.c | 3
-rw-r--r-- mm/page_vma_mapped.c | 6
-rw-r--r-- mm/pagewalk.c | 32
-rw-r--r-- mm/percpu-vm.c | 7
-rw-r--r-- mm/percpu.c | 5
-rw-r--r-- mm/pgtable-generic.c | 6
-rw-r--r-- mm/rmap.c | 20
-rw-r--r-- mm/sparse-vmemmap.c | 22
-rw-r--r-- mm/swapfile.c | 26
-rw-r--r-- mm/userfaultfd.c | 23
-rw-r--r-- mm/vmalloc.c | 84
-rw-r--r-- mm/vmstat.c | 3
-rw-r--r-- mm/z3fold.c | 1
-rw-r--r-- net/atm/svc.c | 5
-rw-r--r-- net/ax25/af_ax25.c | 3
-rw-r--r-- net/bluetooth/l2cap_sock.c | 2
-rw-r--r-- net/bluetooth/rfcomm/sock.c | 3
-rw-r--r-- net/bluetooth/sco.c | 2
-rw-r--r-- net/bridge/br_input.c | 1
-rw-r--r-- net/bridge/br_netfilter_hooks.c | 21
-rw-r--r-- net/ceph/ceph_common.c | 15
-rw-r--r-- net/ceph/osd_client.c | 36
-rw-r--r-- net/ceph/osdmap.c | 4
-rw-r--r-- net/core/dev.c | 1
-rw-r--r-- net/core/net-sysfs.c | 6
-rw-r--r-- net/core/skbuff.c | 30
-rw-r--r-- net/core/sock.c | 106
-rw-r--r-- net/dccp/ccids/ccid2.c | 1
-rw-r--r-- net/dccp/ipv4.c | 3
-rw-r--r-- net/dccp/ipv6.c | 8
-rw-r--r-- net/dccp/minisocks.c | 24
-rw-r--r-- net/decnet/af_decnet.c | 5
-rw-r--r-- net/ipv4/af_inet.c | 9
-rw-r--r-- net/ipv4/inet_connection_sock.c | 2
-rw-r--r-- net/ipv4/ip_output.c | 2
-rw-r--r-- net/ipv4/tcp_ipv4.c | 10
-rw-r--r-- net/ipv4/tcp_timer.c | 6
-rw-r--r-- net/ipv6/af_inet6.c | 10
-rw-r--r-- net/ipv6/ip6_fib.c | 2
-rw-r--r-- net/ipv6/ip6_offload.c | 4
-rw-r--r-- net/ipv6/ip6_output.c | 9
-rw-r--r-- net/ipv6/ip6_vti.c | 8
-rw-r--r-- net/ipv6/route.c | 11
-rw-r--r-- net/ipv6/tcp_ipv6.c | 8
-rw-r--r-- net/irda/af_irda.c | 5
-rw-r--r-- net/iucv/af_iucv.c | 2
-rw-r--r-- net/llc/af_llc.c | 4
-rw-r--r-- net/mpls/af_mpls.c | 4
-rw-r--r-- net/netrom/af_netrom.c | 3
-rw-r--r-- net/nfc/llcp_sock.c | 2
-rw-r--r-- net/phonet/pep.c | 6
-rw-r--r-- net/phonet/socket.c | 4
-rw-r--r-- net/rds/connection.c | 1
-rw-r--r-- net/rds/ib_cm.c | 47
-rw-r--r-- net/rds/rds.h | 6
-rw-r--r-- net/rds/tcp.c | 38
-rw-r--r-- net/rds/tcp.h | 2
-rw-r--r-- net/rds/tcp_listen.c | 11
-rw-r--r-- net/rose/af_rose.c | 3
-rw-r--r-- net/rxrpc/input.c | 27
-rw-r--r-- net/rxrpc/recvmsg.c | 4
-rw-r--r-- net/rxrpc/sendmsg.c | 49
-rw-r--r-- net/sched/act_connmark.c | 3
-rw-r--r-- net/sched/act_skbmod.c | 1
-rw-r--r-- net/sctp/ipv6.c | 5
-rw-r--r-- net/sctp/protocol.c | 5
-rw-r--r-- net/sctp/socket.c | 4
-rw-r--r-- net/smc/af_smc.c | 2
-rw-r--r-- net/socket.c | 5
-rw-r--r-- net/tipc/socket.c | 8
-rw-r--r-- net/unix/af_unix.c | 5
-rw-r--r-- net/vmw_vsock/af_vsock.c | 3
-rw-r--r-- net/x25/af_x25.c | 3
-rw-r--r-- net/xfrm/xfrm_policy.c | 19
-rw-r--r-- scripts/gcc-plugins/sancov_plugin.c | 2
-rw-r--r-- scripts/module-common.lds | 2
-rw-r--r-- scripts/spelling.txt | 3
-rw-r--r-- sound/soc/amd/acp-pcm-dma.c | 2
-rw-r--r-- tools/include/uapi/linux/bpf_perf_event.h | 18
-rw-r--r-- tools/lguest/lguest.c | 2
-rw-r--r-- tools/lib/bpf/Makefile | 2
-rw-r--r-- tools/lib/traceevent/Makefile | 2
-rw-r--r-- tools/lib/traceevent/event-parse.h | 2
-rw-r--r-- tools/objtool/builtin-check.c | 15
-rw-r--r-- tools/objtool/elf.c | 12
-rw-r--r-- tools/objtool/elf.h | 1
-rw-r--r-- tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c | 2
-rwxr-xr-x tools/testing/ktest/ktest.pl | 21
-rw-r--r-- tools/testing/radix-tree/Makefile | 15
-rw-r--r-- tools/testing/radix-tree/benchmark.c | 173
-rw-r--r-- tools/testing/radix-tree/idr-test.c | 78
-rw-r--r-- tools/testing/radix-tree/main.c | 1
-rw-r--r-- tools/testing/radix-tree/tag_check.c | 29
-rw-r--r-- tools/testing/radix-tree/test.h | 1
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 4
-rw-r--r-- tools/testing/selftests/bpf/test_verifier.c | 4
-rw-r--r-- tools/testing/selftests/powerpc/harness.c | 6
-rw-r--r-- tools/testing/selftests/powerpc/include/vsx_asm.h | 48
-rw-r--r-- tools/testing/selftests/vm/Makefile | 4
-rw-r--r-- tools/testing/selftests/x86/fsgsbase.c | 2
-rw-r--r-- tools/testing/selftests/x86/ldt_gdt.c | 16
-rw-r--r-- tools/testing/selftests/x86/ptrace_syscall.c | 3
-rw-r--r-- tools/testing/selftests/x86/single_step_syscall.c | 5
-rw-r--r-- virt/kvm/arm/vgic/vgic-its.c | 109
-rw-r--r-- virt/kvm/arm/vgic/vgic-mmio.c | 32
-rw-r--r-- virt/kvm/arm/vgic/vgic-v3.c | 5
644 files changed, 8248 insertions(+), 3684 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 986e44387dad..2ba45caabada 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -653,6 +653,9 @@
 	cpuidle.off=1	[CPU_IDLE]
 			disable the cpuidle sub-system
 
+	cpufreq.off=1	[CPU_FREQ]
+			disable the cpufreq sub-system
+
 	cpu_init_udelay=N
 			[X86] Delay for N microsec between assert and de-assert
 			of APIC INIT to start processors. This delay occurs
@@ -1183,6 +1186,12 @@
 			functions that can be changed at run time by the
 			set_graph_notrace file in the debugfs tracing directory.
 
+	ftrace_graph_max_depth=<uint>
+			[FTRACE] Used with the function graph tracer. This is
+			the max depth it will trace into a function. This value
+			can be changed at run time by the max_graph_depth file
+			in the tracefs tracing directory. default: 0 (no limit)
+
 	gamecon.map[2|3]=
 			[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
 			support via parallel port (up to 5 devices per port)
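The new ftrace_graph_max_depth boot parameter has the runtime twin named above: the max_graph_depth file in tracefs. As a quick illustration, a minimal user-space sketch — not part of the patch — assuming tracefs is mounted at /sys/kernel/tracing and the function_graph tracer is in use:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* 0 means no limit; 1 records only the outermost function calls. */
	int fd = open("/sys/kernel/tracing/max_graph_depth", O_WRONLY);

	if (fd < 0) {
		perror("open");	/* the tracefs mount point is an assumption */
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}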
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index a71b8095dbd8..2f66683500b8 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -68,3 +68,4 @@ stable kernels.
 | | | | |
 | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
 | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
+| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 3b8449f8ac7e..49d7c997fa1e 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -1142,16 +1142,17 @@ used by the kernel.
 
   pids.max
 
-	A read-write single value file which exists on non-root cgroups. The
-	default is "max".
+	A read-write single value file which exists on non-root
+	cgroups. The default is "max".
 
 	Hard limit of number of processes.
 
   pids.current
 
 	A read-only single value file which exists on all cgroups.
 
-	The number of processes currently in the cgroup and its descendants.
+	The number of processes currently in the cgroup and its
+	descendants.
 
 Organisational operations are not blocked by cgroup policies, so it is
 possible to have pids.current > pids.max. This can be done by either
diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index 2c41b713841f..44886c91e112 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims
 to collect more or less stable coverage that is function of syscall inputs.
 To achieve this goal it does not collect coverage in soft/hard interrupts
 and instrumentation of some inherently non-deterministic parts of kernel is
-disbled (e.g. scheduler, locking).
+disabled (e.g. scheduler, locking).
 
 Usage
 -----
diff --git a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
index 712baf6c3e24..44b842b6ca15 100644
--- a/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
+++ b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
@@ -71,6 +71,9 @@
 			    For Axon it can be absent, though my current driver
 			    doesn't handle phy-address yet so for now, keep
 			    0x00ffffff in it.
+- phy-handle : Used to describe configurations where an external PHY
+	       is used. Please refer to:
+	       Documentation/devicetree/bindings/net/ethernet.txt
 - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
 		      operations (if absent the value is the same as
 		      rx-fifo-size). For Axon, either absent or 2048.
@@ -81,8 +84,22 @@
 		      offload, phandle of the TAH device node.
 - tah-channel : 1 cell, optional. If appropriate, channel used on the
 		TAH engine.
+- fixed-link : Fixed-link subnode describing a link to a non-MDIO
+	       managed entity. See
+	       Documentation/devicetree/bindings/net/fixed-link.txt
+	       for details.
+- mdio subnode : When the EMAC has a phy connected to its local
+		 mdio, which is supported by the kernel's network
+		 PHY library in drivers/net/phy, there must be a device
+		 tree subnode with the following required properties:
+	- #address-cells: Must be <1>.
+	- #size-cells: Must be <0>.
 
-Example:
+For PHY definitions: Please refer to
+Documentation/devicetree/bindings/net/phy.txt and
+Documentation/devicetree/bindings/net/ethernet.txt
+
+Examples:
 
 	EMAC0: ethernet@40000800 {
 		device_type = "network";
@@ -104,6 +121,48 @@
 		zmii-channel = <0>;
 	};
 
+	EMAC1: ethernet@ef600c00 {
+		device_type = "network";
+		compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+		interrupt-parent = <&EMAC1>;
+		interrupts = <0 1>;
+		#interrupt-cells = <1>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
+				 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
+		reg = <0xef600c00 0x000000c4>;
+		local-mac-address = [000000000000]; /* Filled in by U-Boot */
+		mal-device = <&MAL0>;
+		mal-tx-channel = <0>;
+		mal-rx-channel = <0>;
+		cell-index = <0>;
+		max-frame-size = <9000>;
+		rx-fifo-size = <16384>;
+		tx-fifo-size = <2048>;
+		fifo-entry-size = <10>;
+		phy-mode = "rgmii";
+		phy-handle = <&phy0>;
+		phy-map = <0x00000000>;
+		rgmii-device = <&RGMII0>;
+		rgmii-channel = <0>;
+		tah-device = <&TAH0>;
+		tah-channel = <0>;
+		has-inverted-stacr-oc;
+		has-new-stacr-staopc;
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy0: ethernet-phy@0 {
+				compatible = "ethernet-phy-ieee802.3-c22";
+				reg = <0>;
+			};
+		};
+	};
+
+
 	ii) McMAL node
 
 	Required properties:
@@ -145,4 +204,3 @@
 - revision : as provided by the RGMII new version register if
 	      available.
 	      For Axon: 0x0000012a
-
diff --git a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
index c3f6546ebac7..6a23ad9ac53a 100644
--- a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
@@ -45,7 +45,7 @@ Required Properties:
 Optional Properties:
 - reg-names: In addition to the required properties, the following are optional
   - "efuse-address" - Contains efuse base address used to pick up ABB info.
-  - "ldo-address" - Contains address of ABB LDO overide register address.
+  - "ldo-address" - Contains address of ABB LDO override register.
   "efuse-address" is required for this.
 - ti,ldovbb-vset-mask - Required if ldo-address is set, mask for LDO override
   register to provide override vset value.
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index 0c065f77658f..3957d4edaa74 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -7,18 +7,18 @@ Required properties :
 - compatible : Should be "microchip,usb251xb" or one of the specific types:
   "microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
   "microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi"
-- hub-reset-gpios : Should specify the gpio for hub reset
+- reset-gpios : Should specify the gpio for hub reset
+- reg : I2C address on the selected bus (default is <0x2C>)
 
 Optional properties :
-- reg : I2C address on the selected bus (default is <0x2C>)
 - skip-config : Skip Hub configuration, but only send the USB-Attach command
-- vendor-id : USB Vendor ID of the hub (16 bit, default is 0x0424)
-- product-id : USB Product ID of the hub (16 bit, default depends on type)
-- device-id : USB Device ID of the hub (16 bit, default is 0x0bb3)
-- language-id : USB Language ID (16 bit, default is 0x0000)
-- manufacturer : USB Manufacturer string (max 31 characters long)
-- product : USB Product string (max 31 characters long)
-- serial : USB Serial string (max 31 characters long)
+- vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
+- product-id : Set USB Product ID of the hub (16 bit, default depends on type)
+- device-id : Set USB Device ID of the hub (16 bit, default is 0x0bb3)
+- language-id : Set USB Language ID (16 bit, default is 0x0000)
+- manufacturer : Set USB Manufacturer string (max 31 characters long)
+- product : Set USB Product string (max 31 characters long)
+- serial : Set USB Serial string (max 31 characters long)
 - {bus,self}-powered : selects between self- and bus-powered operation (default
   is self-powered)
 - disable-hi-speed : disable USB Hi-Speed support
@@ -31,8 +31,10 @@ Optional properties :
   (default is individual)
 - dynamic-power-switching : enable auto-switching from self- to bus-powered
   operation if the local power source is removed or unavailable
-- oc-delay-{100us,4ms,8ms,16ms} : set over current timer delay (default is 8ms)
-- compound-device : indicated the hub is part of a compound device
+- oc-delay-us : Delay time (in microseconds) for filtering the over-current
+  sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If
+  an invalid value is given, the default is used instead.
+- compound-device : indicate the hub is part of a compound device
 - port-mapping-mode : enable port mapping mode
 - string-support : enable string descriptor support (required for manufacturer,
   product and serial string configuration)
@@ -40,34 +42,15 @@ Optional properties :
   device connected.
 - sp-disabled-ports : Specifies the ports which will be self-power disabled
 - bp-disabled-ports : Specifies the ports which will be bus-power disabled
-- max-sp-power : Specifies the maximum current the hub consumes from an
-  upstream port when operating as self-powered hub including the power
-  consumption of a permanently attached peripheral if the hub is
-  configured as a compound device. The value is given in mA in a 0 - 500
-  range (default is 2).
-- max-bp-power : Specifies the maximum current the hub consumes from an
-  upstream port when operating as bus-powered hub including the power
-  consumption of a permanently attached peripheral if the hub is
-  configured as a compound device. The value is given in mA in a 0 - 500
-  range (default is 100).
-- max-sp-current : Specifies the maximum current the hub consumes from an
-  upstream port when operating as self-powered hub EXCLUDING the power
-  consumption of a permanently attached peripheral if the hub is
-  configured as a compound device. The value is given in mA in a 0 - 500
-  range (default is 2).
-- max-bp-current : Specifies the maximum current the hub consumes from an
-  upstream port when operating as bus-powered hub EXCLUDING the power
-  consumption of a permanently attached peripheral if the hub is
-  configured as a compound device. The value is given in mA in a 0 - 500
-  range (default is 100).
-- power-on-time : Specifies the time it takes from the time the host initiates
-  the power-on sequence to a port until the port has adequate power. The
-  value is given in ms in a 0 - 510 range (default is 100ms).
+- power-on-time-ms : Specifies the time it takes from the time the host
+  initiates the power-on sequence to a port until the port has adequate
+  power. The value is given in ms in a 0 - 510 range (default is 100ms).
 
 Examples:
 	usb2512b@2c {
 		compatible = "microchip,usb2512b";
-		hub-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+		reg = <0x2c>;
+		reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	};
 
 	usb2514b@2c {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index fc73eeb7b3b8..ab0230461377 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
 	FALSE (router)
 
 forwarding - BOOLEAN
-	Enable IP forwarding on this interface.
+	Enable IP forwarding on this interface. This controls whether packets
+	received _on_ this interface can be forwarded.
 
 mc_forwarding - BOOLEAN
 	Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
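For reference, the per-interface knob clarified above is visible from user space under /proc/sys. A minimal sketch, not part of the patch — the IPv6 variant of the path and the "eth0" device name are assumptions for illustration:

#include <stdio.h>

int main(void)
{
	char buf[4] = "";
	/* illustrative path; substitute the interface of interest */
	FILE *f = fopen("/proc/sys/net/ipv6/conf/eth0/forwarding", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		/* "1" means packets received on eth0 may be forwarded */
		printf("eth0 forwarding: %s", buf);
	fclose(f);
	return 0;
}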
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index e4991fb1eedc..41ef9d8efe95 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes
 functions). Unlike the Tracepoint based event, this can be added and removed
 dynamically, on the fly.
 
-To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
 
 Similar to the events tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via
diff --git a/Documentation/trace/uprobetracer.txt b/Documentation/trace/uprobetracer.txt
index fa7b680ee8a0..bf526a7c5559 100644
--- a/Documentation/trace/uprobetracer.txt
+++ b/Documentation/trace/uprobetracer.txt
@@ -7,7 +7,7 @@
 Overview
 --------
 Uprobe based trace events are similar to kprobe based trace events.
-To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
+To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.
 
 Similar to the kprobe-event tracer, this doesn't need to be activated via
 current_tracer. Instead of that, add probe points via
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 069450938b79..3c248f772ae6 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory
 slot. When changing an existing slot, it may be moved in the guest
 physical memory space, or its flags may be modified. It may not be
 resized. Slots may not overlap in guest physical address space.
+Bits 0-15 of "slot" specifies the slot id and this value should be
+less than the maximum number of user memory slots supported per VM.
+The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
+if this capability is supported by the architecture.
 
 If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
 specifies the address space which is being modified. They must be
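The added paragraph pairs with the existing KVM_CHECK_EXTENSION ioctl. A minimal user-space sketch, not part of the patch, of how a VMM could query the limit before picking a slot id (error handling trimmed; the printf is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int nr_slots;

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	/* 0 means the architecture does not report a limit */
	nr_slots = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
	printf("user memory slots per VM: %d\n", nr_slots);
	/* a valid slot id must fit in bits 0-15 and be below nr_slots */
	return 0;
}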
diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt
index 0e5543a920e5..bb2f945f87ab 100644
--- a/Documentation/vm/userfaultfd.txt
+++ b/Documentation/vm/userfaultfd.txt
@@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The
 manager has to explicitly enable these events by setting appropriate
 bits in uffdio_api.features passed to UFFDIO_API ioctl:
 
-UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
-non-cooperative process. When the monitored process exits, the uffd
-manager will get UFFD_EVENT_EXIT.
-
 UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
 this feature is enabled, the userfaultfd context of the parent process
 is duplicated into the newly created process. The manager receives
diff --git a/MAINTAINERS b/MAINTAINERS
index c265a5fe4848..c776906f67a9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8307,7 +8307,6 @@ M: Richard Leitner <richard.leitner@skidata.com>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	drivers/usb/misc/usb251xb.c
-F:	include/linux/platform_data/usb251xb.h
 F:	Documentation/devicetree/bindings/usb/usb251xb.txt
 
 MICROSOFT SURFACE PRO 3 BUTTON DRIVER
diff --git a/Makefile b/Makefile
index 165cf9783a5d..b841fb36beb2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 317ff773e1ca..b18fcb606908 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -11,6 +11,7 @@
 #define _ASM_ARC_HUGEPAGE_H
 
 #include <linux/types.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline pte_t pmd_pte(pmd_t pmd)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index e94ca72b974e..ee22d40afef4 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -37,6 +37,7 @@
 
 #include <asm/page.h>
 #include <asm/mmu.h>
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #include <linux/const.h>
 
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index e22089fb44dc..a3f0b3d50089 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -209,6 +209,7 @@
 #define HSR_EC_IABT_HYP	(0x21)
 #define HSR_EC_DABT	(0x24)
 #define HSR_EC_DABT_HYP	(0x25)
+#define HSR_EC_MAX	(0x3f)
 
 #define HSR_WFI_IS_WFE		(_AC(1, UL) << 0)
 
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index cc495d799c67..31ee468ce667 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -30,7 +30,6 @@
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
 #define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HAVE_ONE_REG
 #define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a8d656d9aec7..1c462381c225 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -20,6 +20,7 @@
 
 #else
 
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c9a2103faeb9..96dba7cd8be7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+	case KVM_CAP_NR_MEMSLOTS:
+		r = KVM_USER_MEM_SLOTS;
+		break;
 	case KVM_CAP_MSI_DEVID:
 		if (!kvm)
 			r = -EINVAL;
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e40d1955e35..96af65a30d78 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+		      hsr);
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... HSR_EC_MAX]	= kvm_handle_unknown_ec,
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x\n",
-			(unsigned int)kvm_vcpu_get_hsr(vcpu));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
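The fix above relies on a GCC designated-initializer range to pre-fill the whole dispatch table, so the later per-class entries simply override their own slots and no lookup can land on a NULL pointer. A standalone sketch of the pattern, not part of the patch (the names here are invented for illustration):

#include <stdio.h>

#define EC_MAX 0x3f

typedef int (*handler_fn)(void);

static int handle_unknown(void) { return -1; }
static int handle_wfi(void)     { return 0; }

static handler_fn handlers[] = {
	[0 ... EC_MAX] = handle_unknown,	/* default for every class */
	[0x01]         = handle_wfi,		/* specific override wins */
};

int main(void)
{
	/* any class index, known or not, resolves to a valid handler */
	printf("%d %d\n", handlers[0x01](), handlers[0x2a]());
	return 0;
}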
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index ce18c91b50a1..f0325d96b97a 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.unmap_page = xen_swiotlb_unmap_page,
 	.dma_supported = xen_swiotlb_dma_supported,
 	.set_dma_mask = xen_swiotlb_set_dma_mask,
+	.mmap = xen_swiotlb_dma_mmap,
+	.get_sgtable = xen_swiotlb_get_sgtable,
 };
 
 int __init xen_mm_init(void)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a39029b5414e..3741859765cf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -508,6 +508,16 @@ config QCOM_FALKOR_ERRATUM_1009
 
 	  If unsure, say Y.
 
+config QCOM_QDF2400_ERRATUM_0065
+	bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+	default y
+	help
+	  On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+	  ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+	  been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+	  If unsure, say Y.
+
 endmenu
 
 
@@ -1063,6 +1073,10 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 endmenu
 
 menu "Power management options"
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 05310ad8c5ab..f31c48d0cd68 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
 static inline bool system_uses_ttbr0_pan(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-		!cpus_have_cap(ARM64_HAS_PAN);
+		!cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f21fd3894370..e7705e7bb07b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,8 +30,7 @@
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 512
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HALT_POLL_NS_DEFAULT 500000
 
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 69b2fd41503c..345a072b5856 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t;
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
 #if CONFIG_PGTABLE_LEVELS == 2
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #elif CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
+#elif CONFIG_PGTABLE_LEVELS == 4
+#include <asm-generic/5level-fixup.h>
 #endif
 
 #endif	/* __ASM_PGTABLE_TYPES_H */
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 75a0f8acef66..fd691087dc9a 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
 }
 
 /**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
  * @arg: argument to pass to CPU suspend operations
  *
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a07aae5b8a2..c5c45942fb6e 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 	return 0;
 }
 
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-				       unsigned long val, void *data)
-{
-	return NOTIFY_DONE;
-}
-
 static void __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p, *cur_kprobe;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1bfe30dfbfe7..fa1b18e364fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
135 return ret; 135 return ret;
136} 136}
137 137
138static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
139{
140 u32 hsr = kvm_vcpu_get_hsr(vcpu);
141
142 kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
143 hsr, esr_get_class_string(hsr));
144
145 kvm_inject_undefined(vcpu);
146 return 1;
147}
148
138static exit_handle_fn arm_exit_handlers[] = { 149static exit_handle_fn arm_exit_handlers[] = {
150 [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
139 [ESR_ELx_EC_WFx] = kvm_handle_wfx, 151 [ESR_ELx_EC_WFx] = kvm_handle_wfx,
140 [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32, 152 [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
141 [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64, 153 [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
162 u32 hsr = kvm_vcpu_get_hsr(vcpu); 174 u32 hsr = kvm_vcpu_get_hsr(vcpu);
163 u8 hsr_ec = ESR_ELx_EC(hsr); 175 u8 hsr_ec = ESR_ELx_EC(hsr);
164 176
165 if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
166 !arm_exit_handlers[hsr_ec]) {
167 kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
168 hsr, esr_get_class_string(hsr));
169 BUG();
170 }
171
172 return arm_exit_handlers[hsr_ec]; 177 return arm_exit_handlers[hsr_ec];
173} 178}
174 179
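Note: the rewritten handler table relies on a GNU C designated range initializer. Every exception class starts out pointing at kvm_handle_unknown_ec and the named entries then override their slots, so the lookup can never return NULL and the BUG() path goes away. A minimal, self-contained sketch of the same pattern (handler names here are illustrative, not the kernel's):

#include <stdio.h>

#define EC_MAX 7

typedef int (*exit_handler_fn)(int ec);

static int handle_unknown(int ec)
{
	printf("unhandled exception class %d\n", ec);
	return 1;
}

static int handle_wfx(int ec)
{
	(void)ec;
	puts("WFI/WFE trap");
	return 1;
}

/* The range designator fills every slot with the default; named
 * entries then override theirs, so a lookup can never yield NULL. */
static exit_handler_fn handlers[EC_MAX + 1] = {
	[0 ... EC_MAX] = handle_unknown,
	[1]            = handle_wfx,
};

int main(void)
{
	handlers[1](1);		/* overridden slot */
	handlers[5](5);		/* falls back to the default */
	return 0;
}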
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index e8e7ba2bc11f..9e1d2b75eecd 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -18,14 +18,62 @@
18#include <asm/kvm_hyp.h> 18#include <asm/kvm_hyp.h>
19#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
20 20
21static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
22{
23 u64 val;
24
25 /*
26 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
27 * most TLB operations target EL2/EL0. In order to affect the
28 * guest TLBs (EL1/EL0), we need to change one of these two
29 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
30 * let's flip TGE before executing the TLB operation.
31 */
32 write_sysreg(kvm->arch.vttbr, vttbr_el2);
33 val = read_sysreg(hcr_el2);
34 val &= ~HCR_TGE;
35 write_sysreg(val, hcr_el2);
36 isb();
37}
38
39static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
40{
41 write_sysreg(kvm->arch.vttbr, vttbr_el2);
42 isb();
43}
44
45static hyp_alternate_select(__tlb_switch_to_guest,
46 __tlb_switch_to_guest_nvhe,
47 __tlb_switch_to_guest_vhe,
48 ARM64_HAS_VIRT_HOST_EXTN);
49
50static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
51{
52 /*
53 * We're done with the TLB operation, let's restore the host's
54 * view of HCR_EL2.
55 */
56 write_sysreg(0, vttbr_el2);
57 write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
58}
59
60static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
61{
62 write_sysreg(0, vttbr_el2);
63}
64
65static hyp_alternate_select(__tlb_switch_to_host,
66 __tlb_switch_to_host_nvhe,
67 __tlb_switch_to_host_vhe,
68 ARM64_HAS_VIRT_HOST_EXTN);
69
21void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) 70void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
22{ 71{
23 dsb(ishst); 72 dsb(ishst);
24 73
25 /* Switch to requested VMID */ 74 /* Switch to requested VMID */
26 kvm = kern_hyp_va(kvm); 75 kvm = kern_hyp_va(kvm);
27 write_sysreg(kvm->arch.vttbr, vttbr_el2); 76 __tlb_switch_to_guest()(kvm);
28 isb();
29 77
30 /* 78 /*
31 * We could do so much better if we had the VA as well. 79 * We could do so much better if we had the VA as well.
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
46 dsb(ish); 94 dsb(ish);
47 isb(); 95 isb();
48 96
49 write_sysreg(0, vttbr_el2); 97 __tlb_switch_to_host()(kvm);
50} 98}
51 99
52void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) 100void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
55 103
56 /* Switch to requested VMID */ 104 /* Switch to requested VMID */
57 kvm = kern_hyp_va(kvm); 105 kvm = kern_hyp_va(kvm);
58 write_sysreg(kvm->arch.vttbr, vttbr_el2); 106 __tlb_switch_to_guest()(kvm);
59 isb();
60 107
61 __tlbi(vmalls12e1is); 108 __tlbi(vmalls12e1is);
62 dsb(ish); 109 dsb(ish);
63 isb(); 110 isb();
64 111
65 write_sysreg(0, vttbr_el2); 112 __tlb_switch_to_host()(kvm);
66} 113}
67 114
68void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) 115void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
70 struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); 117 struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
71 118
72 /* Switch to requested VMID */ 119 /* Switch to requested VMID */
73 write_sysreg(kvm->arch.vttbr, vttbr_el2); 120 __tlb_switch_to_guest()(kvm);
74 isb();
75 121
76 __tlbi(vmalle1); 122 __tlbi(vmalle1);
77 dsb(nsh); 123 dsb(nsh);
78 isb(); 124 isb();
79 125
80 write_sysreg(0, vttbr_el2); 126 __tlb_switch_to_host()(kvm);
81} 127}
82 128
83void __hyp_text __kvm_flush_vm_context(void) 129void __hyp_text __kvm_flush_vm_context(void)
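Note: hyp_alternate_select() picks between the nVHE and VHE variants based on the ARM64_HAS_VIRT_HOST_EXTN capability, which is why the call sites read __tlb_switch_to_guest()(kvm): the outer call returns the selected function, the inner call invokes it. A simplified userspace model of that double-call shape (the kernel patches the selection into the instruction stream at boot rather than branching at runtime):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the ARM64_HAS_VIRT_HOST_EXTN capability test. */
static bool has_vhe(void)
{
	return false;
}

static void tlb_switch_to_guest_vhe(void)
{
	puts("VHE: set VTTBR, clear HCR_EL2.TGE, isb");
}

static void tlb_switch_to_guest_nvhe(void)
{
	puts("nVHE: set VTTBR, isb");
}

typedef void (*tlb_switch_fn)(void);

/* A plain runtime selection models the patched-in choice. */
static tlb_switch_fn tlb_switch_to_guest(void)
{
	return has_vhe() ? tlb_switch_to_guest_vhe : tlb_switch_to_guest_nvhe;
}

int main(void)
{
	tlb_switch_to_guest()();	/* same double-call shape as above */
	return 0;
}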
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 55d1e9205543..687a358a3733 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -162,7 +162,7 @@ void __init kasan_init(void)
162 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); 162 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
163 163
164 vmemmap_populate(kimg_shadow_start, kimg_shadow_end, 164 vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
165 pfn_to_nid(virt_to_pfn(_text))); 165 pfn_to_nid(virt_to_pfn(lm_alias(_text))));
166 166
167 /* 167 /*
168 * vmemmap_populate() has populated the shadow region that covers the 168 * vmemmap_populate() has populated the shadow region that covers the
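Note: virt_to_pfn() is only valid on linear-map addresses, and _text is a kernel-image symbol; lm_alias() rewrites it to its linear-map alias first. A toy model of the two mappings, with made-up base addresses purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Toy model: the kernel image is mapped at IMAGE_BASE while all of RAM
 * is also mapped at LINEAR_BASE. lm_alias() converts an image-symbol
 * address into its linear-map alias so pfn arithmetic stays valid.
 * All base addresses below are invented for illustration. */
#define PHYS_BASE	0x40000000ULL
#define IMAGE_BASE	0xffff000008000000ULL
#define LINEAR_BASE	0xffff800000000000ULL

static uint64_t pa_symbol(uint64_t img) { return img - IMAGE_BASE + PHYS_BASE; }
static uint64_t lm_alias(uint64_t img)  { return pa_symbol(img) - PHYS_BASE + LINEAR_BASE; }

int main(void)
{
	uint64_t text = IMAGE_BASE + 0x10000;	/* stand-in for _text */

	printf("image %#llx -> linear alias %#llx\n",
	       (unsigned long long)text, (unsigned long long)lm_alias(text));
	return 0;
}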
diff --git a/arch/avr32/include/asm/pgtable-2level.h b/arch/avr32/include/asm/pgtable-2level.h
index 425dd567b5b9..d5b1c63993ec 100644
--- a/arch/avr32/include/asm/pgtable-2level.h
+++ b/arch/avr32/include/asm/pgtable-2level.h
@@ -8,6 +8,7 @@
8#ifndef __ASM_AVR32_PGTABLE_2LEVEL_H 8#ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
9#define __ASM_AVR32_PGTABLE_2LEVEL_H 9#define __ASM_AVR32_PGTABLE_2LEVEL_H
10 10
11#define __ARCH_USE_5LEVEL_HACK
11#include <asm-generic/pgtable-nopmd.h> 12#include <asm-generic/pgtable-nopmd.h>
12 13
13/* 14/*
diff --git a/arch/avr32/oprofile/backtrace.c b/arch/avr32/oprofile/backtrace.c
index 75d9ad6f99cf..29cf2f191bfd 100644
--- a/arch/avr32/oprofile/backtrace.c
+++ b/arch/avr32/oprofile/backtrace.c
@@ -14,7 +14,7 @@
14 */ 14 */
15 15
16#include <linux/oprofile.h> 16#include <linux/oprofile.h>
17#include <linux/sched.h> 17#include <linux/ptrace.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19 19
20/* The first two words of each frame on the stack look like this if we have 20/* The first two words of each frame on the stack look like this if we have
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index ae6903d7fdbe..14970f11bbf2 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
2086 dma_in_cfg.en = regk_dma_no; 2086 dma_in_cfg.en = regk_dma_no;
2087 REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg); 2087 REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
2088 2088
2089 /* Disble the cryptocop. */ 2089 /* Disable the cryptocop. */
2090 rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg); 2090 rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
2091 rw_cfg.en = 0; 2091 rw_cfg.en = 0;
2092 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg); 2092 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 2a3210ba4c72..fa3a73004cc5 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -6,6 +6,7 @@
6#define _CRIS_PGTABLE_H 6#define _CRIS_PGTABLE_H
7 7
8#include <asm/page.h> 8#include <asm/page.h>
9#define __ARCH_USE_5LEVEL_HACK
9#include <asm-generic/pgtable-nopmd.h> 10#include <asm-generic/pgtable-nopmd.h>
10 11
11#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index a0513d463a1f..ab6e7e961b54 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -16,6 +16,7 @@
16#ifndef _ASM_PGTABLE_H 16#ifndef _ASM_PGTABLE_H
17#define _ASM_PGTABLE_H 17#define _ASM_PGTABLE_H
18 18
19#include <asm-generic/5level-fixup.h>
19#include <asm/mem-layout.h> 20#include <asm/mem-layout.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
21#include <asm/processor.h> 22#include <asm/processor.h>
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index 8341db67821d..7d265d28ba5e 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -1,5 +1,6 @@
1#ifndef _H8300_PGTABLE_H 1#ifndef _H8300_PGTABLE_H
2#define _H8300_PGTABLE_H 2#define _H8300_PGTABLE_H
3#define __ARCH_USE_5LEVEL_HACK
3#include <asm-generic/pgtable-nopud.h> 4#include <asm-generic/pgtable-nopud.h>
4#include <asm-generic/pgtable.h> 5#include <asm-generic/pgtable.h>
5#define pgtable_cache_init() do { } while (0) 6#define pgtable_cache_init() do { } while (0)
diff --git a/arch/h8300/kernel/ptrace_h.c b/arch/h8300/kernel/ptrace_h.c
index fe3b5673baba..f5ff3b794c85 100644
--- a/arch/h8300/kernel/ptrace_h.c
+++ b/arch/h8300/kernel/ptrace_h.c
@@ -9,7 +9,7 @@
9 */ 9 */
10 10
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12#include <linux/sched.h> 12#include <linux/sched/signal.h>
13#include <asm/ptrace.h> 13#include <asm/ptrace.h>
14 14
15#define BREAKINST 0x5730 /* trapa #3 */ 15#define BREAKINST 0x5730 /* trapa #3 */
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 49eab8136ec3..24a9177fb897 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -26,6 +26,7 @@
26 */ 26 */
27#include <linux/swap.h> 27#include <linux/swap.h>
28#include <asm/page.h> 28#include <asm/page.h>
29#define __ARCH_USE_5LEVEL_HACK
29#include <asm-generic/pgtable-nopmd.h> 30#include <asm-generic/pgtable-nopmd.h>
30 31
31/* A handy thing to have if one has the RAM. Declared in head.S */ 32/* A handy thing to have if one has the RAM. Declared in head.S */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 384794e665fc..6cc22c8d8923 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr;
587 587
588 588
589#if CONFIG_PGTABLE_LEVELS == 3 589#if CONFIG_PGTABLE_LEVELS == 3
590#define __ARCH_USE_5LEVEL_HACK
590#include <asm-generic/pgtable-nopud.h> 591#include <asm-generic/pgtable-nopud.h>
591#endif 592#endif
593#include <asm-generic/5level-fixup.h>
592#include <asm-generic/pgtable.h> 594#include <asm-generic/pgtable.h>
593 595
594#endif /* _ASM_IA64_PGTABLE_H */ 596#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index ffa3a3a2ecad..0c151e5af079 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -6,6 +6,7 @@
6#define _METAG_PGTABLE_H 6#define _METAG_PGTABLE_H
7 7
8#include <asm/pgtable-bits.h> 8#include <asm/pgtable-bits.h>
9#define __ARCH_USE_5LEVEL_HACK
9#include <asm-generic/pgtable-nopmd.h> 10#include <asm-generic/pgtable-nopmd.h>
10 11
11/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ 12/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index fd850879854d..d506bb0893f9 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t;
95# else /* CONFIG_MMU */ 95# else /* CONFIG_MMU */
96typedef struct { unsigned long ste[64]; } pmd_t; 96typedef struct { unsigned long ste[64]; } pmd_t;
97typedef struct { pmd_t pue[1]; } pud_t; 97typedef struct { pmd_t pue[1]; } pud_t;
98typedef struct { pud_t pge[1]; } pgd_t; 98typedef struct { pud_t p4e[1]; } p4d_t;
99typedef struct { p4d_t pge[1]; } pgd_t;
99# endif /* CONFIG_MMU */ 100# endif /* CONFIG_MMU */
100 101
101# define pte_val(x) ((x).pte) 102# define pte_val(x) ((x).pte)
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
index a5b427909b5c..036d56cc4591 100644
--- a/arch/mips/cavium-octeon/cpu.c
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -10,7 +10,9 @@
10#include <linux/irqflags.h> 10#include <linux/irqflags.h>
11#include <linux/notifier.h> 11#include <linux/notifier.h>
12#include <linux/prefetch.h> 12#include <linux/prefetch.h>
13#include <linux/ptrace.h>
13#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/sched/task_stack.h>
14 16
15#include <asm/cop2.h> 17#include <asm/cop2.h>
16#include <asm/current.h> 18#include <asm/current.h>
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
index 4d22365844af..cfb4a146cf17 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-crypto.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -9,6 +9,7 @@
9#include <asm/cop2.h> 9#include <asm/cop2.h>
10#include <linux/export.h> 10#include <linux/export.h>
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/sched/task_stack.h>
12 13
13#include "octeon-crypto.h" 14#include "octeon-crypto.h"
14 15
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 4b94b7fbafa3..3de786545ded 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -12,6 +12,7 @@
12#include <linux/kernel_stat.h> 12#include <linux/kernel_stat.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/sched/hotplug.h> 14#include <linux/sched/hotplug.h>
15#include <linux/sched/task_stack.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/export.h> 17#include <linux/export.h>
17 18
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 321752bcbab6..f94455f964ec 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/sched/task_stack.h> 14#include <linux/sched/task_stack.h>
15#include <linux/ptrace.h>
15#include <linux/thread_info.h> 16#include <linux/thread_info.h>
16#include <linux/bitops.h> 17#include <linux/bitops.h>
17 18
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index d21f3da7bdb6..6f94bed571c4 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -16,6 +16,7 @@
16#include <asm/cachectl.h> 16#include <asm/cachectl.h>
17#include <asm/fixmap.h> 17#include <asm/fixmap.h>
18 18
19#define __ARCH_USE_5LEVEL_HACK
19#include <asm-generic/pgtable-nopmd.h> 20#include <asm-generic/pgtable-nopmd.h>
20 21
21extern int temp_tlb_entry; 22extern int temp_tlb_entry;
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 514cbc0a6a67..130a2a6c1531 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,6 +17,7 @@
17#include <asm/cachectl.h> 17#include <asm/cachectl.h>
18#include <asm/fixmap.h> 18#include <asm/fixmap.h>
19 19
20#define __ARCH_USE_5LEVEL_HACK
20#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48) 21#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
21#include <asm-generic/pgtable-nopmd.h> 22#include <asm-generic/pgtable-nopmd.h>
22#else 23#else
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 3daa2cae50b0..1b070a76fcdd 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/sched/hotplug.h> 13#include <linux/sched/hotplug.h>
14#include <linux/sched/task_stack.h>
14#include <linux/mm.h> 15#include <linux/mm.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/smp.h> 17#include <linux/smp.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index e077ea3e11fb..e398cbc3d776 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/irqchip/mips-gic.h> 24#include <linux/irqchip/mips-gic.h>
25#include <linux/compiler.h> 25#include <linux/compiler.h>
26#include <linux/sched/task_stack.h>
26#include <linux/smp.h> 27#include <linux/smp.h>
27 28
28#include <linux/atomic.h> 29#include <linux/atomic.h>
diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c
index ea13764d0a03..621d6af5f6eb 100644
--- a/arch/mips/loongson64/loongson-3/cop2-ex.c
+++ b/arch/mips/loongson64/loongson-3/cop2-ex.c
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/notifier.h> 15#include <linux/notifier.h>
16#include <linux/ptrace.h>
16 17
17#include <asm/fpu.h> 18#include <asm/fpu.h>
18#include <asm/cop2.h> 19#include <asm/cop2.h>
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 10d86d54880a..bddf1ef553a4 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -35,6 +35,7 @@
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/sched/task_stack.h>
38#include <linux/smp.h> 39#include <linux/smp.h>
39#include <linux/irq.h> 40#include <linux/irq.h>
40 41
diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c
index 52bc5de42005..21e439b3db70 100644
--- a/arch/mips/netlogic/xlp/cop2-ex.c
+++ b/arch/mips/netlogic/xlp/cop2-ex.c
@@ -9,11 +9,14 @@
9 * Copyright (C) 2009 Wind River Systems, 9 * Copyright (C) 2009 Wind River Systems,
10 * written by Ralf Baechle <ralf@linux-mips.org> 10 * written by Ralf Baechle <ralf@linux-mips.org>
11 */ 11 */
12#include <linux/capability.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/irqflags.h> 14#include <linux/irqflags.h>
14#include <linux/notifier.h> 15#include <linux/notifier.h>
15#include <linux/prefetch.h> 16#include <linux/prefetch.h>
17#include <linux/ptrace.h>
16#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/sched/task_stack.h>
17 20
18#include <asm/cop2.h> 21#include <asm/cop2.h>
19#include <asm/current.h> 22#include <asm/current.h>
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 1f2a5bc4779e..75460e1e106b 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -9,6 +9,7 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/sched/debug.h> 11#include <linux/sched/debug.h>
12#include <linux/sched/signal.h>
12#include <linux/seq_file.h> 13#include <linux/seq_file.h>
13 14
14#include <asm/addrspace.h> 15#include <asm/addrspace.h>
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index d12879eb2b1f..83efe03d5c60 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -12,7 +12,9 @@
12#include <linux/signal.h> /* for SIGBUS */ 12#include <linux/signal.h> /* for SIGBUS */
13#include <linux/sched.h> /* show_regs(), force_sig() */ 13#include <linux/sched.h> /* show_regs(), force_sig() */
14#include <linux/sched/debug.h> 14#include <linux/sched/debug.h>
15#include <linux/sched/signal.h>
15 16
17#include <asm/ptrace.h>
16#include <asm/sn/addrs.h> 18#include <asm/sn/addrs.h>
17#include <asm/sn/arch.h> 19#include <asm/sn/arch.h>
18#include <asm/sn/sn0/hub.h> 20#include <asm/sn/sn0/hub.h>
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f5ed45e8f442..4cd47d23d81a 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -8,10 +8,13 @@
8 */ 8 */
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/sched/task_stack.h>
11#include <linux/topology.h> 12#include <linux/topology.h>
12#include <linux/nodemask.h> 13#include <linux/nodemask.h>
14
13#include <asm/page.h> 15#include <asm/page.h>
14#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/ptrace.h>
15#include <asm/sn/arch.h> 18#include <asm/sn/arch.h>
16#include <asm/sn/gda.h> 19#include <asm/sn/gda.h>
17#include <asm/sn/intr.h> 20#include <asm/sn/intr.h>
diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c
index 57d8c7486fe6..c1f12a9cf305 100644
--- a/arch/mips/sgi-ip32/ip32-berr.c
+++ b/arch/mips/sgi-ip32/ip32-berr.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/sched/debug.h> 13#include <linux/sched/debug.h>
14#include <linux/sched/signal.h>
14#include <asm/traps.h> 15#include <asm/traps.h>
15#include <linux/uaccess.h> 16#include <linux/uaccess.h>
16#include <asm/addrspace.h> 17#include <asm/addrspace.h>
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 8bd415c8729f..b3b442def423 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/sched/signal.h>
16#include <linux/notifier.h> 17#include <linux/notifier.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <linux/rtc/ds1685.h> 19#include <linux/rtc/ds1685.h>
diff --git a/arch/mn10300/include/asm/page.h b/arch/mn10300/include/asm/page.h
index 3810a6f740fd..dfe730a5ede0 100644
--- a/arch/mn10300/include/asm/page.h
+++ b/arch/mn10300/include/asm/page.h
@@ -57,6 +57,7 @@ typedef struct page *pgtable_t;
57#define __pgd(x) ((pgd_t) { (x) }) 57#define __pgd(x) ((pgd_t) { (x) })
58#define __pgprot(x) ((pgprot_t) { (x) }) 58#define __pgprot(x) ((pgprot_t) { (x) })
59 59
60#define __ARCH_USE_5LEVEL_HACK
60#include <asm-generic/pgtable-nopmd.h> 61#include <asm-generic/pgtable-nopmd.h>
61 62
62#endif /* !__ASSEMBLY__ */ 63#endif /* !__ASSEMBLY__ */
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 298393c3cb42..db4f7d179220 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -22,6 +22,7 @@
22#include <asm/tlbflush.h> 22#include <asm/tlbflush.h>
23 23
24#include <asm/pgtable-bits.h> 24#include <asm/pgtable-bits.h>
25#define __ARCH_USE_5LEVEL_HACK
25#include <asm-generic/pgtable-nopmd.h> 26#include <asm-generic/pgtable-nopmd.h>
26 27
27#define FIRST_USER_ADDRESS 0UL 28#define FIRST_USER_ADDRESS 0UL
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 3567aa7be555..ff97374ca069 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -25,6 +25,7 @@
25#ifndef __ASM_OPENRISC_PGTABLE_H 25#ifndef __ASM_OPENRISC_PGTABLE_H
26#define __ASM_OPENRISC_PGTABLE_H 26#define __ASM_OPENRISC_PGTABLE_H
27 27
28#define __ARCH_USE_5LEVEL_HACK
28#include <asm-generic/pgtable-nopmd.h> 29#include <asm-generic/pgtable-nopmd.h>
29 30
30#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 494091762bd7..97a8bc8a095c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
80config PPC 80config PPC
81 bool 81 bool
82 default y 82 default y
83 select BUILDTIME_EXTABLE_SORT 83 #
84 # Please keep this list sorted alphabetically.
85 #
86 select ARCH_HAS_DEVMEM_IS_ALLOWED
87 select ARCH_HAS_DMA_SET_COHERENT_MASK
88 select ARCH_HAS_ELF_RANDOMIZE
89 select ARCH_HAS_GCOV_PROFILE_ALL
90 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
91 select ARCH_HAS_SG_CHAIN
92 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
93 select ARCH_HAS_UBSAN_SANITIZE_ALL
94 select ARCH_HAVE_NMI_SAFE_CMPXCHG
84 select ARCH_MIGHT_HAVE_PC_PARPORT 95 select ARCH_MIGHT_HAVE_PC_PARPORT
85 select ARCH_MIGHT_HAVE_PC_SERIO 96 select ARCH_MIGHT_HAVE_PC_SERIO
97 select ARCH_SUPPORTS_ATOMIC_RMW
98 select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
99 select ARCH_USE_BUILTIN_BSWAP
100 select ARCH_USE_CMPXCHG_LOCKREF if PPC64
101 select ARCH_WANT_IPC_PARSE_VERSION
86 select BINFMT_ELF 102 select BINFMT_ELF
87 select ARCH_HAS_ELF_RANDOMIZE 103 select BUILDTIME_EXTABLE_SORT
88 select OF 104 select CLONE_BACKWARDS
89 select OF_EARLY_FLATTREE 105 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
90 select OF_RESERVED_MEM 106 select EDAC_ATOMIC_SCRUB
91 select HAVE_FTRACE_MCOUNT_RECORD 107 select EDAC_SUPPORT
108 select GENERIC_ATOMIC64 if PPC32
109 select GENERIC_CLOCKEVENTS
110 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
111 select GENERIC_CMOS_UPDATE
112 select GENERIC_CPU_AUTOPROBE
113 select GENERIC_IRQ_SHOW
114 select GENERIC_IRQ_SHOW_LEVEL
115 select GENERIC_SMP_IDLE_THREAD
116 select GENERIC_STRNCPY_FROM_USER
117 select GENERIC_STRNLEN_USER
118 select GENERIC_TIME_VSYSCALL_OLD
119 select HAVE_ARCH_AUDITSYSCALL
120 select HAVE_ARCH_HARDENED_USERCOPY
121 select HAVE_ARCH_JUMP_LABEL
122 select HAVE_ARCH_KGDB
123 select HAVE_ARCH_SECCOMP_FILTER
124 select HAVE_ARCH_TRACEHOOK
125 select HAVE_CBPF_JIT if !PPC64
126 select HAVE_CONTEXT_TRACKING if PPC64
127 select HAVE_DEBUG_KMEMLEAK
128 select HAVE_DEBUG_STACKOVERFLOW
129 select HAVE_DMA_API_DEBUG
92 select HAVE_DYNAMIC_FTRACE 130 select HAVE_DYNAMIC_FTRACE
93 select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL 131 select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
94 select HAVE_FUNCTION_TRACER 132 select HAVE_EBPF_JIT if PPC64
133 select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
134 select HAVE_FTRACE_MCOUNT_RECORD
95 select HAVE_FUNCTION_GRAPH_TRACER 135 select HAVE_FUNCTION_GRAPH_TRACER
136 select HAVE_FUNCTION_TRACER
96 select HAVE_GCC_PLUGINS 137 select HAVE_GCC_PLUGINS
97 select SYSCTL_EXCEPTION_TRACE 138 select HAVE_GENERIC_RCU_GUP
98 select VIRT_TO_BUS if !PPC64 139 select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
99 select HAVE_IDE 140 select HAVE_IDE
100 select HAVE_IOREMAP_PROT 141 select HAVE_IOREMAP_PROT
101 select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) 142 select HAVE_IRQ_EXIT_ON_IRQ_STACK
143 select HAVE_KERNEL_GZIP
102 select HAVE_KPROBES 144 select HAVE_KPROBES
103 select HAVE_OPTPROBES if PPC64
104 select HAVE_ARCH_KGDB
105 select HAVE_KRETPROBES 145 select HAVE_KRETPROBES
106 select HAVE_ARCH_TRACEHOOK 146 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
107 select HAVE_MEMBLOCK 147 select HAVE_MEMBLOCK
108 select HAVE_MEMBLOCK_NODE_MAP 148 select HAVE_MEMBLOCK_NODE_MAP
109 select HAVE_DMA_API_DEBUG 149 select HAVE_MOD_ARCH_SPECIFIC
150 select HAVE_NMI if PERF_EVENTS
110 select HAVE_OPROFILE 151 select HAVE_OPROFILE
111 select HAVE_DEBUG_KMEMLEAK 152 select HAVE_OPTPROBES if PPC64
112 select ARCH_HAS_SG_CHAIN
113 select GENERIC_ATOMIC64 if PPC32
114 select HAVE_PERF_EVENTS 153 select HAVE_PERF_EVENTS
154 select HAVE_PERF_EVENTS_NMI if PPC64
115 select HAVE_PERF_REGS 155 select HAVE_PERF_REGS
116 select HAVE_PERF_USER_STACK_DUMP 156 select HAVE_PERF_USER_STACK_DUMP
157 select HAVE_RCU_TABLE_FREE if SMP
117 select HAVE_REGS_AND_STACK_ACCESS_API 158 select HAVE_REGS_AND_STACK_ACCESS_API
118 select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) 159 select HAVE_SYSCALL_TRACEPOINTS
119 select ARCH_WANT_IPC_PARSE_VERSION 160 select HAVE_VIRT_CPU_ACCOUNTING
120 select SPARSE_IRQ
121 select IRQ_DOMAIN 161 select IRQ_DOMAIN
122 select GENERIC_IRQ_SHOW
123 select GENERIC_IRQ_SHOW_LEVEL
124 select IRQ_FORCED_THREADING 162 select IRQ_FORCED_THREADING
125 select HAVE_RCU_TABLE_FREE if SMP
126 select HAVE_SYSCALL_TRACEPOINTS
127 select HAVE_CBPF_JIT if !PPC64
128 select HAVE_EBPF_JIT if PPC64
129 select HAVE_ARCH_JUMP_LABEL
130 select ARCH_HAVE_NMI_SAFE_CMPXCHG
131 select ARCH_HAS_GCOV_PROFILE_ALL
132 select GENERIC_SMP_IDLE_THREAD
133 select GENERIC_CMOS_UPDATE
134 select GENERIC_TIME_VSYSCALL_OLD
135 select GENERIC_CLOCKEVENTS
136 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
137 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
138 select GENERIC_STRNCPY_FROM_USER
139 select GENERIC_STRNLEN_USER
140 select HAVE_MOD_ARCH_SPECIFIC
141 select MODULES_USE_ELF_RELA 163 select MODULES_USE_ELF_RELA
142 select CLONE_BACKWARDS
143 select ARCH_USE_BUILTIN_BSWAP
144 select OLD_SIGSUSPEND
145 select OLD_SIGACTION if PPC32
146 select HAVE_DEBUG_STACKOVERFLOW
147 select HAVE_IRQ_EXIT_ON_IRQ_STACK
148 select ARCH_USE_CMPXCHG_LOCKREF if PPC64
149 select HAVE_ARCH_AUDITSYSCALL
150 select ARCH_SUPPORTS_ATOMIC_RMW
151 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
152 select NO_BOOTMEM 164 select NO_BOOTMEM
153 select HAVE_GENERIC_RCU_GUP 165 select OF
154 select HAVE_PERF_EVENTS_NMI if PPC64 166 select OF_EARLY_FLATTREE
155 select HAVE_NMI if PERF_EVENTS 167 select OF_RESERVED_MEM
156 select EDAC_SUPPORT 168 select OLD_SIGACTION if PPC32
157 select EDAC_ATOMIC_SCRUB 169 select OLD_SIGSUSPEND
158 select ARCH_HAS_DMA_SET_COHERENT_MASK 170 select SPARSE_IRQ
159 select ARCH_HAS_DEVMEM_IS_ALLOWED 171 select SYSCTL_EXCEPTION_TRACE
160 select HAVE_ARCH_SECCOMP_FILTER 172 select VIRT_TO_BUS if !PPC64
161 select ARCH_HAS_UBSAN_SANITIZE_ALL 173 #
162 select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT 174 # Please keep this list sorted alphabetically.
163 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS 175 #
164 select GENERIC_CPU_AUTOPROBE
165 select HAVE_VIRT_CPU_ACCOUNTING
166 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
167 select HAVE_ARCH_HARDENED_USERCOPY
168 select HAVE_KERNEL_GZIP
169 select HAVE_CONTEXT_TRACKING if PPC64
170 176
171config GENERIC_CSUM 177config GENERIC_CSUM
172 def_bool n 178 def_bool n
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 31286fa7873c..19b0d1a81959 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -72,8 +72,15 @@ GNUTARGET := powerpc
72MULTIPLEWORD := -mmultiple 72MULTIPLEWORD := -mmultiple
73endif 73endif
74 74
75cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) 75ifdef CONFIG_PPC64
76cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
77cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
78aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
79aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
80endif
81
76cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian 82cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
83cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
77ifneq ($(cc-name),clang) 84ifneq ($(cc-name),clang)
78 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align 85 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
79endif 86endif
@@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
113CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) 120CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
114AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) 121AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
115else 122else
123CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
116CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) 124CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
125AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
117endif 126endif
118CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) 127CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
119CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) 128CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e72109df2..f080abfc2f83 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
68 } 68 }
69 69
70#ifdef CONFIG_PPC64_BOOT_WRAPPER 70#ifdef CONFIG_PPC64_BOOT_WRAPPER
71 . = ALIGN(256);
71 .got : 72 .got :
72 { 73 {
73 __toc_start = .; 74 __toc_start = .;
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d56eba..411994551afc 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
52{ 52{
53 u32 *key = crypto_tfm_ctx(tfm); 53 u32 *key = crypto_tfm_ctx(tfm);
54 54
55 *key = 0; 55 *key = ~0;
56 56
57 return 0; 57 return 0;
58} 58}
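Note: CRC32C must be seeded with all ones; with a zero seed, leading zero bytes leave the CRC unchanged and the digest is wrong, which is what the *key = ~0 fix addresses. A bitwise sketch of the conventional seed/finalize pair (not the vpmsum implementation):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78). The
 * caller seeds with ~0 and inverts the result; a zero seed would let
 * leading zero bytes pass through unnoticed. */
static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const uint8_t msg[] = "123456789";
	uint32_t crc = ~crc32c(~0u, msg, sizeof(msg) - 1);

	printf("%#x\n", crc);	/* 0xe3069283, the standard check value */
	return 0;
}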
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 73eb794d6163..bc5fdfd22788 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -51,6 +51,10 @@
51#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) 51#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
52#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) 52#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
53 53
54/* Put a PPC bit into a "normal" bit position */
55#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
56 ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
57
54#include <asm/barrier.h> 58#include <asm/barrier.h>
55 59
56/* Macro for generating the ***_bits() functions */ 60/* Macro for generating the ***_bits() functions */
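Note: PPC_BITEXTRACT complements PPC_BIT/PPC_BITMASK for IBM bit numbering, where bit 0 is the most significant bit of the 64-bit word. A small standalone demonstration, reusing the definitions above:

#include <stdint.h>
#include <stdio.h>

/* IBM bit numbering: bit 0 is the most significant bit of the word. */
#define PPC_BITLSHIFT(be)	(63 - (be))
#define PPC_BIT(bit)		(1ULL << PPC_BITLSHIFT(bit))
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

int main(void)
{
	uint64_t srr1 = PPC_BIT(45) | PPC_BIT(36);

	/* Gather scattered IBM-numbered bits into a packed code, the
	 * same trick the P9_SRR1_MC_IFETCH() macro in mce.h uses. */
	unsigned int code = PPC_BITEXTRACT(srr1, 45, 0) |
			    PPC_BITEXTRACT(srr1, 44, 1) |
			    PPC_BITEXTRACT(srr1, 43, 2) |
			    PPC_BITEXTRACT(srr1, 36, 3);

	printf("code = %u\n", code);	/* 1 | 8 = 9 */
	return 0;
}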
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 012223638815..26ed228d4dc6 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H 1#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
2#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H 2#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
3 3
4#define __ARCH_USE_5LEVEL_HACK
4#include <asm-generic/pgtable-nopmd.h> 5#include <asm-generic/pgtable-nopmd.h>
5 6
6#include <asm/book3s/32/hash.h> 7#include <asm/book3s/32/hash.h>
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 1eeeb72c7015..8f4d41936e5a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1,9 +1,12 @@
1#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ 1#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
2#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ 2#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
3 3
4#include <asm-generic/5level-fixup.h>
5
4#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
5#include <linux/mmdebug.h> 7#include <linux/mmdebug.h>
6#endif 8#endif
9
7/* 10/*
8 * Common bits between hash and Radix page table 11 * Common bits between hash and Radix page table
9 */ 12 */
@@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
347 __r; \ 350 __r; \
348}) 351})
349 352
353static inline int __pte_write(pte_t pte)
354{
355 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
356}
357
358#ifdef CONFIG_NUMA_BALANCING
359#define pte_savedwrite pte_savedwrite
360static inline bool pte_savedwrite(pte_t pte)
361{
362 /*
363 * Saved write ptes are prot none ptes that don't have
364 * the privileged bit set. We mark prot none as one which has
365 * present and privileged bits set and RWX cleared. To mark
366 * protnone which used to have _PAGE_WRITE set we clear
367 * the privileged bit.
368 */
369 return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
370}
371#else
372#define pte_savedwrite pte_savedwrite
373static inline bool pte_savedwrite(pte_t pte)
374{
375 return false;
376}
377#endif
378
379static inline int pte_write(pte_t pte)
380{
381 return __pte_write(pte) || pte_savedwrite(pte);
382}
383
350#define __HAVE_ARCH_PTEP_SET_WRPROTECT 384#define __HAVE_ARCH_PTEP_SET_WRPROTECT
351static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, 385static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
352 pte_t *ptep) 386 pte_t *ptep)
353{ 387{
354 if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) 388 if (__pte_write(*ptep))
355 return; 389 pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
356 390 else if (unlikely(pte_savedwrite(*ptep)))
357 pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); 391 pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
358} 392}
359 393
360static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, 394static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
361 unsigned long addr, pte_t *ptep) 395 unsigned long addr, pte_t *ptep)
362{ 396{
363 if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) 397 /*
364 return; 398 * We should not find protnone for hugetlb, but this completes the
365 399 * interface.
366 pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); 400 */
401 if (__pte_write(*ptep))
402 pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
403 else if (unlikely(pte_savedwrite(*ptep)))
404 pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
367} 405}
368 406
369#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 407#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
397 pte_update(mm, addr, ptep, ~0UL, 0, 0); 435 pte_update(mm, addr, ptep, ~0UL, 0, 0);
398} 436}
399 437
400static inline int pte_write(pte_t pte)
401{
402 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
403}
404
405static inline int pte_dirty(pte_t pte) 438static inline int pte_dirty(pte_t pte)
406{ 439{
407 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY)); 440 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
@@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
465 VM_BUG_ON(!pte_protnone(pte)); 498 VM_BUG_ON(!pte_protnone(pte));
466 return __pte(pte_val(pte) | _PAGE_PRIVILEGED); 499 return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
467} 500}
468 501#else
469#define pte_savedwrite pte_savedwrite 502#define pte_clear_savedwrite pte_clear_savedwrite
470static inline bool pte_savedwrite(pte_t pte) 503static inline pte_t pte_clear_savedwrite(pte_t pte)
471{ 504{
472 /* 505 VM_WARN_ON(1);
473 * Saved write ptes are prot none ptes that doesn't have 506 return __pte(pte_val(pte) & ~_PAGE_WRITE);
474 * privileged bit sit. We mark prot none as one which has
475 * present and pviliged bit set and RWX cleared. To mark
476 * protnone which used to have _PAGE_WRITE set we clear
477 * the privileged bit.
478 */
479 VM_BUG_ON(!pte_protnone(pte));
480 return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
481} 507}
482#endif /* CONFIG_NUMA_BALANCING */ 508#endif /* CONFIG_NUMA_BALANCING */
483 509
@@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte)
506/* Generic modifiers for PTE bits */ 532/* Generic modifiers for PTE bits */
507static inline pte_t pte_wrprotect(pte_t pte) 533static inline pte_t pte_wrprotect(pte_t pte)
508{ 534{
535 if (unlikely(pte_savedwrite(pte)))
536 return pte_clear_savedwrite(pte);
509 return __pte(pte_val(pte) & ~_PAGE_WRITE); 537 return __pte(pte_val(pte) & ~_PAGE_WRITE);
510} 538}
511 539
@@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd)
926 954
927#define __HAVE_ARCH_PMD_WRITE 955#define __HAVE_ARCH_PMD_WRITE
928#define pmd_write(pmd) pte_write(pmd_pte(pmd)) 956#define pmd_write(pmd) pte_write(pmd_pte(pmd))
957#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
929#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd)) 958#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
930 959
931#ifdef CONFIG_TRANSPARENT_HUGEPAGE 960#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
982static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, 1011static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
983 pmd_t *pmdp) 1012 pmd_t *pmdp)
984{ 1013{
985 1014 if (__pmd_write((*pmdp)))
986 if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0) 1015 pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
987 return; 1016 else if (unlikely(pmd_savedwrite(*pmdp)))
988 1017 pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
989 pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
990} 1018}
991 1019
992static inline int pmd_trans_huge(pmd_t pmd) 1020static inline int pmd_trans_huge(pmd_t pmd)
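Note: this hunk makes pte_write() treat a NUMA saved-write pte as writable and teaches the wrprotect paths to clear the saved-write marker instead of _PAGE_WRITE. The encoding: prot-none is "privileged set, RWX clear", and a prot-none pte that used to be writable additionally clears the privileged bit. A simplified model with illustrative bit values (not the real _PAGE_* layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only -- not the real _PAGE_* layout. */
#define PAGE_READ	0x1ULL
#define PAGE_WRITE	0x2ULL
#define PAGE_EXEC	0x4ULL
#define PAGE_RWX	(PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_PRIVILEGED	0x8ULL

/* prot-none: RWX clear, privileged set. Saved-write prot-none: the
 * privileged bit is cleared too, remembering the pte was writable. */
static bool pte_savedwrite(uint64_t pte)
{
	return !(pte & (PAGE_RWX | PAGE_PRIVILEGED));
}

static bool __pte_write(uint64_t pte)
{
	return pte & PAGE_WRITE;
}

static bool pte_write(uint64_t pte)
{
	return __pte_write(pte) || pte_savedwrite(pte);
}

int main(void)
{
	uint64_t normal   = PAGE_READ | PAGE_WRITE;
	uint64_t protnone = PAGE_PRIVILEGED;	/* was read-only */
	uint64_t saved    = 0;			/* prot-none, was writable */

	printf("%d %d %d\n", (int)pte_write(normal),
	       (int)pte_write(protnone), (int)pte_write(saved));	/* 1 0 1 */
	return 0;
}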
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 4e63787dc3be..842124b199b5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
112 112
113#ifdef __powerpc64__ 113#ifdef __powerpc64__
114 res += (__force u64)addend; 114 res += (__force u64)addend;
115 return (__force __wsum)((u32)res + (res >> 32)); 115 return (__force __wsum) from64to32(res);
116#else 116#else
117 asm("addc %0,%0,%1;" 117 asm("addc %0,%0,%1;"
118 "addze %0,%0;" 118 "addze %0,%0;"
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index fd321eb423cb..155731557c9b 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
70 std r0,0(r1); \ 70 std r0,0(r1); \
71 ptesync; \ 71 ptesync; \
72 ld r0,0(r1); \ 72 ld r0,0(r1); \
731: cmpd cr0,r0,r0; \ 73236: cmpd cr0,r0,r0; \
74 bne 1b; \ 74 bne 236b; \
75 IDLE_INST; \ 75 IDLE_INST; \
76 76
77#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ 77#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 93b9b84568e8..09bde6e34f5d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
144#define ARCH_DLINFO_CACHE_GEOMETRY \ 144#define ARCH_DLINFO_CACHE_GEOMETRY \
145 NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \ 145 NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \
146 NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \ 146 NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \
147 NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \ 147 NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \
148 NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \ 148 NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \
149 NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \ 149 NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \
150 NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \ 150 NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \
151 NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \ 151 NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index f97d8cb6bdf6..ed62efe01e49 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -66,6 +66,55 @@
66 66
67#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \ 67#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
68 P8_DSISR_MC_ERAT_MULTIHIT_SEC) 68 P8_DSISR_MC_ERAT_MULTIHIT_SEC)
69
70/*
71 * Machine Check bits on power9
72 */
73#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1)
74
75#define P9_SRR1_MC_IFETCH(srr1) ( \
76 PPC_BITEXTRACT(srr1, 45, 0) | \
77 PPC_BITEXTRACT(srr1, 44, 1) | \
78 PPC_BITEXTRACT(srr1, 43, 2) | \
79 PPC_BITEXTRACT(srr1, 36, 3) )
80
81/* 0 is reserved */
82#define P9_SRR1_MC_IFETCH_UE 1
83#define P9_SRR1_MC_IFETCH_SLB_PARITY 2
84#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3
85#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4
86#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5
87#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6
88/* 7 is reserved */
89#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8
90#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9
91/* 10 ? */
92#define P9_SRR1_MC_IFETCH_RA 11
93#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12
94#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13
95#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14
96#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
97
98/* DSISR bits for machine check (On Power9) */
99#define P9_DSISR_MC_UE (PPC_BIT(48))
100#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
101#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50))
102#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51))
103#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
104#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
105#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54))
106#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55))
107#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56))
108#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57))
109#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58))
110#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59))
111#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60))
112
113/* SLB error bits */
114#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \
115 P9_DSISR_MC_SLB_PARITY_MFSLB | \
116 P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
117
69enum MCE_Version { 118enum MCE_Version {
70 MCE_V1 = 1, 119 MCE_V1 = 1,
71}; 120};
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
93 MCE_ERROR_TYPE_SLB = 2, 142 MCE_ERROR_TYPE_SLB = 2,
94 MCE_ERROR_TYPE_ERAT = 3, 143 MCE_ERROR_TYPE_ERAT = 3,
95 MCE_ERROR_TYPE_TLB = 4, 144 MCE_ERROR_TYPE_TLB = 4,
145 MCE_ERROR_TYPE_USER = 5,
146 MCE_ERROR_TYPE_RA = 6,
147 MCE_ERROR_TYPE_LINK = 7,
96}; 148};
97 149
98enum MCE_UeErrorType { 150enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
121 MCE_TLB_ERROR_MULTIHIT = 2, 173 MCE_TLB_ERROR_MULTIHIT = 2,
122}; 174};
123 175
176enum MCE_UserErrorType {
177 MCE_USER_ERROR_INDETERMINATE = 0,
178 MCE_USER_ERROR_TLBIE = 1,
179};
180
181enum MCE_RaErrorType {
182 MCE_RA_ERROR_INDETERMINATE = 0,
183 MCE_RA_ERROR_IFETCH = 1,
184 MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
185 MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
186 MCE_RA_ERROR_LOAD = 4,
187 MCE_RA_ERROR_STORE = 5,
188 MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
189 MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
190 MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
191};
192
193enum MCE_LinkErrorType {
194 MCE_LINK_ERROR_INDETERMINATE = 0,
195 MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
196 MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
197 MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
198 MCE_LINK_ERROR_STORE_TIMEOUT = 4,
199 MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
200};
201
124struct machine_check_event { 202struct machine_check_event {
125 enum MCE_Version version:8; /* 0x00 */ 203 enum MCE_Version version:8; /* 0x00 */
126 uint8_t in_use; /* 0x01 */ 204 uint8_t in_use; /* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
166 uint64_t effective_address; 244 uint64_t effective_address;
167 uint8_t reserved_2[16]; 245 uint8_t reserved_2[16];
168 } tlb_error; 246 } tlb_error;
247
248 struct {
249 enum MCE_UserErrorType user_error_type:8;
250 uint8_t effective_address_provided;
251 uint8_t reserved_1[6];
252 uint64_t effective_address;
253 uint8_t reserved_2[16];
254 } user_error;
255
256 struct {
257 enum MCE_RaErrorType ra_error_type:8;
258 uint8_t effective_address_provided;
259 uint8_t reserved_1[6];
260 uint64_t effective_address;
261 uint8_t reserved_2[16];
262 } ra_error;
263
264 struct {
265 enum MCE_LinkErrorType link_error_type:8;
266 uint8_t effective_address_provided;
267 uint8_t reserved_1[6];
268 uint64_t effective_address;
269 uint8_t reserved_2[16];
270 } link_error;
169 } u; 271 } u;
170}; 272};
171 273
@@ -176,8 +278,12 @@ struct mce_error_info {
176 enum MCE_SlbErrorType slb_error_type:8; 278 enum MCE_SlbErrorType slb_error_type:8;
177 enum MCE_EratErrorType erat_error_type:8; 279 enum MCE_EratErrorType erat_error_type:8;
178 enum MCE_TlbErrorType tlb_error_type:8; 280 enum MCE_TlbErrorType tlb_error_type:8;
281 enum MCE_UserErrorType user_error_type:8;
282 enum MCE_RaErrorType ra_error_type:8;
283 enum MCE_LinkErrorType link_error_type:8;
179 } u; 284 } u;
180 uint8_t reserved[2]; 285 enum MCE_Severity severity:8;
286 enum MCE_Initiator initiator:8;
181}; 287};
182 288
183#define MAX_MC_EVT 100 289#define MAX_MC_EVT 100
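Note: P9_SRR1_MC_IFETCH() gathers four scattered SRR1 bits into the 4-bit reason codes defined above. A hedged decode sketch using the same bit positions (reason strings abbreviated):

#include <stdint.h>
#include <stdio.h>

#define PPC_BITLSHIFT(be)	(63 - (be))
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

/* Same bit gathering as the P9_SRR1_MC_IFETCH() macro above. */
static unsigned int p9_srr1_mc_ifetch(uint64_t srr1)
{
	return PPC_BITEXTRACT(srr1, 45, 0) |
	       PPC_BITEXTRACT(srr1, 44, 1) |
	       PPC_BITEXTRACT(srr1, 43, 2) |
	       PPC_BITEXTRACT(srr1, 36, 3);
}

int main(void)
{
	uint64_t srr1 = 1ULL << PPC_BITLSHIFT(44);	/* reason code 2 */

	switch (p9_srr1_mc_ifetch(srr1)) {
	case 2:
		puts("SLB parity error on instruction fetch");
		break;
	case 3:
		puts("SLB multihit on instruction fetch");
		break;
	default:
		puts("other/unknown ifetch machine check");
		break;
	}
	return 0;
}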
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index ba9921bf202e..5134ade2e850 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H 1#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
2#define _ASM_POWERPC_NOHASH_32_PGTABLE_H 2#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
3 3
4#define __ARCH_USE_5LEVEL_HACK
4#include <asm-generic/pgtable-nopmd.h> 5#include <asm-generic/pgtable-nopmd.h>
5 6
6#ifndef __ASSEMBLY__ 7#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index d0db98793dd8..9f4de0a1035e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -1,5 +1,8 @@
1#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H 1#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
2#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H 2#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
3
4#include <asm-generic/5level-fixup.h>
5
3/* 6/*
4 * Entries per page directory level. The PTE level must use a 64b record 7 * Entries per page directory level. The PTE level must use a 64b record
5 * for each page table entry. The PMD and PGD level use a 32b record for 8 * for each page table entry. The PMD and PGD level use a 32b record for
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 55b28ef3409a..1facb584dd29 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H 1#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
2#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H 2#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
3 3
4#define __ARCH_USE_5LEVEL_HACK
4#include <asm-generic/pgtable-nopud.h> 5#include <asm-generic/pgtable-nopud.h>
5 6
6 7
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 0cd8a3852763..e5805ad78e12 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
230 return ((hpd_val(hpd) & 0x4) != 0); 230 return ((hpd_val(hpd) & 0x4) != 0);
231#else 231#else
232 /* We clear the top bit to indicate hugepd */ 232 /* We clear the top bit to indicate hugepd */
233 return ((hpd_val(hpd) & PD_HUGE) == 0); 233 return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
234#endif 234#endif
235} 235}
236 236
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d99bd442aacb..e7d6d86563ee 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -284,6 +284,13 @@
284#define PPC_INST_BRANCH_COND 0x40800000 284#define PPC_INST_BRANCH_COND 0x40800000
285#define PPC_INST_LBZCIX 0x7c0006aa 285#define PPC_INST_LBZCIX 0x7c0006aa
286#define PPC_INST_STBCIX 0x7c0007aa 286#define PPC_INST_STBCIX 0x7c0007aa
287#define PPC_INST_LWZX 0x7c00002e
288#define PPC_INST_LFSX 0x7c00042e
289#define PPC_INST_STFSX 0x7c00052e
290#define PPC_INST_LFDX 0x7c0004ae
291#define PPC_INST_STFDX 0x7c0005ae
292#define PPC_INST_LVX 0x7c0000ce
293#define PPC_INST_STVX 0x7c0001ce
287 294
288/* macros to insert fields into opcodes */ 295/* macros to insert fields into opcodes */
289#define ___PPC_RA(a) (((a) & 0x1f) << 16) 296#define ___PPC_RA(a) (((a) & 0x1f) << 16)
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 4a90634e8322..35c00d7a0cf8 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -160,12 +160,18 @@ struct of_drconf_cell {
160#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */ 160#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */
161#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */ 161#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */
162#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */ 162#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */
163#define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */ 163/* MMU Base Architecture */
164#define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash MMU supported */ 164#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */
165#define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */ 165#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */
166#define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */ 166#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */
167#define OV5_MMU_SLB 0x1800 /* always use SLB */ 167#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */
168#define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */ 168#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */
169#define OV5_NMMU 0x1820 /* Nest MMU Available */
170/* Hash Table Extensions */
171#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */
172#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */
173/* Radix Table Extensions */
174#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */
169 175
170/* Option Vector 6: IBM PAPR hints */ 176/* Option Vector 6: IBM PAPR hints */
171#define OV6_LINUX 0x02 /* Linux is our OS */ 177#define OV6_LINUX 0x02 /* Linux is our OS */
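Note: each OV5_* constant encodes both where and what to test: the upper byte is the byte index into option vector 5 and the low byte is the bit mask, split by the OV5_INDX()/OV5_FEAT() helpers elsewhere in prom.h (reproduced below on that assumption). A standalone illustration of the new MMU encoding:

#include <stdio.h>

/* Each OV5_* constant packs (byte index << 8) | bit mask. */
#define OV5_INDX(x)	((x) >> 8)
#define OV5_FEAT(x)	((x) & 0xff)

#define OV5_MMU_SUPPORT	0x18C0	/* mode mask: byte 24, bits 0xC0 */
#define OV5_MMU_RADIX	0x1840

int main(void)
{
	unsigned char vec5[32] = { 0 };

	/* Advertise radix-only support in the vector. */
	vec5[OV5_INDX(OV5_MMU_RADIX)] |= OV5_FEAT(OV5_MMU_RADIX);

	int mode = vec5[OV5_INDX(OV5_MMU_SUPPORT)] & OV5_FEAT(OV5_MMU_SUPPORT);

	printf("byte %d, mask %#x -> mode %#x (radix-only is %#x)\n",
	       OV5_INDX(OV5_MMU_SUPPORT), OV5_FEAT(OV5_MMU_SUPPORT),
	       mode, OV5_FEAT(OV5_MMU_RADIX));
	return 0;
}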
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index bb7a1890aeb7..e79b9daa873c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
77extern void __flush_tlb_power9(unsigned int action); 77extern void __flush_tlb_power9(unsigned int action);
78extern long __machine_check_early_realmode_p7(struct pt_regs *regs); 78extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
79extern long __machine_check_early_realmode_p8(struct pt_regs *regs); 79extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
80extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
80#endif /* CONFIG_PPC64 */ 81#endif /* CONFIG_PPC64 */
81#if defined(CONFIG_E500) 82#if defined(CONFIG_E500)
82extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); 83extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
540 .cpu_setup = __setup_cpu_power9, 541 .cpu_setup = __setup_cpu_power9,
541 .cpu_restore = __restore_cpu_power9, 542 .cpu_restore = __restore_cpu_power9,
542 .flush_tlb = __flush_tlb_power9, 543 .flush_tlb = __flush_tlb_power9,
544 .machine_check_early = __machine_check_early_realmode_p9,
543 .platform = "power9", 545 .platform = "power9",
544 }, 546 },
545 { /* Power9 */ 547 { /* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
559 .cpu_setup = __setup_cpu_power9, 561 .cpu_setup = __setup_cpu_power9,
560 .cpu_restore = __restore_cpu_power9, 562 .cpu_restore = __restore_cpu_power9,
561 .flush_tlb = __flush_tlb_power9, 563 .flush_tlb = __flush_tlb_power9,
564 .machine_check_early = __machine_check_early_realmode_p9,
562 .platform = "power9", 565 .platform = "power9",
563 }, 566 },
564 { /* Cell Broadband Engine */ 567 { /* Cell Broadband Engine */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 5f61cc0349c0..995728736677 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -276,19 +276,21 @@ power_enter_stop:
276 */ 276 */
277 andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED 277 andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
278 clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ 278 clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
279 bne 1f 279 bne .Lhandle_esl_ec_set
280 IDLE_STATE_ENTER_SEQ(PPC_STOP) 280 IDLE_STATE_ENTER_SEQ(PPC_STOP)
281 li r3,0 /* Since we didn't lose state, return 0 */ 281 li r3,0 /* Since we didn't lose state, return 0 */
282 b pnv_wakeup_noloss 282 b pnv_wakeup_noloss
283
284.Lhandle_esl_ec_set:
283/* 285/*
284 * Check if the requested state is a deep idle state. 286 * Check if the requested state is a deep idle state.
285 */ 287 */
2861: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) 288 LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
287 ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) 289 ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
288 cmpd r3,r4 290 cmpd r3,r4
289 bge 2f 291 bge .Lhandle_deep_stop
290 IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) 292 IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
2912: 293.Lhandle_deep_stop:
292/* 294/*
293 * Entering deep idle state. 295 * Entering deep idle state.
294 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to 296 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
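
For readers who don't speak Book3S assembly, the renamed branches above implement a three-way decision on the requested stop state. A standalone C rendering of that flow; the mask value and helpers are invented for the demo:

#include <stdio.h>

#define DEMO_PSSCR_EC_ESL	0x00300000UL	/* stand-in, not the real mask */

static unsigned long pnv_first_deep_stop_state = 4;

static void enter_stop(const char *kind) { printf("stop: %s\n", kind); }

static void power_enter_stop_sketch(unsigned long psscr)
{
	unsigned long rl = psscr & 0xf;	/* bits 60:63, Requested Level */

	if (!(psscr & DEMO_PSSCR_EC_ESL)) {
		enter_stop("noloss");	/* no state loss, return 0 on wakeup */
	} else if (rl < pnv_first_deep_stop_state) {
		enter_stop("shallow");	/* SPRs survive */
	} else {
		/* .Lhandle_deep_stop: save SPRs, clear thread bit, then stop */
		enter_stop("deep");
	}
}

int main(void)
{
	power_enter_stop_sketch(0x2);				/* noloss */
	power_enter_stop_sketch(DEMO_PSSCR_EC_ESL | 0x5);	/* deep */
	return 0;
}
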
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index c6923ff45131..a1475e6aef3a 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
58 case MCE_ERROR_TYPE_TLB: 58 case MCE_ERROR_TYPE_TLB:
59 mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type; 59 mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
60 break; 60 break;
61 case MCE_ERROR_TYPE_USER:
62 mce->u.user_error.user_error_type = mce_err->u.user_error_type;
63 break;
64 case MCE_ERROR_TYPE_RA:
65 mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
66 break;
67 case MCE_ERROR_TYPE_LINK:
68 mce->u.link_error.link_error_type = mce_err->u.link_error_type;
69 break;
61 case MCE_ERROR_TYPE_UNKNOWN: 70 case MCE_ERROR_TYPE_UNKNOWN:
62 default: 71 default:
63 break; 72 break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
90 mce->gpr3 = regs->gpr[3]; 99 mce->gpr3 = regs->gpr[3];
91 mce->in_use = 1; 100 mce->in_use = 1;
92 101
93 mce->initiator = MCE_INITIATOR_CPU;
94 /* Mark it recovered if we have handled it and MSR(RI=1). */ 102 /* Mark it recovered if we have handled it and MSR(RI=1). */
95 if (handled && (regs->msr & MSR_RI)) 103 if (handled && (regs->msr & MSR_RI))
96 mce->disposition = MCE_DISPOSITION_RECOVERED; 104 mce->disposition = MCE_DISPOSITION_RECOVERED;
97 else 105 else
98 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; 106 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
99 mce->severity = MCE_SEV_ERROR_SYNC; 107
108 mce->initiator = mce_err->initiator;
109 mce->severity = mce_err->severity;
100 110
101 /* 111 /*
102 * Populate the mce error_type and type-specific error_type. 112 * Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
115 } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) { 125 } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
116 mce->u.erat_error.effective_address_provided = true; 126 mce->u.erat_error.effective_address_provided = true;
117 mce->u.erat_error.effective_address = addr; 127 mce->u.erat_error.effective_address = addr;
128 } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
129 mce->u.user_error.effective_address_provided = true;
130 mce->u.user_error.effective_address = addr;
131 } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
132 mce->u.ra_error.effective_address_provided = true;
133 mce->u.ra_error.effective_address = addr;
134 } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
135 mce->u.link_error.effective_address_provided = true;
136 mce->u.link_error.effective_address = addr;
118 } else if (mce->error_type == MCE_ERROR_TYPE_UE) { 137 } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
119 mce->u.ue_error.effective_address_provided = true; 138 mce->u.ue_error.effective_address_provided = true;
120 mce->u.ue_error.effective_address = addr; 139 mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
239 "Parity", 258 "Parity",
240 "Multihit", 259 "Multihit",
241 }; 260 };
261 static const char *mc_user_types[] = {
262 "Indeterminate",
263 "tlbie(l) invalid",
264 };
265 static const char *mc_ra_types[] = {
266 "Indeterminate",
267 "Instruction fetch (bad)",
268 "Page table walk ifetch (bad)",
269 "Page table walk ifetch (foreign)",
270 "Load (bad)",
271 "Store (bad)",
272 "Page table walk Load/Store (bad)",
273 "Page table walk Load/Store (foreign)",
274 "Load/Store (foreign)",
275 };
276 static const char *mc_link_types[] = {
277 "Indeterminate",
278 "Instruction fetch (timeout)",
279 "Page table walk ifetch (timeout)",
280 "Load (timeout)",
281 "Store (timeout)",
282 "Page table walk Load/Store (timeout)",
283 };
242 284
243 /* Print things out */ 285 /* Print things out */
244 if (evt->version != MCE_V1) { 286 if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
315 printk("%s Effective address: %016llx\n", 357 printk("%s Effective address: %016llx\n",
316 level, evt->u.tlb_error.effective_address); 358 level, evt->u.tlb_error.effective_address);
317 break; 359 break;
360 case MCE_ERROR_TYPE_USER:
361 subtype = evt->u.user_error.user_error_type <
362 ARRAY_SIZE(mc_user_types) ?
363 mc_user_types[evt->u.user_error.user_error_type]
364 : "Unknown";
365 printk("%s Error type: User [%s]\n", level, subtype);
366 if (evt->u.user_error.effective_address_provided)
367 printk("%s Effective address: %016llx\n",
368 level, evt->u.user_error.effective_address);
369 break;
370 case MCE_ERROR_TYPE_RA:
371 subtype = evt->u.ra_error.ra_error_type <
372 ARRAY_SIZE(mc_ra_types) ?
373 mc_ra_types[evt->u.ra_error.ra_error_type]
374 : "Unknown";
375 printk("%s Error type: Real address [%s]\n", level, subtype);
376 if (evt->u.ra_error.effective_address_provided)
377 printk("%s Effective address: %016llx\n",
378 level, evt->u.ra_error.effective_address);
379 break;
380 case MCE_ERROR_TYPE_LINK:
381 subtype = evt->u.link_error.link_error_type <
382 ARRAY_SIZE(mc_link_types) ?
383 mc_link_types[evt->u.link_error.link_error_type]
384 : "Unknown";
385 printk("%s Error type: Link [%s]\n", level, subtype);
386 if (evt->u.link_error.effective_address_provided)
387 printk("%s Effective address: %016llx\n",
388 level, evt->u.link_error.effective_address);
389 break;
318 default: 390 default:
319 case MCE_ERROR_TYPE_UNKNOWN: 391 case MCE_ERROR_TYPE_UNKNOWN:
320 printk("%s Error type: Unknown\n", level); 392 printk("%s Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
341 if (evt->u.tlb_error.effective_address_provided) 413 if (evt->u.tlb_error.effective_address_provided)
342 return evt->u.tlb_error.effective_address; 414 return evt->u.tlb_error.effective_address;
343 break; 415 break;
416 case MCE_ERROR_TYPE_USER:
417 if (evt->u.user_error.effective_address_provided)
418 return evt->u.user_error.effective_address;
419 break;
420 case MCE_ERROR_TYPE_RA:
421 if (evt->u.ra_error.effective_address_provided)
422 return evt->u.ra_error.effective_address;
423 break;
424 case MCE_ERROR_TYPE_LINK:
425 if (evt->u.link_error.effective_address_provided)
426 return evt->u.link_error.effective_address;
427 break;
344 default: 428 default:
345 case MCE_ERROR_TYPE_UNKNOWN: 429 case MCE_ERROR_TYPE_UNKNOWN:
346 break; 430 break;
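
Each new case in machine_check_print_event_info() repeats the same bounds-checked table lookup before printing a subtype. A hypothetical helper, not part of the patch, makes the pattern explicit:

#include <stddef.h>

/* Illustrative only: the patch open-codes this lookup per error type. */
static const char *mce_subtype_str(const char *const *table,
				   size_t entries, unsigned int subtype)
{
	/* Subtypes newer than the table fall back safely to "Unknown". */
	return subtype < entries ? table[subtype] : "Unknown";
}

/* e.g. mce_subtype_str(mc_ra_types, ARRAY_SIZE(mc_ra_types),
 *			evt->u.ra_error.ra_error_type)
 */
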
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 7353991c4ece..763d6f58caa8 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
116} 116}
117#endif 117#endif
118 118
119static void flush_erat(void)
120{
121 asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
122}
123
124#define MCE_FLUSH_SLB 1
125#define MCE_FLUSH_TLB 2
126#define MCE_FLUSH_ERAT 3
127
128static int mce_flush(int what)
129{
130#ifdef CONFIG_PPC_STD_MMU_64
131 if (what == MCE_FLUSH_SLB) {
132 flush_and_reload_slb();
133 return 1;
134 }
135#endif
136 if (what == MCE_FLUSH_ERAT) {
137 flush_erat();
138 return 1;
139 }
140 if (what == MCE_FLUSH_TLB) {
141 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
142 cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
143 return 1;
144 }
145 }
146
147 return 0;
148}
149
150static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
151{
152 if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
153 dsisr &= ~slb;
154 if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
155 dsisr &= ~erat;
156 if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
157 dsisr &= ~tlb;
158 /* Any other errors we don't understand? */
159 if (dsisr)
160 return 0;
161 return 1;
162}
163
119static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) 164static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
120{ 165{
121 long handled = 1; 166 long handled = 1;
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
281 long handled = 1; 326 long handled = 1;
282 struct mce_error_info mce_error_info = { 0 }; 327 struct mce_error_info mce_error_info = { 0 };
283 328
329 mce_error_info.severity = MCE_SEV_ERROR_SYNC;
330 mce_error_info.initiator = MCE_INITIATOR_CPU;
331
284 srr1 = regs->msr; 332 srr1 = regs->msr;
285 nip = regs->nip; 333 nip = regs->nip;
286 334
@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
352 long handled = 1; 400 long handled = 1;
353 struct mce_error_info mce_error_info = { 0 }; 401 struct mce_error_info mce_error_info = { 0 };
354 402
403 mce_error_info.severity = MCE_SEV_ERROR_SYNC;
404 mce_error_info.initiator = MCE_INITIATOR_CPU;
405
355 srr1 = regs->msr; 406 srr1 = regs->msr;
356 nip = regs->nip; 407 nip = regs->nip;
357 408
@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
372 save_mce_event(regs, handled, &mce_error_info, nip, addr); 423 save_mce_event(regs, handled, &mce_error_info, nip, addr);
373 return handled; 424 return handled;
374} 425}
426
427static int mce_handle_derror_p9(struct pt_regs *regs)
428{
429 uint64_t dsisr = regs->dsisr;
430
431 return mce_handle_flush_derrors(dsisr,
432 P9_DSISR_MC_SLB_PARITY_MFSLB |
433 P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
434
435 P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
436
437 P9_DSISR_MC_ERAT_MULTIHIT);
438}
439
440static int mce_handle_ierror_p9(struct pt_regs *regs)
441{
442 uint64_t srr1 = regs->msr;
443
444 switch (P9_SRR1_MC_IFETCH(srr1)) {
445 case P9_SRR1_MC_IFETCH_SLB_PARITY:
446 case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
447 return mce_flush(MCE_FLUSH_SLB);
448 case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
449 return mce_flush(MCE_FLUSH_TLB);
450 case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
451 return mce_flush(MCE_FLUSH_ERAT);
452 default:
453 return 0;
454 }
455}
456
457static void mce_get_derror_p9(struct pt_regs *regs,
458 struct mce_error_info *mce_err, uint64_t *addr)
459{
460 uint64_t dsisr = regs->dsisr;
461
462 mce_err->severity = MCE_SEV_ERROR_SYNC;
463 mce_err->initiator = MCE_INITIATOR_CPU;
464
465 if (dsisr & P9_DSISR_MC_USER_TLBIE)
466 *addr = regs->nip;
467 else
468 *addr = regs->dar;
469
470 if (dsisr & P9_DSISR_MC_UE) {
471 mce_err->error_type = MCE_ERROR_TYPE_UE;
472 mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
473 } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
474 mce_err->error_type = MCE_ERROR_TYPE_UE;
475 mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
476 } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
477 mce_err->error_type = MCE_ERROR_TYPE_LINK;
478 mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
479 } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
480 mce_err->error_type = MCE_ERROR_TYPE_LINK;
481 mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
482 } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
483 mce_err->error_type = MCE_ERROR_TYPE_ERAT;
484 mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
485 } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
486 mce_err->error_type = MCE_ERROR_TYPE_TLB;
487 mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
488 } else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
489 mce_err->error_type = MCE_ERROR_TYPE_USER;
490 mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
491 } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
492 mce_err->error_type = MCE_ERROR_TYPE_SLB;
493 mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
494 } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
495 mce_err->error_type = MCE_ERROR_TYPE_SLB;
496 mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
497 } else if (dsisr & P9_DSISR_MC_RA_LOAD) {
498 mce_err->error_type = MCE_ERROR_TYPE_RA;
499 mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
500 } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
501 mce_err->error_type = MCE_ERROR_TYPE_RA;
502 mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
503 } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
504 mce_err->error_type = MCE_ERROR_TYPE_RA;
505 mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
506 } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
507 mce_err->error_type = MCE_ERROR_TYPE_RA;
508 mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
509 }
510}
511
512static void mce_get_ierror_p9(struct pt_regs *regs,
513 struct mce_error_info *mce_err, uint64_t *addr)
514{
515 uint64_t srr1 = regs->msr;
516
517 switch (P9_SRR1_MC_IFETCH(srr1)) {
518 case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
519 case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
520 mce_err->severity = MCE_SEV_FATAL;
521 break;
522 default:
523 mce_err->severity = MCE_SEV_ERROR_SYNC;
524 break;
525 }
526
527 mce_err->initiator = MCE_INITIATOR_CPU;
528
529 *addr = regs->nip;
530
531 switch (P9_SRR1_MC_IFETCH(srr1)) {
532 case P9_SRR1_MC_IFETCH_UE:
533 mce_err->error_type = MCE_ERROR_TYPE_UE;
534 mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
535 break;
536 case P9_SRR1_MC_IFETCH_SLB_PARITY:
537 mce_err->error_type = MCE_ERROR_TYPE_SLB;
538 mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
539 break;
540 case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
541 mce_err->error_type = MCE_ERROR_TYPE_SLB;
542 mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
543 break;
544 case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
545 mce_err->error_type = MCE_ERROR_TYPE_ERAT;
546 mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
547 break;
548 case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
549 mce_err->error_type = MCE_ERROR_TYPE_TLB;
550 mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
551 break;
552 case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
553 mce_err->error_type = MCE_ERROR_TYPE_UE;
554 mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
555 break;
556 case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
557 mce_err->error_type = MCE_ERROR_TYPE_LINK;
558 mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
559 break;
560 case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
561 mce_err->error_type = MCE_ERROR_TYPE_LINK;
562 mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
563 break;
564 case P9_SRR1_MC_IFETCH_RA:
565 mce_err->error_type = MCE_ERROR_TYPE_RA;
566 mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
567 break;
568 case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
569 mce_err->error_type = MCE_ERROR_TYPE_RA;
570 mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
571 break;
572 case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
573 mce_err->error_type = MCE_ERROR_TYPE_RA;
574 mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
575 break;
576 case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
577 mce_err->error_type = MCE_ERROR_TYPE_LINK;
578 mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
579 break;
580 case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
581 mce_err->error_type = MCE_ERROR_TYPE_RA;
582 mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
583 break;
584 default:
585 break;
586 }
587}
588
589long __machine_check_early_realmode_p9(struct pt_regs *regs)
590{
591 uint64_t nip, addr;
592 long handled;
593 struct mce_error_info mce_error_info = { 0 };
594
595 nip = regs->nip;
596
597 if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
598 handled = mce_handle_derror_p9(regs);
599 mce_get_derror_p9(regs, &mce_error_info, &addr);
600 } else {
601 handled = mce_handle_ierror_p9(regs);
602 mce_get_ierror_p9(regs, &mce_error_info, &addr);
603 }
604
605 /* Handle UE error. */
606 if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
607 handled = mce_handle_ue_error(regs);
608
609 save_mce_event(regs, handled, &mce_error_info, nip, addr);
610 return handled;
611}
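
mce_handle_flush_derrors() above treats the DSISR as a scoreboard: each recognised error bit is cleared once the matching flush succeeds, and the event counts as handled only if no unrecognised bits remain. A self-contained illustration with invented bit values:

#include <stdint.h>
#include <stdio.h>

#define DEMO_SLB	0x1
#define DEMO_TLB	0x2
#define DEMO_ERAT	0x4

static int demo_flush(uint64_t what) { return 1; /* pretend flush worked */ }

static int demo_handle_derrors(uint64_t dsisr)
{
	if ((dsisr & DEMO_SLB) && demo_flush(DEMO_SLB))
		dsisr &= ~(uint64_t)DEMO_SLB;
	if ((dsisr & DEMO_ERAT) && demo_flush(DEMO_ERAT))
		dsisr &= ~(uint64_t)DEMO_ERAT;
	if ((dsisr & DEMO_TLB) && demo_flush(DEMO_TLB))
		dsisr &= ~(uint64_t)DEMO_TLB;
	return dsisr == 0;	/* handled only if every bit was understood */
}

int main(void)
{
	printf("%d\n", demo_handle_derrors(DEMO_SLB | DEMO_TLB));	/* 1 */
	printf("%d\n", demo_handle_derrors(DEMO_SLB | 0x80));		/* 0 */
	return 0;
}
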
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index a3944540fe0d..1c1b44ec7642 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
168static unsigned long __initdata prom_tce_alloc_end; 168static unsigned long __initdata prom_tce_alloc_end;
169#endif 169#endif
170 170
171static bool __initdata prom_radix_disable;
172
173struct platform_support {
174 bool hash_mmu;
175 bool radix_mmu;
176 bool radix_gtse;
177};
178
171/* Platforms codes are now obsolete in the kernel. Now only used within this 179/* Platforms codes are now obsolete in the kernel. Now only used within this
172 * file and ultimately gone too. Feel free to change them if you need, they 180 * file and ultimately gone too. Feel free to change them if you need, they
173 * are not shared with anything outside of this file anymore 181 * are not shared with anything outside of this file anymore
@@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
626 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 634 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
627#endif 635#endif
628 } 636 }
637
638 opt = strstr(prom_cmd_line, "disable_radix");
639 if (opt) {
640 prom_debug("Radix disabled from cmdline\n");
641 prom_radix_disable = true;
642 }
629} 643}
630 644
631#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 645#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
@@ -695,6 +709,8 @@ struct option_vector5 {
695 u8 byte22; 709 u8 byte22;
696 u8 intarch; 710 u8 intarch;
697 u8 mmu; 711 u8 mmu;
712 u8 hash_ext;
713 u8 radix_ext;
698} __packed; 714} __packed;
699 715
700struct option_vector6 { 716struct option_vector6 {
@@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
850 .reserved3 = 0, 866 .reserved3 = 0,
851 .subprocessors = 1, 867 .subprocessors = 1,
852 .intarch = 0, 868 .intarch = 0,
853 .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) | 869 .mmu = 0,
854 OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE), 870 .hash_ext = 0,
871 .radix_ext = 0,
855 }, 872 },
856 873
857 /* option vector 6: IBM PAPR hints */ 874 /* option vector 6: IBM PAPR hints */
@@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
990 1007
991} 1008}
992 1009
1010static void __init prom_parse_mmu_model(u8 val,
1011 struct platform_support *support)
1012{
1013 switch (val) {
1014 case OV5_FEAT(OV5_MMU_DYNAMIC):
1015 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1016 prom_debug("MMU - either supported\n");
1017 support->radix_mmu = !prom_radix_disable;
1018 support->hash_mmu = true;
1019 break;
1020 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1021 prom_debug("MMU - radix only\n");
1022 if (prom_radix_disable) {
1023 /*
1024 * If we __have__ to do radix, we're better off ignoring
1025 * the command line rather than not booting.
1026 */
1027 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1028 }
1029 support->radix_mmu = true;
1030 break;
1031 case OV5_FEAT(OV5_MMU_HASH):
1032 prom_debug("MMU - hash only\n");
1033 support->hash_mmu = true;
1034 break;
1035 default:
1036 prom_debug("Unknown mmu support option: 0x%x\n", val);
1037 break;
1038 }
1039}
1040
1041static void __init prom_parse_platform_support(u8 index, u8 val,
1042 struct platform_support *support)
1043{
1044 switch (index) {
1045 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1046 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1047 break;
1048 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1049 if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
1050 prom_debug("Radix - GTSE supported\n");
1051 support->radix_gtse = true;
1052 }
1053 break;
1054 }
1055}
1056
1057static void __init prom_check_platform_support(void)
1058{
1059 struct platform_support supported = {
1060 .hash_mmu = false,
1061 .radix_mmu = false,
1062 .radix_gtse = false
1063 };
1064 int prop_len = prom_getproplen(prom.chosen,
1065 "ibm,arch-vec-5-platform-support");
1066 if (prop_len > 1) {
1067 int i;
1068 u8 vec[prop_len];
1069 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1070 prop_len);
1071 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1072 &vec, sizeof(vec));
1073 for (i = 0; i < prop_len; i += 2) {
1074 prom_debug("%d: index = 0x%x val = 0x%x\n",
1075 i / 2, vec[i],
1076 vec[i + 1]);
1077 prom_parse_platform_support(vec[i], vec[i + 1],
1078 &supported);
1079 }
1080 }
1081
1082 if (supported.radix_mmu && supported.radix_gtse) {
1083 /* Radix preferred - but we require GTSE for now */
1084 prom_debug("Asking for radix with GTSE\n");
1085 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1086 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
1087 } else if (supported.hash_mmu) {
1088 /* Default to hash mmu (if we can) */
1089 prom_debug("Asking for hash\n");
1090 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1091 } else {
1092 /* We're probably on a legacy hypervisor */
1093 prom_debug("Assuming legacy hash support\n");
1094 }
1095}
993 1096
994static void __init prom_send_capabilities(void) 1097static void __init prom_send_capabilities(void)
995{ 1098{
@@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
997 prom_arg_t ret; 1100 prom_arg_t ret;
998 u32 cores; 1101 u32 cores;
999 1102
1103 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1104 prom_check_platform_support();
1105
1000 root = call_prom("open", 1, 1, ADDR("/")); 1106 root = call_prom("open", 1, 1, ADDR("/"));
1001 if (root != 0) { 1107 if (root != 0) {
1002 /* We need to tell the FW about the number of cores we support. 1108 /* We need to tell the FW about the number of cores we support.
@@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2993 */ 3099 */
2994 prom_check_initrd(r3, r4); 3100 prom_check_initrd(r3, r4);
2995 3101
3102 /*
3103 * Do early parsing of command line
3104 */
3105 early_cmdline_parse();
3106
2996#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 3107#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
2997 /* 3108 /*
2998 * On pSeries, inform the firmware about our capabilities 3109 * On pSeries, inform the firmware about our capabilities
@@ -3009,11 +3120,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3009 copy_and_flush(0, kbase, 0x100, 0); 3120 copy_and_flush(0, kbase, 0x100, 0);
3010 3121
3011 /* 3122 /*
3012 * Do early parsing of command line
3013 */
3014 early_cmdline_parse();
3015
3016 /*
3017 * Initialize memory management within prom_init 3123 * Initialize memory management within prom_init
3018 */ 3124 */
3019 prom_init_mem(); 3125 prom_init_mem();
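
The ibm,arch-vec-5-platform-support property that prom_check_platform_support() consumes is a flat array of (index, value) byte pairs. A userspace-style sketch of the same walk, with an example payload built from the OV5 defines earlier in this series:

#include <stdio.h>
#include <stdint.h>

static void walk_platform_support(const uint8_t *vec, int len)
{
	for (int i = 0; i + 1 < len; i += 2)
		printf("%d: index = 0x%x val = 0x%x\n",
		       i / 2, vec[i], vec[i + 1]);
}

int main(void)
{
	/* Byte 0x18 (MMU support) says "either" (0x80); byte 0x1A
	 * (radix extensions) advertises GTSE (0x40). */
	const uint8_t vec[] = { 0x18, 0x80, 0x1A, 0x40 };

	walk_platform_support(vec, sizeof(vec));
	return 0;
}
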
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index adf2084f214b..9cfaa8b69b5f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
408 info->line_size = lsize; 408 info->line_size = lsize;
409 info->block_size = bsize; 409 info->block_size = bsize;
410 info->log_block_size = __ilog2(bsize); 410 info->log_block_size = __ilog2(bsize);
411 info->blocks_per_page = PAGE_SIZE / bsize; 411 if (bsize)
412 info->blocks_per_page = PAGE_SIZE / bsize;
413 else
414 info->blocks_per_page = 0;
412 415
413 if (sets == 0) 416 if (sets == 0)
414 info->assoc = 0xffff; 417 info->assoc = 0xffff;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3158fb16de3..8c68145ba1bd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
601 hva, NULL, NULL); 601 hva, NULL, NULL);
602 if (ptep) { 602 if (ptep) {
603 pte = kvmppc_read_update_linux_pte(ptep, 1); 603 pte = kvmppc_read_update_linux_pte(ptep, 1);
604 if (pte_write(pte)) 604 if (__pte_write(pte))
605 write_ok = 1; 605 write_ok = 1;
606 } 606 }
607 local_irq_restore(flags); 607 local_irq_restore(flags);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6fca970373ee..ce6f2121fffe 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
256 } 256 }
257 pte = kvmppc_read_update_linux_pte(ptep, writing); 257 pte = kvmppc_read_update_linux_pte(ptep, writing);
258 if (pte_present(pte) && !pte_protnone(pte)) { 258 if (pte_present(pte) && !pte_protnone(pte)) {
259 if (writing && !pte_write(pte)) 259 if (writing && !__pte_write(pte))
260 /* make the actual HPTE be read-only */ 260 /* make the actual HPTE be read-only */
261 ptel = hpte_make_readonly(ptel); 261 ptel = hpte_make_readonly(ptel);
262 is_ci = pte_ci(pte); 262 is_ci = pte_ci(pte);
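
Both KVM hunks swap pte_write() for __pte_write(). Under the book3s64 saved-write scheme, pte_write() is assumed to also report a pte whose write permission has been parked in a software bit (e.g. for NUMA PROT_NONE), while __pte_write() tests only the hardware write bit, which is what the HPTE permissions must follow. A simplified model of the difference:

#include <stdio.h>

#define DEMO_WRITE	0x1	/* hardware write permission */
#define DEMO_SAVEDW	0x2	/* write intent saved across PROT_NONE */

static int demo___pte_write(unsigned long pte) { return !!(pte & DEMO_WRITE); }

static int demo_pte_write(unsigned long pte)
{
	return !!(pte & (DEMO_WRITE | DEMO_SAVEDW));
}

int main(void)
{
	unsigned long numa_pte = DEMO_SAVEDW;	/* write parked, HW bit clear */

	/* pte_write() says writable, but the HPTE must stay read-only: */
	printf("pte_write=%d __pte_write=%d\n",
	       demo_pte_write(numa_pte), demo___pte_write(numa_pte));
	return 0;
}
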
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 0e649d72fe8d..2b5e09020cfe 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -20,6 +20,7 @@ obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
20 20
21obj64-$(CONFIG_SMP) += locks.o 21obj64-$(CONFIG_SMP) += locks.o
22obj64-$(CONFIG_ALTIVEC) += vmx-helper.o 22obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
23obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
23 24
24obj-y += checksum_$(BITS).o checksum_wrappers.o 25obj-y += checksum_$(BITS).o checksum_wrappers.o
25 26
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 846dba2c6360..9c542ec70c5b 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1799 goto instr_done; 1799 goto instr_done;
1800 1800
1801 case LARX: 1801 case LARX:
1802 if (regs->msr & MSR_LE)
1803 return 0;
1804 if (op.ea & (size - 1)) 1802 if (op.ea & (size - 1))
1805 break; /* can't handle misaligned */ 1803 break; /* can't handle misaligned */
1806 if (!address_ok(regs, op.ea, size)) 1804 if (!address_ok(regs, op.ea, size))
@@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1823 goto ldst_done; 1821 goto ldst_done;
1824 1822
1825 case STCX: 1823 case STCX:
1826 if (regs->msr & MSR_LE)
1827 return 0;
1828 if (op.ea & (size - 1)) 1824 if (op.ea & (size - 1))
1829 break; /* can't handle misaligned */ 1825 break; /* can't handle misaligned */
1830 if (!address_ok(regs, op.ea, size)) 1826 if (!address_ok(regs, op.ea, size))
@@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1849 goto ldst_done; 1845 goto ldst_done;
1850 1846
1851 case LOAD: 1847 case LOAD:
1852 if (regs->msr & MSR_LE)
1853 return 0;
1854 err = read_mem(&regs->gpr[op.reg], op.ea, size, regs); 1848 err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
1855 if (!err) { 1849 if (!err) {
1856 if (op.type & SIGNEXT) 1850 if (op.type & SIGNEXT)
@@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1862 1856
1863#ifdef CONFIG_PPC_FPU 1857#ifdef CONFIG_PPC_FPU
1864 case LOAD_FP: 1858 case LOAD_FP:
1865 if (regs->msr & MSR_LE)
1866 return 0;
1867 if (size == 4) 1859 if (size == 4)
1868 err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); 1860 err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
1869 else 1861 else
@@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1872#endif 1864#endif
1873#ifdef CONFIG_ALTIVEC 1865#ifdef CONFIG_ALTIVEC
1874 case LOAD_VMX: 1866 case LOAD_VMX:
1875 if (regs->msr & MSR_LE)
1876 return 0;
1877 err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); 1867 err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
1878 goto ldst_done; 1868 goto ldst_done;
1879#endif 1869#endif
1880#ifdef CONFIG_VSX 1870#ifdef CONFIG_VSX
1881 case LOAD_VSX: 1871 case LOAD_VSX:
1882 if (regs->msr & MSR_LE)
1883 return 0;
1884 err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); 1872 err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
1885 goto ldst_done; 1873 goto ldst_done;
1886#endif 1874#endif
@@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1903 goto instr_done; 1891 goto instr_done;
1904 1892
1905 case STORE: 1893 case STORE:
1906 if (regs->msr & MSR_LE)
1907 return 0;
1908 if ((op.type & UPDATE) && size == sizeof(long) && 1894 if ((op.type & UPDATE) && size == sizeof(long) &&
1909 op.reg == 1 && op.update_reg == 1 && 1895 op.reg == 1 && op.update_reg == 1 &&
1910 !(regs->msr & MSR_PR) && 1896 !(regs->msr & MSR_PR) &&
@@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1917 1903
1918#ifdef CONFIG_PPC_FPU 1904#ifdef CONFIG_PPC_FPU
1919 case STORE_FP: 1905 case STORE_FP:
1920 if (regs->msr & MSR_LE)
1921 return 0;
1922 if (size == 4) 1906 if (size == 4)
1923 err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); 1907 err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
1924 else 1908 else
@@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1927#endif 1911#endif
1928#ifdef CONFIG_ALTIVEC 1912#ifdef CONFIG_ALTIVEC
1929 case STORE_VMX: 1913 case STORE_VMX:
1930 if (regs->msr & MSR_LE)
1931 return 0;
1932 err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); 1914 err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
1933 goto ldst_done; 1915 goto ldst_done;
1934#endif 1916#endif
1935#ifdef CONFIG_VSX 1917#ifdef CONFIG_VSX
1936 case STORE_VSX: 1918 case STORE_VSX:
1937 if (regs->msr & MSR_LE)
1938 return 0;
1939 err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); 1919 err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
1940 goto ldst_done; 1920 goto ldst_done;
1941#endif 1921#endif
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
new file mode 100644
index 000000000000..2534c1447554
--- /dev/null
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -0,0 +1,434 @@
1/*
2 * Simple sanity test for emulate_step load/store instructions.
3 *
4 * Copyright IBM Corp. 2016
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) "emulate_step_test: " fmt
13
14#include <linux/ptrace.h>
15#include <asm/sstep.h>
16#include <asm/ppc-opcode.h>
17
18#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
19
20/*
21 * Defined with TEST_ prefix so it does not conflict with other
22 * definitions.
23 */
24#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \
25 ___PPC_RA(base) | IMM_L(i))
26#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \
27 ___PPC_RA(base) | IMM_L(i))
28#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \
29 ___PPC_RA(a) | ___PPC_RB(b))
30#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \
31 ___PPC_RA(base) | ((i) & 0xfffc))
32#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \
33 ___PPC_RA(a) | ___PPC_RB(b) | \
34 __PPC_EH(eh))
35#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \
36 ___PPC_RA(a) | ___PPC_RB(b))
37#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \
38 ___PPC_RA(a) | ___PPC_RB(b))
39#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \
40 ___PPC_RA(a) | ___PPC_RB(b))
41#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \
42 ___PPC_RA(a) | ___PPC_RB(b))
43#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \
44 ___PPC_RA(a) | ___PPC_RB(b))
45#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \
46 ___PPC_RA(a) | ___PPC_RB(b))
47#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \
48 ___PPC_RA(a) | ___PPC_RB(b))
49#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
50#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
51
52
53static void __init init_pt_regs(struct pt_regs *regs)
54{
55 static unsigned long msr;
56 static bool msr_cached;
57
58 memset(regs, 0, sizeof(struct pt_regs));
59
60 if (likely(msr_cached)) {
61 regs->msr = msr;
62 return;
63 }
64
65 asm volatile("mfmsr %0" : "=r"(regs->msr));
66
67 regs->msr |= MSR_FP;
68 regs->msr |= MSR_VEC;
69 regs->msr |= MSR_VSX;
70
71 msr = regs->msr;
72 msr_cached = true;
73}
74
75static void __init show_result(char *ins, char *result)
76{
77 pr_info("%-14s : %s\n", ins, result);
78}
79
80static void __init test_ld(void)
81{
82 struct pt_regs regs;
83 unsigned long a = 0x23;
84 int stepped = -1;
85
86 init_pt_regs(&regs);
87 regs.gpr[3] = (unsigned long) &a;
88
89 /* ld r5, 0(r3) */
90 stepped = emulate_step(&regs, TEST_LD(5, 3, 0));
91
92 if (stepped == 1 && regs.gpr[5] == a)
93 show_result("ld", "PASS");
94 else
95 show_result("ld", "FAIL");
96}
97
98static void __init test_lwz(void)
99{
100 struct pt_regs regs;
101 unsigned int a = 0x4545;
102 int stepped = -1;
103
104 init_pt_regs(&regs);
105 regs.gpr[3] = (unsigned long) &a;
106
107 /* lwz r5, 0(r3) */
108 stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));
109
110 if (stepped == 1 && regs.gpr[5] == a)
111 show_result("lwz", "PASS");
112 else
113 show_result("lwz", "FAIL");
114}
115
116static void __init test_lwzx(void)
117{
118 struct pt_regs regs;
119 unsigned int a[3] = {0x0, 0x0, 0x1234};
120 int stepped = -1;
121
122 init_pt_regs(&regs);
123 regs.gpr[3] = (unsigned long) a;
124 regs.gpr[4] = 8;
125 regs.gpr[5] = 0x8765;
126
127 /* lwzx r5, r3, r4 */
128 stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
129 if (stepped == 1 && regs.gpr[5] == a[2])
130 show_result("lwzx", "PASS");
131 else
132 show_result("lwzx", "FAIL");
133}
134
135static void __init test_std(void)
136{
137 struct pt_regs regs;
138 unsigned long a = 0x1234;
139 int stepped = -1;
140
141 init_pt_regs(&regs);
142 regs.gpr[3] = (unsigned long) &a;
143 regs.gpr[5] = 0x5678;
144
145 /* std r5, 0(r3) */
146 stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
147 if (stepped == 1 && regs.gpr[5] == a)
148 show_result("std", "PASS");
149 else
150 show_result("std", "FAIL");
151}
152
153static void __init test_ldarx_stdcx(void)
154{
155 struct pt_regs regs;
156 unsigned long a = 0x1234;
157 int stepped = -1;
158 unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */
159
160 init_pt_regs(&regs);
161 asm volatile("mfcr %0" : "=r"(regs.ccr));
162
163
164 /*** ldarx ***/
165
166 regs.gpr[3] = (unsigned long) &a;
167 regs.gpr[4] = 0;
168 regs.gpr[5] = 0x5678;
169
170 /* ldarx r5, r3, r4, 0 */
171 stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));
172
173 /*
174 * Don't touch 'a' here: a load or store of 'a' would clear
175 * the reservation and make the subsequent stdcx. fail.
176 * Instead, compare against the hardcoded value.
177 */
178 if (stepped <= 0 || regs.gpr[5] != 0x1234) {
179 show_result("ldarx / stdcx.", "FAIL (ldarx)");
180 return;
181 }
182
183
184 /*** stdcx. ***/
185
186 regs.gpr[5] = 0x9ABC;
187
188 /* stdcx. r5, r3, r4 */
189 stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));
190
191 /*
192 * Two possible scenarios indicate successful emulation
193 * of stdcx.:
194 * 1. Reservation is active and store is performed. In this
195 * case cr0.eq bit will be set to 1.
196 * 2. Reservation is not active and store is not performed.
197 * In this case cr0.eq bit will be set to 0.
198 */
199 if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq))
200 || (regs.gpr[5] != a && !(regs.ccr & cr0_eq))))
201 show_result("ldarx / stdcx.", "PASS");
202 else
203 show_result("ldarx / stdcx.", "FAIL (stdcx.)");
204}
205
206#ifdef CONFIG_PPC_FPU
207static void __init test_lfsx_stfsx(void)
208{
209 struct pt_regs regs;
210 union {
211 float a;
212 int b;
213 } c;
214 int cached_b;
215 int stepped = -1;
216
217 init_pt_regs(&regs);
218
219
220 /*** lfsx ***/
221
222 c.a = 123.45;
223 cached_b = c.b;
224
225 regs.gpr[3] = (unsigned long) &c.a;
226 regs.gpr[4] = 0;
227
228 /* lfsx frt10, r3, r4 */
229 stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));
230
231 if (stepped == 1)
232 show_result("lfsx", "PASS");
233 else
234 show_result("lfsx", "FAIL");
235
236
237 /*** stfsx ***/
238
239 c.a = 678.91;
240
241 /* stfsx frs10, r3, r4 */
242 stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));
243
244 if (stepped == 1 && c.b == cached_b)
245 show_result("stfsx", "PASS");
246 else
247 show_result("stfsx", "FAIL");
248}
249
250static void __init test_lfdx_stfdx(void)
251{
252 struct pt_regs regs;
253 union {
254 double a;
255 long b;
256 } c;
257 long cached_b;
258 int stepped = -1;
259
260 init_pt_regs(&regs);
261
262
263 /*** lfdx ***/
264
265 c.a = 123456.78;
266 cached_b = c.b;
267
268 regs.gpr[3] = (unsigned long) &c.a;
269 regs.gpr[4] = 0;
270
271 /* lfdx frt10, r3, r4 */
272 stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));
273
274 if (stepped == 1)
275 show_result("lfdx", "PASS");
276 else
277 show_result("lfdx", "FAIL");
278
279
280 /*** stfdx ***/
281
282 c.a = 987654.32;
283
284 /* stfdx frs10, r3, r4 */
285 stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));
286
287 if (stepped == 1 && c.b == cached_b)
288 show_result("stfdx", "PASS");
289 else
290 show_result("stfdx", "FAIL");
291}
292#else
293static void __init test_lfsx_stfsx(void)
294{
295 show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)");
296 show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
297}
298
299static void __init test_lfdx_stfdx(void)
300{
301 show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
302 show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
303}
304#endif /* CONFIG_PPC_FPU */
305
306#ifdef CONFIG_ALTIVEC
307static void __init test_lvx_stvx(void)
308{
309 struct pt_regs regs;
310 union {
311 vector128 a;
312 u32 b[4];
313 } c;
314 u32 cached_b[4];
315 int stepped = -1;
316
317 init_pt_regs(&regs);
318
319
320 /*** lvx ***/
321
322 cached_b[0] = c.b[0] = 923745;
323 cached_b[1] = c.b[1] = 2139478;
324 cached_b[2] = c.b[2] = 9012;
325 cached_b[3] = c.b[3] = 982134;
326
327 regs.gpr[3] = (unsigned long) &c.a;
328 regs.gpr[4] = 0;
329
330 /* lvx vrt10, r3, r4 */
331 stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));
332
333 if (stepped == 1)
334 show_result("lvx", "PASS");
335 else
336 show_result("lvx", "FAIL");
337
338
339 /*** stvx ***/
340
341 c.b[0] = 4987513;
342 c.b[1] = 84313948;
343 c.b[2] = 71;
344 c.b[3] = 498532;
345
346 /* stvx vrs10, r3, r4 */
347 stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));
348
349 if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
350 cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
351 show_result("stvx", "PASS");
352 else
353 show_result("stvx", "FAIL");
354}
355#else
356static void __init test_lvx_stvx(void)
357{
358 show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)");
359 show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)");
360}
361#endif /* CONFIG_ALTIVEC */
362
363#ifdef CONFIG_VSX
364static void __init test_lxvd2x_stxvd2x(void)
365{
366 struct pt_regs regs;
367 union {
368 vector128 a;
369 u32 b[4];
370 } c;
371 u32 cached_b[4];
372 int stepped = -1;
373
374 init_pt_regs(&regs);
375
376
377 /*** lxvd2x ***/
378
379 cached_b[0] = c.b[0] = 18233;
380 cached_b[1] = c.b[1] = 34863571;
381 cached_b[2] = c.b[2] = 834;
382 cached_b[3] = c.b[3] = 6138911;
383
384 regs.gpr[3] = (unsigned long) &c.a;
385 regs.gpr[4] = 0;
386
387 /* lxvd2x vsr39, r3, r4 */
388 stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));
389
390 if (stepped == 1)
391 show_result("lxvd2x", "PASS");
392 else
393 show_result("lxvd2x", "FAIL");
394
395
396 /*** stxvd2x ***/
397
398 c.b[0] = 21379463;
399 c.b[1] = 87;
400 c.b[2] = 374234;
401 c.b[3] = 4;
402
403 /* stxvd2x vsr39, r3, r4 */
404 stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));
405
406 if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
407 cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
408 show_result("stxvd2x", "PASS");
409 else
410 show_result("stxvd2x", "FAIL");
411}
412#else
413static void __init test_lxvd2x_stxvd2x(void)
414{
415 show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)");
416 show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)");
417}
418#endif /* CONFIG_VSX */
419
420static int __init test_emulate_step(void)
421{
422 test_ld();
423 test_lwz();
424 test_lwzx();
425 test_std();
426 test_ldarx_stdcx();
427 test_lfsx_stfsx();
428 test_lfdx_stfdx();
429 test_lvx_stvx();
430 test_lxvd2x_stxvd2x();
431
432 return 0;
433}
434late_initcall(test_emulate_step);
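
The TEST_* macros at the top of this file assemble raw instruction words by OR-ing register and immediate fields into a base opcode. An illustrative standalone expansion of TEST_LD(5, 3, 0), assuming the usual PPC field positions (RT at bit 21, RA at bit 16):

#include <stdio.h>

#define DEMO_PPC_INST_LD	0xe8000000u	/* primary opcode 58 << 26 */
#define DEMO_RT(t)		(((t) & 0x1f) << 21)
#define DEMO_RA(a)		(((a) & 0x1f) << 16)
#define DEMO_IMM_L(i)		((unsigned int)(i) & 0xffff)

int main(void)
{
	/* TEST_LD(5, 3, 0): "ld r5, 0(r3)" */
	unsigned int insn = DEMO_PPC_INST_LD | DEMO_RT(5) | DEMO_RA(3) |
			    DEMO_IMM_L(0);

	printf("0x%08x\n", insn);	/* 0xe8a30000 */
	return 0;
}
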
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6aa3b76aa0d6..9be992083d2a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -356,18 +356,42 @@ static void early_check_vec5(void)
356 unsigned long root, chosen; 356 unsigned long root, chosen;
357 int size; 357 int size;
358 const u8 *vec5; 358 const u8 *vec5;
359 u8 mmu_supported;
359 360
360 root = of_get_flat_dt_root(); 361 root = of_get_flat_dt_root();
361 chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); 362 chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
362 if (chosen == -FDT_ERR_NOTFOUND) 363 if (chosen == -FDT_ERR_NOTFOUND) {
364 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
363 return; 365 return;
366 }
364 vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size); 367 vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
365 if (!vec5) 368 if (!vec5) {
369 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
366 return; 370 return;
367 if (size <= OV5_INDX(OV5_MMU_RADIX_300) || 371 }
368 !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300))) 372 if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
369 /* Hypervisor doesn't support radix */
370 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; 373 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
374 return;
375 }
376
377 /* Check for supported configuration */
378 mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
379 OV5_FEAT(OV5_MMU_SUPPORT);
380 if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
381 /* Hypervisor only supports radix - check enabled && GTSE */
382 if (!early_radix_enabled()) {
383 pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
384 }
385 if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
386 OV5_FEAT(OV5_RADIX_GTSE))) {
387 pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
388 }
389 /* Do radix anyway - the hypervisor said we had to */
390 cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
391 } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
392 /* Hypervisor only supports hash - disable radix */
393 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
394 }
371} 395}
372 396
373void __init mmu_early_init_devtree(void) 397void __init mmu_early_init_devtree(void)
@@ -383,7 +407,7 @@ void __init mmu_early_init_devtree(void)
383 * even though the ibm,architecture-vec-5 property created by 407 * even though the ibm,architecture-vec-5 property created by
384 * skiboot doesn't have the necessary bits set. 408 * skiboot doesn't have the necessary bits set.
385 */ 409 */
386 if (early_radix_enabled() && !(mfmsr() & MSR_HV)) 410 if (!(mfmsr() & MSR_HV))
387 early_check_vec5(); 411 early_check_vec5();
388 412
389 if (early_radix_enabled()) 413 if (early_radix_enabled())
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 2a590a98e652..c28165d8970b 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
186 */ 186 */
187 register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); 187 register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
188 pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd); 188 pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
189 asm volatile("ptesync" : : : "memory");
190 asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
191 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
192 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
189} 193}
190 194
191static void __init radix_init_partition_table(void) 195static void __init radix_init_partition_table(void)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 595dd718ea87..2ff13249f87a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
188 sdsync = POWER7P_MMCRA_SDAR_VALID; 188 sdsync = POWER7P_MMCRA_SDAR_VALID;
189 else if (ppmu->flags & PPMU_ALT_SIPR) 189 else if (ppmu->flags & PPMU_ALT_SIPR)
190 sdsync = POWER6_MMCRA_SDSYNC; 190 sdsync = POWER6_MMCRA_SDSYNC;
191 else if (ppmu->flags & PPMU_NO_SIAR)
192 sdsync = MMCRA_SAMPLE_ENABLE;
191 else 193 else
192 sdsync = MMCRA_SDSYNC; 194 sdsync = MMCRA_SDSYNC;
193 195
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index e79fb5fb817d..cd951fd231c4 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
65 return !(event & ~valid_mask); 65 return !(event & ~valid_mask);
66} 66}
67 67
68static u64 mmcra_sdar_mode(u64 event) 68static inline bool is_event_marked(u64 event)
69{ 69{
70 if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) 70 if (event & EVENT_IS_MARKED)
71 return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; 71 return true;
72
73 return false;
74}
72 75
73 return MMCRA_SDAR_MODE_TLB; 76static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
77{
78 /*
79 * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in
80 * continous sampling mode.
81 *
82 * Incase of Power8:
83 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling
84 * mode and will be un-changed when setting MMCRA[63] (Marked events).
85 *
86 * Incase of Power9:
87 * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
88 * or if group already have any marked events.
89 * Non-Marked events (for DD1):
90 * MMCRA[SDAR_MODE] will be set to 0b01
91 * For rest
92 * MMCRA[SDAR_MODE] will be set from event code.
93 */
94 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
95 if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
96 *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
97 else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
98 *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
99 else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
100 *mmcra |= MMCRA_SDAR_MODE_TLB;
101 } else
102 *mmcra |= MMCRA_SDAR_MODE_TLB;
74} 103}
75 104
76static u64 thresh_cmp_val(u64 value) 105static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
180 value |= CNST_L1_QUAL_VAL(cache); 209 value |= CNST_L1_QUAL_VAL(cache);
181 } 210 }
182 211
183 if (event & EVENT_IS_MARKED) { 212 if (is_event_marked(event)) {
184 mask |= CNST_SAMPLE_MASK; 213 mask |= CNST_SAMPLE_MASK;
185 value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); 214 value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
186 } 215 }
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
276 } 305 }
277 306
278 /* In continuous sampling mode, update SDAR on TLB miss */ 307 /* In continuous sampling mode, update SDAR on TLB miss */
279 mmcra |= mmcra_sdar_mode(event[i]); 308 mmcra_sdar_mode(event[i], &mmcra);
280 309
281 if (event[i] & EVENT_IS_L1) { 310 if (event[i] & EVENT_IS_L1) {
282 cache = event[i] >> EVENT_CACHE_SEL_SHIFT; 311 cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
285 mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; 314 mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
286 } 315 }
287 316
288 if (event[i] & EVENT_IS_MARKED) { 317 if (is_event_marked(event[i])) {
289 mmcra |= MMCRA_SAMPLE_ENABLE; 318 mmcra |= MMCRA_SAMPLE_ENABLE;
290 319
291 val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; 320 val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index cf9bd8990159..899210f14ee4 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -246,6 +246,7 @@
246#define MMCRA_THR_CMP_SHIFT 32 246#define MMCRA_THR_CMP_SHIFT 32
247#define MMCRA_SDAR_MODE_SHIFT 42 247#define MMCRA_SDAR_MODE_SHIFT 42
248#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT) 248#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
249#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
249#define MMCRA_IFM_SHIFT 30 250#define MMCRA_IFM_SHIFT 30
250 251
251/* MMCR1 Threshold Compare bit constant for power9 */ 252/* MMCR1 Threshold Compare bit constant for power9 */
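
Note the shape of MMCRA_SDAR_MODE_NO_UPDATES: unlike MMCRA_SDAR_MODE_TLB it is an inverted mask, so mmcra_sdar_mode() applies it with &= to zero the two-bit field rather than OR-ing a value in. A toy demonstration:

#include <stdio.h>

#define SDAR_MODE_SHIFT		42
#define SDAR_MODE_TLB		(1ull << SDAR_MODE_SHIFT)	/* OR in 0b01 */
#define SDAR_MODE_NO_UPDATES	~(0x3ull << SDAR_MODE_SHIFT)	/* AND to 0b00 */

int main(void)
{
	unsigned long long mmcra = 0x3ull << SDAR_MODE_SHIFT;

	mmcra &= SDAR_MODE_NO_UPDATES;	/* marked event: force 'No Updates' */
	printf("cleared: %llu\n", mmcra >> SDAR_MODE_SHIFT);	/* 0 */

	mmcra |= SDAR_MODE_TLB;		/* pre-P9 default: update on TLB miss */
	printf("tlb:     %llu\n", mmcra >> SDAR_MODE_SHIFT);	/* 1 */
	return 0;
}
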
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 6693f75e93d1..da8a0f7a035c 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -39,8 +39,8 @@ opal_tracepoint_refcount:
39BEGIN_FTR_SECTION; \ 39BEGIN_FTR_SECTION; \
40 b 1f; \ 40 b 1f; \
41END_FTR_SECTION(0, 1); \ 41END_FTR_SECTION(0, 1); \
42 ld r12,opal_tracepoint_refcount@toc(r2); \ 42 ld r11,opal_tracepoint_refcount@toc(r2); \
43 cmpdi r12,0; \ 43 cmpdi r11,0; \
44 bne- LABEL; \ 44 bne- LABEL; \
451: 451:
46 46
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 86d9fde93c17..e0f856bfbfe8 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
395 struct machine_check_event *evt) 395 struct machine_check_event *evt)
396{ 396{
397 int recovered = 0; 397 int recovered = 0;
398 uint64_t ea = get_mce_fault_addr(evt);
399 398
400 if (!(regs->msr & MSR_RI)) { 399 if (!(regs->msr & MSR_RI)) {
401 /* If MSR_RI isn't set, we cannot recover */ 400 /* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
404 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { 403 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
405 /* Platform corrected itself */ 404 /* Platform corrected itself */
406 recovered = 1; 405 recovered = 1;
407 } else if (ea && !is_kernel_addr(ea)) { 406 } else if (evt->severity == MCE_SEV_FATAL) {
407 /* Fatal machine check */
408 pr_err("Machine check interrupt is fatal\n");
409 recovered = 0;
410 } else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
411 (user_mode(regs) && !is_global_init(current))) {
408 /* 412 /*
409 * Faulting address is not in kernel text. We should be fine.
410 * We need to find which process uses this address.
411 * For now, kill the task if we have received an exception when 413 * For now, kill the task if we have received an exception when
412 * in userspace. 414 * in userspace.
413 * 415 *
414 * TODO: Queue up this address for hwpoisoning later. 416 * TODO: Queue up this address for hwpoisoning later.
415 */ 417 */
416 if (user_mode(regs) && !is_global_init(current)) {
417 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
418 recovered = 1;
419 } else
420 recovered = 0;
421 } else if (user_mode(regs) && !is_global_init(current) &&
422 evt->severity == MCE_SEV_ERROR_SYNC) {
423 /*
424 * If we have received a synchronous error when in userspace
425 * kill the task.
426 */
427 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); 418 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
428 recovered = 1; 419 recovered = 1;
429 } 420 }
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6901a06da2f9..e36738291c32 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
1775} 1775}
1776 1776
1777static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, 1777static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
1778 struct pci_bus *bus) 1778 struct pci_bus *bus,
1779 bool add_to_group)
1779{ 1780{
1780 struct pci_dev *dev; 1781 struct pci_dev *dev;
1781 1782
1782 list_for_each_entry(dev, &bus->devices, bus_list) { 1783 list_for_each_entry(dev, &bus->devices, bus_list) {
1783 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); 1784 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
1784 set_dma_offset(&dev->dev, pe->tce_bypass_base); 1785 set_dma_offset(&dev->dev, pe->tce_bypass_base);
1785 iommu_add_device(&dev->dev); 1786 if (add_to_group)
1787 iommu_add_device(&dev->dev);
1786 1788
1787 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) 1789 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
1788 pnv_ioda_setup_bus_dma(pe, dev->subordinate); 1790 pnv_ioda_setup_bus_dma(pe, dev->subordinate,
1791 add_to_group);
1789 } 1792 }
1790} 1793}
1791 1794
@@ -2191,7 +2194,7 @@ found:
2191 set_iommu_table_base(&pe->pdev->dev, tbl); 2194 set_iommu_table_base(&pe->pdev->dev, tbl);
2192 iommu_add_device(&pe->pdev->dev); 2195 iommu_add_device(&pe->pdev->dev);
2193 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) 2196 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2194 pnv_ioda_setup_bus_dma(pe, pe->pbus); 2197 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
2195 2198
2196 return; 2199 return;
2197 fail: 2200 fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
2426 2429
2427 pnv_pci_ioda2_set_bypass(pe, false); 2430 pnv_pci_ioda2_set_bypass(pe, false);
2428 pnv_pci_ioda2_unset_window(&pe->table_group, 0); 2431 pnv_pci_ioda2_unset_window(&pe->table_group, 0);
2432 if (pe->pbus)
2433 pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
2429 pnv_ioda2_table_free(tbl); 2434 pnv_ioda2_table_free(tbl);
2430} 2435}
2431 2436
@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
2435 table_group); 2440 table_group);
2436 2441
2437 pnv_pci_ioda2_setup_default_config(pe); 2442 pnv_pci_ioda2_setup_default_config(pe);
2443 if (pe->pbus)
2444 pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
2438} 2445}
2439 2446
2440static struct iommu_table_group_ops pnv_pci_ioda2_ops = { 2447static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2624 level_shift = entries_shift + 3; 2631 level_shift = entries_shift + 3;
2625 level_shift = max_t(unsigned, level_shift, PAGE_SHIFT); 2632 level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
2626 2633
2634 if ((level_shift - 3) * levels + page_shift >= 60)
2635 return -EINVAL;
2636
2627 /* Allocate TCE table */ 2637 /* Allocate TCE table */
2628 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 2638 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2629 levels, tce_table_size, &offset, &total_allocated); 2639 levels, tce_table_size, &offset, &total_allocated);
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
2728 if (pe->flags & PNV_IODA_PE_DEV) 2738 if (pe->flags & PNV_IODA_PE_DEV)
2729 iommu_add_device(&pe->pdev->dev); 2739 iommu_add_device(&pe->pdev->dev);
2730 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) 2740 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
2731 pnv_ioda_setup_bus_dma(pe, pe->pbus); 2741 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
2732} 2742}
2733 2743
2734#ifdef CONFIG_PCI_MSI 2744#ifdef CONFIG_PCI_MSI
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
index f9760ccf4032..3696ea6c4826 100644
--- a/arch/powerpc/purgatory/trampoline.S
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -116,13 +116,13 @@ dt_offset:
116 116
117 .data 117 .data
118 .balign 8 118 .balign 8
119.globl sha256_digest 119.globl purgatory_sha256_digest
120sha256_digest: 120purgatory_sha256_digest:
121 .skip 32 121 .skip 32
122 .size sha256_digest, . - sha256_digest 122 .size purgatory_sha256_digest, . - purgatory_sha256_digest
123 123
124 .balign 8 124 .balign 8
125.globl sha_regions 125.globl purgatory_sha_regions
126sha_regions: 126purgatory_sha_regions:
127 .skip 8 * 2 * 16 127 .skip 8 * 2 * 16
128 .size sha_regions, . - sha_regions 128 .size purgatory_sha_regions, . - purgatory_sha_regions
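Prefixing these purgatory symbols with purgatory_ keeps them from clashing with the kernel's own sha256 symbols, since kexec resolves them by name in the purgatory ELF at load time. A hedged sketch of how a loader pokes the renamed digest symbol, mirroring the kexec_purgatory_get_set_symbol() calls visible in the machine_kexec_64.c hunk further down (the wrapper function itself is hypothetical):

	/* Hedged sketch: copies a precomputed digest into the purgatory
	 * image before it runs; the final 0 means "set", not "get". */
	static int set_purgatory_digest(struct kimage *image, const u8 *digest)
	{
		return kexec_purgatory_get_set_symbol(image,
				"purgatory_sha256_digest",
				(void *)digest, 32, 0);
	}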
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ada29eaed6e2..f523ac883150 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -274,7 +274,9 @@ failed:
274 if (bank->disk->major > 0) 274 if (bank->disk->major > 0)
275 unregister_blkdev(bank->disk->major, 275 unregister_blkdev(bank->disk->major,
276 bank->disk->disk_name); 276 bank->disk->disk_name);
277 del_gendisk(bank->disk); 277 if (bank->disk->flags & GENHD_FL_UP)
278 del_gendisk(bank->disk);
279 put_disk(bank->disk);
278 } 280 }
279 device->dev.platform_data = NULL; 281 device->dev.platform_data = NULL;
280 if (bank->io_addr != 0) 282 if (bank->io_addr != 0)
@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
299 device_remove_file(&device->dev, &dev_attr_ecc); 301 device_remove_file(&device->dev, &dev_attr_ecc);
300 free_irq(bank->irq_id, device); 302 free_irq(bank->irq_id, device);
301 del_gendisk(bank->disk); 303 del_gendisk(bank->disk);
304 put_disk(bank->disk);
302 iounmap((void __iomem *) bank->io_addr); 305 iounmap((void __iomem *) bank->io_addr);
303 kfree(bank); 306 kfree(bank);
304 307
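del_gendisk() only undoes add_disk(); the reference taken by alloc_disk() still has to be dropped with put_disk(), which is what both hunks add. The failure path additionally tests GENHD_FL_UP so that a disk which never made it to add_disk() is not del_gendisk()'d. A hedged sketch of the teardown pairing being enforced (block-layer API of this era, not a drop-in helper):

	/* Hedged sketch: generic alloc_disk()/add_disk() teardown order. */
	static void example_disk_teardown(struct gendisk *disk)
	{
		if (disk->flags & GENHD_FL_UP)	/* add_disk() actually ran */
			del_gendisk(disk);	/* unregister from the system */
		put_disk(disk);			/* drop the alloc_disk() ref */
	}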
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index f9670eabfcfa..b53f80f0b4d8 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
91 91
92static void icp_opal_set_cpu_priority(unsigned char cppr) 92static void icp_opal_set_cpu_priority(unsigned char cppr)
93{ 93{
94 /*
95 * Here be dragons. The caller has asked to allow only IPI's and not
96 * external interrupts. But OPAL XIVE doesn't support that. So instead
97 * of allowing no interrupts allow all. That's still not right, but
98 * currently the only caller who does this is xics_migrate_irqs_away()
99 * and it works in that case.
100 */
101 if (cppr >= DEFAULT_PRIORITY)
102 cppr = LOWEST_PRIORITY;
103
94 xics_set_base_cppr(cppr); 104 xics_set_base_cppr(cppr);
95 opal_int_set_cppr(cppr); 105 opal_int_set_cppr(cppr);
96 iosync(); 106 iosync();
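For readers outside XICS land: the CPPR is a threshold, and only sources with a numerically more favored (lower) priority than the CPPR are delivered. A hedged restatement of the clamp, with the constants assumed from this era's arch/powerpc/include/asm/xics.h:

	/* DEFAULT_PRIORITY is 5 and IPI_PRIORITY is 4, so a CPPR of 5 would
	 * mean "IPIs only" -- which OPAL cannot express, hence the clamp to
	 * LOWEST_PRIORITY (0xFF), which lets everything through. */
	if (cppr >= DEFAULT_PRIORITY)
		cppr = LOWEST_PRIORITY;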
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 69d858e51ac7..23efe4e42172 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -20,6 +20,7 @@
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/delay.h>
23 24
24#include <asm/prom.h> 25#include <asm/prom.h>
25#include <asm/io.h> 26#include <asm/io.h>
@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
198 /* Remove ourselves from the global interrupt queue */ 199 /* Remove ourselves from the global interrupt queue */
199 xics_set_cpu_giq(xics_default_distrib_server, 0); 200 xics_set_cpu_giq(xics_default_distrib_server, 0);
200 201
201 /* Allow IPIs again... */
202 icp_ops->set_priority(DEFAULT_PRIORITY);
203
204 for_each_irq_desc(virq, desc) { 202 for_each_irq_desc(virq, desc) {
205 struct irq_chip *chip; 203 struct irq_chip *chip;
206 long server; 204 long server;
@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
255unlock: 253unlock:
256 raw_spin_unlock_irqrestore(&desc->lock, flags); 254 raw_spin_unlock_irqrestore(&desc->lock, flags);
257 } 255 }
256
257 /* Allow "sufficient" time to drop any inflight IRQ's */
258 mdelay(5);
259
260 /*
261 * Allow IPIs again. This is done at the very end, after migrating all
262 * interrupts, the expectation is that we'll only get woken up by an IPI
263 * interrupt beyond this point, but leave externals masked just to be
264 * safe. If we're using icp-opal this may actually allow all
265 * interrupts anyway, but that should be OK.
266 */
267 icp_ops->set_priority(DEFAULT_PRIORITY);
268
258} 269}
259#endif /* CONFIG_HOTPLUG_CPU */ 270#endif /* CONFIG_HOTPLUG_CPU */
260 271
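The net ordering of the migration path after this change, distilled as a hedged outline (pseudo-kernel C; names as in the patch, bodies elided):

	xics_set_cpu_giq(xics_default_distrib_server, 0); /* leave global queue */
	for_each_irq_desc(virq, desc) {
		/* ... retarget each external interrupt elsewhere ... */
	}
	mdelay(5);				 /* let in-flight IRQs drain */
	icp_ops->set_priority(DEFAULT_PRIORITY); /* only now re-allow IPIs */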
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 143b1e00b818..4b176fe83da4 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y
609CONFIG_FTRACE_SYSCALLS=y 609CONFIG_FTRACE_SYSCALLS=y
610CONFIG_STACK_TRACER=y 610CONFIG_STACK_TRACER=y
611CONFIG_BLK_DEV_IO_TRACE=y 611CONFIG_BLK_DEV_IO_TRACE=y
612CONFIG_UPROBE_EVENT=y 612CONFIG_UPROBE_EVENTS=y
613CONFIG_FUNCTION_PROFILER=y 613CONFIG_FUNCTION_PROFILER=y
614CONFIG_HIST_TRIGGERS=y 614CONFIG_HIST_TRIGGERS=y
615CONFIG_TRACE_ENUM_MAP_FILE=y 615CONFIG_TRACE_ENUM_MAP_FILE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index f05d2d6e1087..0de46cc397f6 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y
560CONFIG_FTRACE_SYSCALLS=y 560CONFIG_FTRACE_SYSCALLS=y
561CONFIG_STACK_TRACER=y 561CONFIG_STACK_TRACER=y
562CONFIG_BLK_DEV_IO_TRACE=y 562CONFIG_BLK_DEV_IO_TRACE=y
563CONFIG_UPROBE_EVENT=y 563CONFIG_UPROBE_EVENTS=y
564CONFIG_FUNCTION_PROFILER=y 564CONFIG_FUNCTION_PROFILER=y
565CONFIG_HIST_TRIGGERS=y 565CONFIG_HIST_TRIGGERS=y
566CONFIG_TRACE_ENUM_MAP_FILE=y 566CONFIG_TRACE_ENUM_MAP_FILE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 2358bf33c5ef..e167557b434c 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y
558CONFIG_FTRACE_SYSCALLS=y 558CONFIG_FTRACE_SYSCALLS=y
559CONFIG_STACK_TRACER=y 559CONFIG_STACK_TRACER=y
560CONFIG_BLK_DEV_IO_TRACE=y 560CONFIG_BLK_DEV_IO_TRACE=y
561CONFIG_UPROBE_EVENT=y 561CONFIG_UPROBE_EVENTS=y
562CONFIG_FUNCTION_PROFILER=y 562CONFIG_FUNCTION_PROFILER=y
563CONFIG_HIST_TRIGGERS=y 563CONFIG_HIST_TRIGGERS=y
564CONFIG_TRACE_ENUM_MAP_FILE=y 564CONFIG_TRACE_ENUM_MAP_FILE=y
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index d69ea495c4d7..716b17238599 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -474,8 +474,11 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
474 ret = blkcipher_walk_done(desc, walk, nbytes - n); 474 ret = blkcipher_walk_done(desc, walk, nbytes - n);
475 } 475 }
476 if (k < n) { 476 if (k < n) {
477 if (__ctr_paes_set_key(ctx) != 0) 477 if (__ctr_paes_set_key(ctx) != 0) {
478 if (locked)
479 spin_unlock(&ctrblk_lock);
478 return blkcipher_walk_done(desc, walk, -EIO); 480 return blkcipher_walk_done(desc, walk, -EIO);
481 }
479 } 482 }
480 } 483 }
481 if (locked) 484 if (locked)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 68bfd09f1b02..97189dbaf34b 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y
179CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y 179CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
180CONFIG_STACK_TRACER=y 180CONFIG_STACK_TRACER=y
181CONFIG_BLK_DEV_IO_TRACE=y 181CONFIG_BLK_DEV_IO_TRACE=y
182CONFIG_UPROBE_EVENT=y 182CONFIG_UPROBE_EVENTS=y
183CONFIG_FUNCTION_PROFILER=y 183CONFIG_FUNCTION_PROFILER=y
184CONFIG_TRACE_ENUM_MAP_FILE=y 184CONFIG_TRACE_ENUM_MAP_FILE=y
185CONFIG_KPROBES_SANITY_TEST=y 185CONFIG_KPROBES_SANITY_TEST=y
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index d1c407ddf703..9072bf63a846 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -8,31 +8,27 @@
8#define _S390_CPUTIME_H 8#define _S390_CPUTIME_H
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <asm/div64.h> 11#include <asm/timex.h>
12 12
13#define CPUTIME_PER_USEC 4096ULL 13#define CPUTIME_PER_USEC 4096ULL
14#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC) 14#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
15 15
16/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ 16/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
17 17
18typedef unsigned long long __nocast cputime_t;
19typedef unsigned long long __nocast cputime64_t;
20
21#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) 18#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
22 19
23static inline unsigned long __div(unsigned long long n, unsigned long base)
24{
25 return n / base;
26}
27
28/* 20/*
29 * Convert cputime to microseconds and back. 21 * Convert cputime to microseconds.
30 */ 22 */
31static inline unsigned int cputime_to_usecs(const cputime_t cputime) 23static inline u64 cputime_to_usecs(const u64 cputime)
32{ 24{
33 return (__force unsigned long long) cputime >> 12; 25 return cputime >> 12;
34} 26}
35 27
28/*
29 * Convert cputime to nanoseconds.
30 */
31#define cputime_to_nsecs(cputime) tod_to_ns(cputime)
36 32
37u64 arch_cpu_idle_time(int cpu); 33u64 arch_cpu_idle_time(int cpu);
38 34
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 7ed1972b1920..93e37b12e882 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -24,6 +24,7 @@
24 * the S390 page table tree. 24 * the S390 page table tree.
25 */ 25 */
26#ifndef __ASSEMBLY__ 26#ifndef __ASSEMBLY__
27#include <asm-generic/5level-fixup.h>
27#include <linux/sched.h> 28#include <linux/sched.h>
28#include <linux/mm_types.h> 29#include <linux/mm_types.h>
29#include <linux/page-flags.h> 30#include <linux/page-flags.h>
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 354344dcc198..118535123f34 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -206,20 +206,16 @@ static inline unsigned long long get_tod_clock_monotonic(void)
206 * ns = (todval * 125) >> 9; 206 * ns = (todval * 125) >> 9;
207 * 207 *
208 * In order to avoid an overflow with the multiplication we can rewrite this. 208 * In order to avoid an overflow with the multiplication we can rewrite this.
209 * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) 209 * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits)
210 * we end up with 210 * we end up with
211 * 211 *
212 * ns = ((2^32 * th + tl) * 125 ) >> 9; 212 * ns = ((2^9 * th + tl) * 125 ) >> 9;
213 * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); 213 * -> ns = (th * 125) + ((tl * 125) >> 9);
214 * 214 *
215 */ 215 */
216static inline unsigned long long tod_to_ns(unsigned long long todval) 216static inline unsigned long long tod_to_ns(unsigned long long todval)
217{ 217{
218 unsigned long long ns; 218 return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
219
220 ns = ((todval >> 32) << 23) * 125;
221 ns += ((todval & 0xffffffff) * 125) >> 9;
222 return ns;
223} 219}
224 220
225#endif 221#endif
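The rewrite is exact, not an approximation: with todval = 2^9 * th + tl, the term 2^9 * th * 125 is a multiple of 2^9, so ((2^9 * th + tl) * 125) >> 9 equals th * 125 + ((tl * 125) >> 9) with no carry lost, and the same argument justified the old 2^32 split. A hedged standalone check (host C, arbitrary sample values) that the old and new formulas agree:

	#include <assert.h>
	#include <stdio.h>

	static unsigned long long old_tod_to_ns(unsigned long long t)
	{
		return (((t >> 32) << 23) * 125) + (((t & 0xffffffff) * 125) >> 9);
	}

	static unsigned long long new_tod_to_ns(unsigned long long t)
	{
		return ((t >> 9) * 125) + (((t & 0x1ff) * 125) >> 9);
	}

	int main(void)
	{
		unsigned long long s[] = { 0, 1, 0x1ff, 0x200, 0xffffffffULL,
					   0x123456789abcdefULL };
		for (unsigned int i = 0; i < sizeof(s) / sizeof(s[0]); i++)
			assert(old_tod_to_ns(s[i]) == new_tod_to_ns(s[i]));
		puts("old and new tod_to_ns() agree");
		return 0;
	}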
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 4384bc797a54..152de9b796e1 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -313,7 +313,9 @@
313#define __NR_copy_file_range 375 313#define __NR_copy_file_range 375
314#define __NR_preadv2 376 314#define __NR_preadv2 376
315#define __NR_pwritev2 377 315#define __NR_pwritev2 377
316#define NR_syscalls 378 316/* Number 378 is reserved for guarded storage */
317#define __NR_statx 379
318#define NR_syscalls 380
317 319
318/* 320/*
319 * There are some system calls that are not present on 64 bit, some 321 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index ae2cda5eee5a..e89cc2e71db1 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -178,3 +178,4 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); 178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
179COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); 179COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
180COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); 180COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
181COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dff2152350a7..6a7d737d514c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -490,7 +490,7 @@ ENTRY(pgm_check_handler)
490 jnz .Lpgm_svcper # -> single stepped svc 490 jnz .Lpgm_svcper # -> single stepped svc
4911: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 4911: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
492 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 492 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
493 j 3f 493 j 4f
4942: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 4942: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
495 lg %r15,__LC_KERNEL_STACK 495 lg %r15,__LC_KERNEL_STACK
496 lgr %r14,%r12 496 lgr %r14,%r12
@@ -499,8 +499,8 @@ ENTRY(pgm_check_handler)
499 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 499 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
500 jz 3f 500 jz 3f
501 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 501 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
5023: la %r11,STACK_FRAME_OVERHEAD(%r15) 5023: stg %r10,__THREAD_last_break(%r14)
503 stg %r10,__THREAD_last_break(%r14) 5034: la %r11,STACK_FRAME_OVERHEAD(%r15)
504 stmg %r0,%r7,__PT_R0(%r11) 504 stmg %r0,%r7,__PT_R0(%r11)
505 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 505 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
506 stmg %r8,%r9,__PT_PSW(%r11) 506 stmg %r8,%r9,__PT_PSW(%r11)
@@ -509,14 +509,14 @@ ENTRY(pgm_check_handler)
509 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 509 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
510 stg %r10,__PT_ARGS(%r11) 510 stg %r10,__PT_ARGS(%r11)
511 tm __LC_PGM_ILC+3,0x80 # check for per exception 511 tm __LC_PGM_ILC+3,0x80 # check for per exception
512 jz 4f 512 jz 5f
513 tmhh %r8,0x0001 # kernel per event ? 513 tmhh %r8,0x0001 # kernel per event ?
514 jz .Lpgm_kprobe 514 jz .Lpgm_kprobe
515 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP 515 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
516 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 516 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
517 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 517 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
518 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 518 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5194: REENABLE_IRQS 5195: REENABLE_IRQS
520 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 520 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
521 larl %r1,pgm_check_table 521 larl %r1,pgm_check_table
522 llgh %r10,__PT_INT_CODE+2(%r11) 522 llgh %r10,__PT_INT_CODE+2(%r11)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index b67dafb7b7cf..e545ffe5155a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -564,6 +564,8 @@ static struct kset *ipl_kset;
564 564
565static void __ipl_run(void *unused) 565static void __ipl_run(void *unused)
566{ 566{
567 if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
568 diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
567 diag308(DIAG308_LOAD_CLEAR, NULL); 569 diag308(DIAG308_LOAD_CLEAR, NULL);
568 if (MACHINE_IS_VM) 570 if (MACHINE_IS_VM)
569 __cpcmd("IPL", NULL, 0, NULL); 571 __cpcmd("IPL", NULL, 0, NULL);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 20cd339e11ae..f29e41c5e2ec 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -124,7 +124,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
124 clear_tsk_thread_flag(p, TIF_SINGLE_STEP); 124 clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
125 /* Initialize per thread user and system timer values */ 125 /* Initialize per thread user and system timer values */
126 p->thread.user_timer = 0; 126 p->thread.user_timer = 0;
127 p->thread.guest_timer = 0;
127 p->thread.system_timer = 0; 128 p->thread.system_timer = 0;
129 p->thread.hardirq_timer = 0;
130 p->thread.softirq_timer = 0;
128 131
129 frame->sf.back_chain = 0; 132 frame->sf.back_chain = 0;
130 /* new return point is ret_from_fork */ 133 /* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9b59e6212d8f..2659b5cfeddb 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -386,3 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2)
386SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ 386SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
387SYSCALL(sys_preadv2,compat_sys_preadv2) 387SYSCALL(sys_preadv2,compat_sys_preadv2)
388SYSCALL(sys_pwritev2,compat_sys_pwritev2) 388SYSCALL(sys_pwritev2,compat_sys_pwritev2)
389NI_SYSCALL
390SYSCALL(sys_statx,compat_sys_statx)
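With slot 378 parked as NI_SYSCALL for guarded storage and statx wired at 379, the call is reachable from userspace by number even before libc grows a wrapper. A hedged userspace example (raw syscall; struct statx and STATX_SIZE come from the uapi headers introduced alongside this syscall, and 379 is the s390 number added above):

	#include <stdio.h>
	#include <unistd.h>
	#include <fcntl.h>		/* AT_FDCWD */
	#include <sys/syscall.h>
	#include <linux/stat.h>		/* struct statx, STATX_SIZE */

	int main(void)
	{
		struct statx stx;
		long ret = syscall(379 /* __NR_statx */, AT_FDCWD,
				   "/etc/hostname", 0, STATX_SIZE, &stx);

		if (ret == 0)
			printf("size: %llu\n",
			       (unsigned long long)stx.stx_size);
		return ret ? 1 : 0;
	}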
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c14fc9029912..072d84ba42a3 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -111,7 +111,7 @@ static inline u64 scale_vtime(u64 vtime)
111} 111}
112 112
113static void account_system_index_scaled(struct task_struct *p, 113static void account_system_index_scaled(struct task_struct *p,
114 cputime_t cputime, cputime_t scaled, 114 u64 cputime, u64 scaled,
115 enum cpu_usage_stat index) 115 enum cpu_usage_stat index)
116{ 116{
117 p->stimescaled += cputime_to_nsecs(scaled); 117 p->stimescaled += cputime_to_nsecs(scaled);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b48dc5f1900b..463e5ef02304 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
608bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) 608bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
609{ 609{
610 spinlock_t *ptl; 610 spinlock_t *ptl;
611 pgd_t *pgd;
612 pud_t *pud;
613 pmd_t *pmd;
611 pgste_t pgste; 614 pgste_t pgste;
612 pte_t *ptep; 615 pte_t *ptep;
613 pte_t pte; 616 pte_t pte;
614 bool dirty; 617 bool dirty;
615 618
616 ptep = get_locked_pte(mm, addr, &ptl); 619 pgd = pgd_offset(mm, addr);
620 pud = pud_alloc(mm, pgd, addr);
621 if (!pud)
622 return false;
623 pmd = pmd_alloc(mm, pud, addr);
624 if (!pmd)
625 return false;
626 /* We can't run guests backed by huge pages, but userspace can
627 * still set them up and then try to migrate them without any
628 * migration support.
629 */
630 if (pmd_large(*pmd))
631 return true;
632
633 ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
617 if (unlikely(!ptep)) 634 if (unlikely(!ptep))
618 return false; 635 return false;
619 636
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 0553e5cd5985..46ff8fd678a7 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -2,6 +2,7 @@
2#define _ASM_SCORE_PGTABLE_H 2#define _ASM_SCORE_PGTABLE_H
3 3
4#include <linux/const.h> 4#include <linux/const.h>
5#define __ARCH_USE_5LEVEL_HACK
5#include <asm-generic/pgtable-nopmd.h> 6#include <asm-generic/pgtable-nopmd.h>
6 7
7#include <asm/fixmap.h> 8#include <asm/fixmap.h>
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index e359ec675869..12daf45369b4 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/extable.h> 26#include <linux/extable.h>
27#include <linux/ptrace.h>
27#include <linux/sched/mm.h> 28#include <linux/sched/mm.h>
28#include <linux/sched/signal.h> 29#include <linux/sched/signal.h>
29#include <linux/sched/debug.h> 30#include <linux/sched/debug.h>
diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c
index ec871355fc2d..6736a3ad6286 100644
--- a/arch/score/mm/extable.c
+++ b/arch/score/mm/extable.c
@@ -24,6 +24,8 @@
24 */ 24 */
25 25
26#include <linux/extable.h> 26#include <linux/extable.h>
27#include <linux/ptrace.h>
28#include <asm/extable.h>
27 29
28int fixup_exception(struct pt_regs *regs) 30int fixup_exception(struct pt_regs *regs)
29{ 31{
diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c
index 340fd40b381d..9c292c27e0d7 100644
--- a/arch/sh/boards/mach-cayman/setup.c
+++ b/arch/sh/boards/mach-cayman/setup.c
@@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void)
128 SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX); 128 SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
129 SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX); 129 SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
130 130
131#ifdef CONFIG_IDE
132 /* 131 /*
133 * Only IDE1 exists on the Cayman 132 * Only IDE1 exists on the Cayman
134 */ 133 */
@@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void)
158 SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */ 157 SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
159 SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */ 158 SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
160 SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */ 159 SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
161#endif
162 160
163 /* Exit the configuration state */ 161 /* Exit the configuration state */
164 outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); 162 outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h
index 19bd89db17e7..f75cf4387257 100644
--- a/arch/sh/include/asm/pgtable-2level.h
+++ b/arch/sh/include/asm/pgtable-2level.h
@@ -1,6 +1,7 @@
1#ifndef __ASM_SH_PGTABLE_2LEVEL_H 1#ifndef __ASM_SH_PGTABLE_2LEVEL_H
2#define __ASM_SH_PGTABLE_2LEVEL_H 2#define __ASM_SH_PGTABLE_2LEVEL_H
3 3
4#define __ARCH_USE_5LEVEL_HACK
4#include <asm-generic/pgtable-nopmd.h> 5#include <asm-generic/pgtable-nopmd.h>
5 6
6/* 7/*
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 249a985d9648..9b1e776eca31 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -1,6 +1,7 @@
1#ifndef __ASM_SH_PGTABLE_3LEVEL_H 1#ifndef __ASM_SH_PGTABLE_3LEVEL_H
2#define __ASM_SH_PGTABLE_3LEVEL_H 2#define __ASM_SH_PGTABLE_3LEVEL_H
3 3
4#define __ARCH_USE_5LEVEL_HACK
4#include <asm-generic/pgtable-nopud.h> 5#include <asm-generic/pgtable-nopud.h>
5 6
6/* 7/*
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 56e49c8f770d..8a598528ec1f 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -12,6 +12,7 @@
12 * the SpitFire page tables. 12 * the SpitFire page tables.
13 */ 13 */
14 14
15#include <asm-generic/5level-fixup.h>
15#include <linux/compiler.h> 16#include <linux/compiler.h>
16#include <linux/const.h> 17#include <linux/const.h>
17#include <asm/types.h> 18#include <asm/types.h>
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index d26a42279036..5f8c615cb5e9 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
74#define MAXMEM (_VMALLOC_START - PAGE_OFFSET) 74#define MAXMEM (_VMALLOC_START - PAGE_OFFSET)
75 75
76/* We have no pmd or pud since we are strictly a two-level page table */ 76/* We have no pmd or pud since we are strictly a two-level page table */
77#define __ARCH_USE_5LEVEL_HACK
77#include <asm-generic/pgtable-nopmd.h> 78#include <asm-generic/pgtable-nopmd.h>
78 79
79static inline int pud_huge_page(pud_t pud) { return 0; } 80static inline int pud_huge_page(pud_t pud) { return 0; }
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index e96cec52f6d8..96fe58b45118 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -59,6 +59,7 @@
59#ifndef __ASSEMBLY__ 59#ifndef __ASSEMBLY__
60 60
61/* We have no pud since we are a three-level page table. */ 61/* We have no pud since we are a three-level page table. */
62#define __ARCH_USE_5LEVEL_HACK
62#include <asm-generic/pgtable-nopud.h> 63#include <asm-generic/pgtable-nopud.h>
63 64
64/* 65/*
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index cfbe59752469..179c0ea87a0c 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -8,6 +8,7 @@
8#ifndef __UM_PGTABLE_2LEVEL_H 8#ifndef __UM_PGTABLE_2LEVEL_H
9#define __UM_PGTABLE_2LEVEL_H 9#define __UM_PGTABLE_2LEVEL_H
10 10
11#define __ARCH_USE_5LEVEL_HACK
11#include <asm-generic/pgtable-nopmd.h> 12#include <asm-generic/pgtable-nopmd.h>
12 13
13/* PGDIR_SHIFT determines what a third-level page table entry can map */ 14/* PGDIR_SHIFT determines what a third-level page table entry can map */
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index bae8523a162f..c4d876dfb9ac 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -7,6 +7,7 @@
7#ifndef __UM_PGTABLE_3LEVEL_H 7#ifndef __UM_PGTABLE_3LEVEL_H
8#define __UM_PGTABLE_3LEVEL_H 8#define __UM_PGTABLE_3LEVEL_H
9 9
10#define __ARCH_USE_5LEVEL_HACK
10#include <asm-generic/pgtable-nopud.h> 11#include <asm-generic/pgtable-nopud.h>
11 12
12/* PGDIR_SHIFT determines what a third-level page table entry can map */ 13/* PGDIR_SHIFT determines what a third-level page table entry can map */
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index 818d0f5598e3..a4f2bef37e70 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -12,6 +12,7 @@
12#ifndef __UNICORE_PGTABLE_H__ 12#ifndef __UNICORE_PGTABLE_H__
13#define __UNICORE_PGTABLE_H__ 13#define __UNICORE_PGTABLE_H__
14 14
15#define __ARCH_USE_5LEVEL_HACK
15#include <asm-generic/pgtable-nopmd.h> 16#include <asm-generic/pgtable-nopmd.h>
16#include <asm/cpu-single.h> 17#include <asm/cpu-single.h>
17 18
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 7ef4a099defc..6205d3b81e6d 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -176,6 +176,7 @@ CONFIG_E1000E=y
176CONFIG_SKY2=y 176CONFIG_SKY2=y
177CONFIG_FORCEDETH=y 177CONFIG_FORCEDETH=y
178CONFIG_8139TOO=y 178CONFIG_8139TOO=y
179CONFIG_R8169=y
179CONFIG_FDDI=y 180CONFIG_FDDI=y
180CONFIG_INPUT_POLLDEV=y 181CONFIG_INPUT_POLLDEV=y
181# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 182# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index afb222b63cae..c84584bb9402 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -604,7 +604,7 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
604 return &amd_f15_PMC20; 604 return &amd_f15_PMC20;
605 } 605 }
606 case AMD_EVENT_NB: 606 case AMD_EVENT_NB:
607 /* moved to perf_event_amd_uncore.c */ 607 /* moved to uncore.c */
608 return &emptyconstraint; 608 return &emptyconstraint;
609 default: 609 default:
610 return &emptyconstraint; 610 return &emptyconstraint;
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index aff4b5b69d40..238ae3248ba5 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * perf_event_intel_cstate.c: support cstate residency counters 2 * Support cstate residency counters
3 * 3 *
4 * Copyright (C) 2015, Intel Corp. 4 * Copyright (C) 2015, Intel Corp.
5 * Author: Kan Liang (kan.liang@intel.com) 5 * Author: Kan Liang (kan.liang@intel.com)
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 22054ca49026..9d05c7e67f60 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters 2 * Support Intel RAPL energy consumption counters
3 * Copyright (C) 2013 Google, Inc., Stephane Eranian 3 * Copyright (C) 2013 Google, Inc., Stephane Eranian
4 * 4 *
5 * Intel RAPL interface is specified in the IA-32 Manual Vol3b 5 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index ad986c1e29bc..df5989f27b1b 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -360,7 +360,7 @@ extern struct list_head pci2phy_map_head;
360extern struct pci_extra_dev *uncore_extra_pci_dev; 360extern struct pci_extra_dev *uncore_extra_pci_dev;
361extern struct event_constraint uncore_constraint_empty; 361extern struct event_constraint uncore_constraint_empty;
362 362
363/* perf_event_intel_uncore_snb.c */ 363/* uncore_snb.c */
364int snb_uncore_pci_init(void); 364int snb_uncore_pci_init(void);
365int ivb_uncore_pci_init(void); 365int ivb_uncore_pci_init(void);
366int hsw_uncore_pci_init(void); 366int hsw_uncore_pci_init(void);
@@ -371,7 +371,7 @@ void nhm_uncore_cpu_init(void);
371void skl_uncore_cpu_init(void); 371void skl_uncore_cpu_init(void);
372int snb_pci2phy_map_init(int devid); 372int snb_pci2phy_map_init(int devid);
373 373
374/* perf_event_intel_uncore_snbep.c */ 374/* uncore_snbep.c */
375int snbep_uncore_pci_init(void); 375int snbep_uncore_pci_init(void);
376void snbep_uncore_cpu_init(void); 376void snbep_uncore_cpu_init(void);
377int ivbep_uncore_pci_init(void); 377int ivbep_uncore_pci_init(void);
@@ -385,5 +385,5 @@ void knl_uncore_cpu_init(void);
385int skx_uncore_pci_init(void); 385int skx_uncore_pci_init(void);
386void skx_uncore_cpu_init(void); 386void skx_uncore_cpu_init(void);
387 387
388/* perf_event_intel_uncore_nhmex.c */ 388/* uncore_nhmex.c */
389void nhmex_uncore_cpu_init(void); 389void nhmex_uncore_cpu_init(void);
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index db64baf0e500..8bef70e7f3cc 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -158,13 +158,13 @@ void hyperv_init(void)
158 clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); 158 clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
159 return; 159 return;
160 } 160 }
161register_msr_cs:
161#endif 162#endif
162 /* 163 /*
163 * For 32 bit guests just use the MSR based mechanism for reading 164 * For 32 bit guests just use the MSR based mechanism for reading
164 * the partition counter. 165 * the partition counter.
165 */ 166 */
166 167
167register_msr_cs:
168 hyperv_cs = &hyperv_cs_msr; 168 hyperv_cs = &hyperv_cs_msr;
169 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) 169 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
170 clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); 170 clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
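Moving register_msr_cs: inside the #ifdef matters because the only goto to it lives in the conditional block; with the label outside, configurations that compile the block out are left with a label nobody uses, and -Wunused-label fires. A hedged minimal reproduction of the pattern (plain C, not the Hyper-V code; HAVE_TSC_PAGE stands in for the real config option):

	static int tsc_page_available(void) { return 0; }	/* stand-in */

	static void pick_clocksource(void)
	{
	#ifdef HAVE_TSC_PAGE
		if (!tsc_page_available())
			goto register_msr_cs;
		/* ... register the TSC-page clocksource ... */
		return;
	register_msr_cs:	/* compiled out together with its goto */
	#endif
		/* ... register the MSR-based clocksource ... */
	}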
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4e7772387c6e..b04bb6dfed7f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -289,7 +289,8 @@
289#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ 289#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
290#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ 290#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
291#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 291#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
292#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */ 292#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
293#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
293 294
294/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ 295/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
295#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ 296#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 72277b1028a5..50d35e3185f5 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
121 *(tmp + 1) = 0; 121 *(tmp + 1) = 0;
122} 122}
123 123
124#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
125 defined(CONFIG_PARAVIRT))
126static inline void native_pud_clear(pud_t *pudp) 124static inline void native_pud_clear(pud_t *pudp)
127{ 125{
128} 126}
129#endif
130 127
131static inline void pud_clear(pud_t *pudp) 128static inline void pud_clear(pud_t *pudp)
132{ 129{
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1cfb36b8c024..585ee0d42d18 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
62# define set_pud(pudp, pud) native_set_pud(pudp, pud) 62# define set_pud(pudp, pud) native_set_pud(pudp, pud)
63#endif 63#endif
64 64
65#ifndef __PAGETABLE_PMD_FOLDED 65#ifndef __PAGETABLE_PUD_FOLDED
66#define pud_clear(pud) native_pud_clear(pud) 66#define pud_clear(pud) native_pud_clear(pud)
67#endif 67#endif
68 68
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8b4de22d6429..62484333673d 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
273} 273}
274 274
275#if CONFIG_PGTABLE_LEVELS > 3 275#if CONFIG_PGTABLE_LEVELS > 3
276#include <asm-generic/5level-fixup.h>
277
276typedef struct { pudval_t pud; } pud_t; 278typedef struct { pudval_t pud; } pud_t;
277 279
278static inline pud_t native_make_pud(pmdval_t val) 280static inline pud_t native_make_pud(pmdval_t val)
@@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud)
285 return pud.pud; 287 return pud.pud;
286} 288}
287#else 289#else
290#define __ARCH_USE_5LEVEL_HACK
288#include <asm-generic/pgtable-nopud.h> 291#include <asm-generic/pgtable-nopud.h>
289 292
290static inline pudval_t native_pud_val(pud_t pud) 293static inline pudval_t native_pud_val(pud_t pud)
@@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
306 return pmd.pmd; 309 return pmd.pmd;
307} 310}
308#else 311#else
312#define __ARCH_USE_5LEVEL_HACK
309#include <asm-generic/pgtable-nopmd.h> 313#include <asm-generic/pgtable-nopmd.h>
310 314
311static inline pmdval_t native_pmd_val(pmd_t pmd) 315static inline pmdval_t native_pmd_val(pmd_t pmd)
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 34684adb6899..b3b09b98896d 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
46static inline 46static inline
47bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) 47bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
48{ 48{
49 /*
50 * "Allocated" pkeys are those that have been returned
51 * from pkey_alloc(). pkey 0 is special, and never
52 * returned from pkey_alloc().
53 */
54 if (pkey <= 0)
55 return false;
56 if (pkey >= arch_max_pkey())
57 return false;
49 return mm_pkey_allocation_map(mm) & (1U << pkey); 58 return mm_pkey_allocation_map(mm) & (1U << pkey);
50} 59}
51 60
@@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm)
82static inline 91static inline
83int mm_pkey_free(struct mm_struct *mm, int pkey) 92int mm_pkey_free(struct mm_struct *mm, int pkey)
84{ 93{
85 /*
86 * pkey 0 is special, always allocated and can never
87 * be freed.
88 */
89 if (!pkey)
90 return -EINVAL;
91 if (!mm_pkey_is_allocated(mm, pkey)) 94 if (!mm_pkey_is_allocated(mm, pkey))
92 return -EINVAL; 95 return -EINVAL;
93 96
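Centralizing the bounds checks in mm_pkey_is_allocated() encodes the contract that pkey 0, the default key every mapping starts with, is never handed out by pkey_alloc() and can never be freed. A hedged userspace illustration via raw syscalls (the numbers used are the x86-64 ones, 330/331; libc wrappers were not yet universal in this era):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		long pkey = syscall(330 /* __NR_pkey_alloc */, 0, 0);

		if (pkey < 0) {
			perror("pkey_alloc");	/* e.g. no OSPKE support */
			return 1;
		}
		printf("allocated pkey %ld (never 0)\n", pkey);
		syscall(331 /* __NR_pkey_free */, pkey);
		/* freeing key 0 now fails: mm_pkey_is_allocated(0) is false */
		return 0;
	}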
diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h
new file mode 100644
index 000000000000..d7da2729903d
--- /dev/null
+++ b/arch/x86/include/asm/purgatory.h
@@ -0,0 +1,20 @@
1#ifndef _ASM_X86_PURGATORY_H
2#define _ASM_X86_PURGATORY_H
3
4#ifndef __ASSEMBLY__
5#include <linux/purgatory.h>
6
7extern void purgatory(void);
8/*
9 * These forward declarations serve two purposes:
10 *
11 * 1) Make sparse happy when checking arch/purgatory
12 * 2) Document that these are required to be global so the symbol
13 * lookup in kexec works
14 */
15extern unsigned long purgatory_backup_dest;
16extern unsigned long purgatory_backup_src;
17extern unsigned long purgatory_backup_sz;
18#endif /* __ASSEMBLY__ */
19
20#endif /* _ASM_X86_PURGATORY_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6fa85944af83..fc5abff9b7fd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
188 188
189static inline void __flush_tlb_all(void) 189static inline void __flush_tlb_all(void)
190{ 190{
191 if (static_cpu_has(X86_FEATURE_PGE)) 191 if (boot_cpu_has(X86_FEATURE_PGE))
192 __flush_tlb_global(); 192 __flush_tlb_global();
193 else 193 else
194 __flush_tlb(); 194 __flush_tlb();
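The swap from static_cpu_has() to boot_cpu_has() is about timing, not semantics: static_cpu_has() is backed by alternatives patching, so a call made before apply_alternatives() runs can take the wrong branch, while boot_cpu_has() tests the feature bitmap at run time and is safe arbitrarily early. In hedged outline, the resulting logic is simply:

	/* dynamic feature test, valid even before alternatives patching */
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();	/* toggle CR4.PGE: flush global TLB */
	else
		__flush_tlb();		/* reload CR3 */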
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 5138dacf8bb8..07244ea16765 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -58,7 +58,7 @@ struct setup_header {
58 __u32 header; 58 __u32 header;
59 __u16 version; 59 __u16 version;
60 __u32 realmode_swtch; 60 __u32 realmode_swtch;
61 __u16 start_sys; 61 __u16 start_sys_seg;
62 __u16 kernel_version; 62 __u16 kernel_version;
63 __u8 type_of_loader; 63 __u8 type_of_loader;
64 __u8 loadflags; 64 __u8 loadflags;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4261b3282ad9..aee7deddabd0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1610,24 +1610,15 @@ static inline void try_to_enable_x2apic(int remap_mode) { }
1610static inline void __x2apic_enable(void) { } 1610static inline void __x2apic_enable(void) { }
1611#endif /* !CONFIG_X86_X2APIC */ 1611#endif /* !CONFIG_X86_X2APIC */
1612 1612
1613static int __init try_to_enable_IR(void)
1614{
1615#ifdef CONFIG_X86_IO_APIC
1616 if (!x2apic_enabled() && skip_ioapic_setup) {
1617 pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
1618 return -1;
1619 }
1620#endif
1621 return irq_remapping_enable();
1622}
1623
1624void __init enable_IR_x2apic(void) 1613void __init enable_IR_x2apic(void)
1625{ 1614{
1626 unsigned long flags; 1615 unsigned long flags;
1627 int ret, ir_stat; 1616 int ret, ir_stat;
1628 1617
1629 if (skip_ioapic_setup) 1618 if (skip_ioapic_setup) {
1619 pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
1630 return; 1620 return;
1621 }
1631 1622
1632 ir_stat = irq_remapping_prepare(); 1623 ir_stat = irq_remapping_prepare();
1633 if (ir_stat < 0 && !x2apic_supported()) 1624 if (ir_stat < 0 && !x2apic_supported())
@@ -1645,7 +1636,7 @@ void __init enable_IR_x2apic(void)
1645 1636
1646 /* If irq_remapping_prepare() succeeded, try to enable it */ 1637 /* If irq_remapping_prepare() succeeded, try to enable it */
1647 if (ir_stat >= 0) 1638 if (ir_stat >= 0)
1648 ir_stat = try_to_enable_IR(); 1639 ir_stat = irq_remapping_enable();
1649 /* ir_stat contains the remap mode or an error code */ 1640 /* ir_stat contains the remap mode or an error code */
1650 try_to_enable_x2apic(ir_stat); 1641 try_to_enable_x2apic(ir_stat);
1651 1642
@@ -2062,10 +2053,10 @@ static int allocate_logical_cpuid(int apicid)
2062 2053
2063 /* Allocate a new cpuid. */ 2054 /* Allocate a new cpuid. */
2064 if (nr_logical_cpuids >= nr_cpu_ids) { 2055 if (nr_logical_cpuids >= nr_cpu_ids) {
2065 WARN_ONCE(1, "Only %d processors supported." 2056 WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. "
2066 "Processor %d/0x%x and the rest are ignored.\n", 2057 "Processor %d/0x%x and the rest are ignored.\n",
2067 nr_cpu_ids - 1, nr_logical_cpuids, apicid); 2058 nr_cpu_ids, nr_logical_cpuids, apicid);
2068 return -1; 2059 return -EINVAL;
2069 } 2060 }
2070 2061
2071 cpuid_to_apicid[nr_logical_cpuids] = apicid; 2062 cpuid_to_apicid[nr_logical_cpuids] = apicid;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 35a5d5dca2fa..c36140d788fe 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -556,10 +556,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
556 if (c->x86_power & (1 << 8)) { 556 if (c->x86_power & (1 << 8)) {
557 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 557 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
558 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); 558 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
559 if (check_tsc_unstable())
560 clear_sched_clock_stable();
561 } else {
562 clear_sched_clock_stable();
563 } 559 }
564 560
565 /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */ 561 /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index adc0ebd8bed0..43955ee6715b 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -105,8 +105,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
105#ifdef CONFIG_X86_64 105#ifdef CONFIG_X86_64
106 set_cpu_cap(c, X86_FEATURE_SYSENTER32); 106 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
107#endif 107#endif
108
109 clear_sched_clock_stable();
110} 108}
111 109
112static void init_centaur(struct cpuinfo_x86 *c) 110static void init_centaur(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b11b38c3b0bd..58094a1f9e9d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,7 +88,6 @@ static void default_init(struct cpuinfo_x86 *c)
88 strcpy(c->x86_model_id, "386"); 88 strcpy(c->x86_model_id, "386");
89 } 89 }
90#endif 90#endif
91 clear_sched_clock_stable();
92} 91}
93 92
94static const struct cpu_dev default_cpu = { 93static const struct cpu_dev default_cpu = {
@@ -1077,8 +1076,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
1077 */ 1076 */
1078 if (this_cpu->c_init) 1077 if (this_cpu->c_init)
1079 this_cpu->c_init(c); 1078 this_cpu->c_init(c);
1080 else
1081 clear_sched_clock_stable();
1082 1079
1083 /* Disable the PN if appropriate */ 1080 /* Disable the PN if appropriate */
1084 squash_the_stupid_serial_number(c); 1081 squash_the_stupid_serial_number(c);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 0a3bc19de017..a70fd61095f8 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -185,7 +185,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
185 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); 185 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
186 break; 186 break;
187 } 187 }
188 clear_sched_clock_stable();
189} 188}
190 189
191static void init_cyrix(struct cpuinfo_x86 *c) 190static void init_cyrix(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fe0a615a051b..063197771b8d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -162,10 +162,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
162 if (c->x86_power & (1 << 8)) { 162 if (c->x86_power & (1 << 8)) {
163 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 163 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
164 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); 164 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
165 if (check_tsc_unstable())
166 clear_sched_clock_stable();
167 } else {
168 clear_sched_clock_stable();
169 } 165 }
170 166
171 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ 167 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 0bbe0f3a039f..c05509d38b1f 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -28,7 +28,6 @@
28#include <linux/sched/signal.h> 28#include <linux/sched/signal.h>
29#include <linux/sched/task.h> 29#include <linux/sched/task.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/cpu.h>
32#include <linux/task_work.h> 31#include <linux/task_work.h>
33 32
34#include <uapi/linux/magic.h> 33#include <uapi/linux/magic.h>
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 8457b4978668..d77d07ab310b 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -16,8 +16,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
16 if (xlvl >= 0x80860001) 16 if (xlvl >= 0x80860001)
17 c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); 17 c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
18 } 18 }
19
20 clear_sched_clock_stable();
21} 19}
22 20
23static void init_transmeta(struct cpuinfo_x86 *c) 21static void init_transmeta(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 891f4dad7b2c..22403a28caf5 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -30,7 +30,6 @@
30#include <asm/hypervisor.h> 30#include <asm/hypervisor.h>
31#include <asm/timer.h> 31#include <asm/timer.h>
32#include <asm/apic.h> 32#include <asm/apic.h>
33#include <asm/timer.h>
34 33
35#undef pr_fmt 34#undef pr_fmt
36#define pr_fmt(fmt) "vmware: " fmt 35#define pr_fmt(fmt) "vmware: " fmt
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8639bb2ae058..8f3d9cf26ff9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -535,7 +535,7 @@ static void run_sync(void)
535{ 535{
536 int enable_irqs = irqs_disabled(); 536 int enable_irqs = irqs_disabled();
537 537
538 /* We may be called with interrupts disbled (on bootup). */ 538 /* We may be called with interrupts disabled (on bootup). */
539 if (enable_irqs) 539 if (enable_irqs)
540 local_irq_enable(); 540 local_irq_enable();
541 on_each_cpu(do_sync_core, NULL, 1); 541 on_each_cpu(do_sync_core, NULL, 1);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index dc6ba5bda9fc..89ff7af2de50 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
354 354
355 irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq)); 355 irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
356 irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); 356 irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
357 disable_irq(hdev->irq); 357 disable_hardirq(hdev->irq);
358 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); 358 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
359 enable_irq(hdev->irq); 359 enable_irq(hdev->irq);
360 } 360 }
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index bdb83e431d89..38b64587b31b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -167,7 +167,7 @@ static int __init boot_params_kdebugfs_init(void)
167 struct dentry *dbp, *version, *data; 167 struct dentry *dbp, *version, *data;
168 int error = -ENOMEM; 168 int error = -ENOMEM;
169 169
170 dbp = debugfs_create_dir("boot_params", NULL); 170 dbp = debugfs_create_dir("boot_params", arch_debugfs_dir);
171 if (!dbp) 171 if (!dbp)
172 return -ENOMEM; 172 return -ENOMEM;
173 173
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index c6ee63f927ab..d688826e5736 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
67#endif 67#endif
68 68
69/* Ensure if the instruction can be boostable */ 69/* Ensure if the instruction can be boostable */
70extern int can_boost(kprobe_opcode_t *instruction); 70extern int can_boost(kprobe_opcode_t *instruction, void *addr);
71/* Recover instruction if given address is probed */ 71/* Recover instruction if given address is probed */
72extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, 72extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
73 unsigned long addr); 73 unsigned long addr);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6384eb754a58..993fa4fe4f68 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -167,12 +167,12 @@ NOKPROBE_SYMBOL(skip_prefixes);
167 * Returns non-zero if opcode is boostable. 167 * Returns non-zero if opcode is boostable.
168 * RIP relative instructions are adjusted at copying time in 64 bits mode 168 * RIP relative instructions are adjusted at copying time in 64 bits mode
169 */ 169 */
170int can_boost(kprobe_opcode_t *opcodes) 170int can_boost(kprobe_opcode_t *opcodes, void *addr)
171{ 171{
172 kprobe_opcode_t opcode; 172 kprobe_opcode_t opcode;
173 kprobe_opcode_t *orig_opcodes = opcodes; 173 kprobe_opcode_t *orig_opcodes = opcodes;
174 174
175 if (search_exception_tables((unsigned long)opcodes)) 175 if (search_exception_tables((unsigned long)addr))
176 return 0; /* Page fault may occur on this address. */ 176 return 0; /* Page fault may occur on this address. */
177 177
178retry: 178retry:
@@ -417,7 +417,7 @@ static int arch_copy_kprobe(struct kprobe *p)
417 * __copy_instruction can modify the displacement of the instruction, 417 * __copy_instruction can modify the displacement of the instruction,
418 * but it doesn't affect boostable check. 418 * but it doesn't affect boostable check.
419 */ 419 */
420 if (can_boost(p->ainsn.insn)) 420 if (can_boost(p->ainsn.insn, p->addr))
421 p->ainsn.boostable = 0; 421 p->ainsn.boostable = 0;
422 else 422 else
423 p->ainsn.boostable = -1; 423 p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 3d1bee9d6a72..3e7c6e5a08ff 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -178,7 +178,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
178 178
179 while (len < RELATIVEJUMP_SIZE) { 179 while (len < RELATIVEJUMP_SIZE) {
180 ret = __copy_instruction(dest + len, src + len); 180 ret = __copy_instruction(dest + len, src + len);
181 if (!ret || !can_boost(dest + len)) 181 if (!ret || !can_boost(dest + len, src + len))
182 return -EINVAL; 182 return -EINVAL;
183 len += ret; 183 len += ret;
184 } 184 }
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 307b1f4543de..857cdbd02867 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -194,19 +194,22 @@ static int arch_update_purgatory(struct kimage *image)
194 194
195 /* Setup copying of backup region */ 195 /* Setup copying of backup region */
196 if (image->type == KEXEC_TYPE_CRASH) { 196 if (image->type == KEXEC_TYPE_CRASH) {
197 ret = kexec_purgatory_get_set_symbol(image, "backup_dest", 197 ret = kexec_purgatory_get_set_symbol(image,
198 "purgatory_backup_dest",
198 &image->arch.backup_load_addr, 199 &image->arch.backup_load_addr,
199 sizeof(image->arch.backup_load_addr), 0); 200 sizeof(image->arch.backup_load_addr), 0);
200 if (ret) 201 if (ret)
201 return ret; 202 return ret;
202 203
203 ret = kexec_purgatory_get_set_symbol(image, "backup_src", 204 ret = kexec_purgatory_get_set_symbol(image,
205 "purgatory_backup_src",
204 &image->arch.backup_src_start, 206 &image->arch.backup_src_start,
205 sizeof(image->arch.backup_src_start), 0); 207 sizeof(image->arch.backup_src_start), 0);
206 if (ret) 208 if (ret)
207 return ret; 209 return ret;
208 210
209 ret = kexec_purgatory_get_set_symbol(image, "backup_sz", 211 ret = kexec_purgatory_get_set_symbol(image,
212 "purgatory_backup_sz",
210 &image->arch.backup_src_sz, 213 &image->arch.backup_src_sz,
211 sizeof(image->arch.backup_src_sz), 0); 214 sizeof(image->arch.backup_src_sz), 0);
212 if (ret) 215 if (ret)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e244c19a2451..067f9813fd2c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -223,6 +223,22 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
223 DMI_MATCH(DMI_BOARD_NAME, "P4S800"), 223 DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
224 }, 224 },
225 }, 225 },
226 { /* Handle problems with rebooting on ASUS EeeBook X205TA */
227 .callback = set_acpi_reboot,
228 .ident = "ASUS EeeBook X205TA",
229 .matches = {
230 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
231 DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"),
232 },
233 },
234 { /* Handle problems with rebooting on ASUS EeeBook X205TAW */
235 .callback = set_acpi_reboot,
236 .ident = "ASUS EeeBook X205TAW",
237 .matches = {
238 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
239 DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"),
240 },
241 },
226 242
227 /* Certec */ 243 /* Certec */
228 { /* Handle problems with rebooting on Certec BPC600 */ 244 { /* Handle problems with rebooting on Certec BPC600 */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 46bcda4cb1c2..4f7a9833d8e5 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -327,9 +327,16 @@ unsigned long long sched_clock(void)
327{ 327{
328 return paravirt_sched_clock(); 328 return paravirt_sched_clock();
329} 329}
330
331static inline bool using_native_sched_clock(void)
332{
333 return pv_time_ops.sched_clock == native_sched_clock;
334}
330#else 335#else
331unsigned long long 336unsigned long long
332sched_clock(void) __attribute__((alias("native_sched_clock"))); 337sched_clock(void) __attribute__((alias("native_sched_clock")));
338
339static inline bool using_native_sched_clock(void) { return true; }
333#endif 340#endif
334 341
335int check_tsc_unstable(void) 342int check_tsc_unstable(void)
@@ -1112,8 +1119,10 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
1112{ 1119{
1113 if (tsc_unstable) 1120 if (tsc_unstable)
1114 return; 1121 return;
1122
1115 tsc_unstable = 1; 1123 tsc_unstable = 1;
1116 clear_sched_clock_stable(); 1124 if (using_native_sched_clock())
1125 clear_sched_clock_stable();
1117 disable_sched_clock_irqtime(); 1126 disable_sched_clock_irqtime();
1118 pr_info("Marking TSC unstable due to clocksource watchdog\n"); 1127 pr_info("Marking TSC unstable due to clocksource watchdog\n");
1119} 1128}
@@ -1135,18 +1144,20 @@ static struct clocksource clocksource_tsc = {
1135 1144
1136void mark_tsc_unstable(char *reason) 1145void mark_tsc_unstable(char *reason)
1137{ 1146{
1138 if (!tsc_unstable) { 1147 if (tsc_unstable)
1139 tsc_unstable = 1; 1148 return;
1149
1150 tsc_unstable = 1;
1151 if (using_native_sched_clock())
1140 clear_sched_clock_stable(); 1152 clear_sched_clock_stable();
1141 disable_sched_clock_irqtime(); 1153 disable_sched_clock_irqtime();
1142 pr_info("Marking TSC unstable due to %s\n", reason); 1154 pr_info("Marking TSC unstable due to %s\n", reason);
1143 /* Change only the rating, when not registered */ 1155 /* Change only the rating, when not registered */
1144 if (clocksource_tsc.mult) 1156 if (clocksource_tsc.mult) {
1145 clocksource_mark_unstable(&clocksource_tsc); 1157 clocksource_mark_unstable(&clocksource_tsc);
1146 else { 1158 } else {
1147 clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; 1159 clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
1148 clocksource_tsc.rating = 0; 1160 clocksource_tsc.rating = 0;
1149 }
1150 } 1161 }
1151} 1162}
1152 1163
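Both unstable-marking paths now share the same guard: clearing the sched-clock-stable flag only makes sense when sched_clock() is the TSC-backed native implementation, since a paravirt sched_clock is unaffected by what the TSC watchdog finds. Distilled as a hedged outline:

	static void mark_unstable_common(void)
	{
		if (tsc_unstable)
			return;
		tsc_unstable = 1;
		if (using_native_sched_clock())	/* pv clocks keep their state */
			clear_sched_clock_stable();
		disable_sched_clock_irqtime();
	}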
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 283aa8601833..98e82ee1e699 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7258,9 +7258,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
7258static int handle_vmclear(struct kvm_vcpu *vcpu) 7258static int handle_vmclear(struct kvm_vcpu *vcpu)
7259{ 7259{
7260 struct vcpu_vmx *vmx = to_vmx(vcpu); 7260 struct vcpu_vmx *vmx = to_vmx(vcpu);
7261 u32 zero = 0;
7261 gpa_t vmptr; 7262 gpa_t vmptr;
7262 struct vmcs12 *vmcs12;
7263 struct page *page;
7264 7263
7265 if (!nested_vmx_check_permission(vcpu)) 7264 if (!nested_vmx_check_permission(vcpu))
7266 return 1; 7265 return 1;
@@ -7271,22 +7270,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
7271 if (vmptr == vmx->nested.current_vmptr) 7270 if (vmptr == vmx->nested.current_vmptr)
7272 nested_release_vmcs12(vmx); 7271 nested_release_vmcs12(vmx);
7273 7272
7274 page = nested_get_page(vcpu, vmptr); 7273 kvm_vcpu_write_guest(vcpu,
7275 if (page == NULL) { 7274 vmptr + offsetof(struct vmcs12, launch_state),
7276 /* 7275 &zero, sizeof(zero));
7277 * For accurate processor emulation, VMCLEAR beyond available
7278 * physical memory should do nothing at all. However, it is
7279 * possible that a nested vmx bug, not a guest hypervisor bug,
7280 * resulted in this case, so let's shut down before doing any
7281 * more damage:
7282 */
7283 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
7284 return 1;
7285 }
7286 vmcs12 = kmap(page);
7287 vmcs12->launch_state = 0;
7288 kunmap(page);
7289 nested_release_page(page);
7290 7276
7291 nested_free_vmcs02(vmx, vmptr); 7277 nested_free_vmcs02(vmx, vmptr);
7292 7278
@@ -9694,10 +9680,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
9694 return false; 9680 return false;
9695 9681
9696 page = nested_get_page(vcpu, vmcs12->msr_bitmap); 9682 page = nested_get_page(vcpu, vmcs12->msr_bitmap);
9697 if (!page) { 9683 if (!page)
9698 WARN_ON(1);
9699 return false; 9684 return false;
9700 }
9701 msr_bitmap_l1 = (unsigned long *)kmap(page); 9685 msr_bitmap_l1 = (unsigned long *)kmap(page);
9702 9686
9703 memset(msr_bitmap_l0, 0xff, PAGE_SIZE); 9687 memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
@@ -11121,8 +11105,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
11121 */ 11105 */
11122static void vmx_leave_nested(struct kvm_vcpu *vcpu) 11106static void vmx_leave_nested(struct kvm_vcpu *vcpu)
11123{ 11107{
11124 if (is_guest_mode(vcpu)) 11108 if (is_guest_mode(vcpu)) {
11109 to_vmx(vcpu)->nested.nested_run_pending = 0;
11125 nested_vmx_vmexit(vcpu, -1, 0, 0); 11110 nested_vmx_vmexit(vcpu, -1, 0, 0);
11111 }
11126 free_nested(to_vmx(vcpu)); 11112 free_nested(to_vmx(vcpu));
11127} 11113}
11128 11114
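In the handle_vmclear() hunk, rather than pinning the guest page with nested_get_page()/kmap() just to zero one field, the new code writes a zero directly to the field's guest-physical address, computed as vmptr plus offsetof(struct vmcs12, launch_state). A hedged stand-alone sketch of that addressing, with a toy struct and plain memcpy standing in for kvm_vcpu_write_guest():

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

/* Toy layout; the real struct vmcs12 is far larger. */
struct vmcs12 {
        uint32_t revision_id;
        uint32_t abort;
        uint32_t launch_state;
};

/* Stand-in for kvm_vcpu_write_guest(): copy into "guest" memory. */
static int write_guest(void *guest_base, size_t gpa_off,
                       const void *data, size_t len)
{
        memcpy((char *)guest_base + gpa_off, data, len);
        return 0;
}

int main(void)
{
        struct vmcs12 g = { .launch_state = 1 };
        uint32_t zero = 0;

        /* Clear only launch_state, addressed by its struct offset. */
        write_guest(&g, offsetof(struct vmcs12, launch_state),
                    &zero, sizeof(zero));
        printf("launch_state = %u\n", (unsigned)g.launch_state);
        return 0;
}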
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 99c7805a9693..1f3b6ef105cd 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -106,32 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
106 unsigned long end, int write, struct page **pages, int *nr) 106 unsigned long end, int write, struct page **pages, int *nr)
107{ 107{
108 struct dev_pagemap *pgmap = NULL; 108 struct dev_pagemap *pgmap = NULL;
109 int nr_start = *nr; 109 int nr_start = *nr, ret = 0;
110 pte_t *ptep; 110 pte_t *ptep, *ptem;
111 111
112 ptep = pte_offset_map(&pmd, addr); 112 /*
113 * Keep the original mapped PTE value (ptem) around since we
114 * might increment ptep off the end of the page when finishing
115 * our loop iteration.
116 */
117 ptem = ptep = pte_offset_map(&pmd, addr);
113 do { 118 do {
114 pte_t pte = gup_get_pte(ptep); 119 pte_t pte = gup_get_pte(ptep);
115 struct page *page; 120 struct page *page;
116 121
117 /* Similar to the PMD case, NUMA hinting must take slow path */ 122 /* Similar to the PMD case, NUMA hinting must take slow path */
118 if (pte_protnone(pte)) { 123 if (pte_protnone(pte))
119 pte_unmap(ptep); 124 break;
120 return 0; 125
121 } 126 if (!pte_allows_gup(pte_val(pte), write))
127 break;
122 128
123 if (pte_devmap(pte)) { 129 if (pte_devmap(pte)) {
124 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); 130 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
125 if (unlikely(!pgmap)) { 131 if (unlikely(!pgmap)) {
126 undo_dev_pagemap(nr, nr_start, pages); 132 undo_dev_pagemap(nr, nr_start, pages);
127 pte_unmap(ptep); 133 break;
128 return 0;
129 } 134 }
130 } else if (!pte_allows_gup(pte_val(pte), write) || 135 } else if (pte_special(pte))
131 pte_special(pte)) { 136 break;
132 pte_unmap(ptep); 137
133 return 0;
134 }
135 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 138 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
136 page = pte_page(pte); 139 page = pte_page(pte);
137 get_page(page); 140 get_page(page);
@@ -141,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
141 (*nr)++; 144 (*nr)++;
142 145
143 } while (ptep++, addr += PAGE_SIZE, addr != end); 146 } while (ptep++, addr += PAGE_SIZE, addr != end);
144 pte_unmap(ptep - 1); 147 if (addr == end)
148 ret = 1;
149 pte_unmap(ptem);
145 150
146 return 1; 151 return ret;
147} 152}
148 153
149static inline void get_head_page_multiple(struct page *page, int nr) 154static inline void get_head_page_multiple(struct page *page, int nr)
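The gup_pte_range() rewrite converts the early "unmap and return 0" exits into plain break statements: the originally mapped pointer is saved in ptem, pte_unmap() runs exactly once on it, and success is signalled only if the cursor actually reached end. A small sketch of that loop shape over an integer array (assumes a non-empty range, as the kernel loop does):

#include <stdio.h>

static int walk_range(const int *start, const int *end)
{
        const int *p = start;   /* "ptem": remember the mapped start */
        int ret = 0;

        do {
                if (*p < 0)     /* special-case entry: bail to slow path */
                        break;
        } while (p++, p != end);

        if (p == end)
                ret = 1;
        /* unmap(start) would go here: one call, on every exit path */
        return ret;
}

int main(void)
{
        int ok[]  = { 1, 2, 3 };
        int bad[] = { 1, -1, 3 };

        printf("%d %d\n", walk_range(ok, ok + 3),
               walk_range(bad, bad + 3));   /* prints: 1 0 */
        return 0;
}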
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 0cb52ae0a8f0..190e718694b1 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -735,6 +735,15 @@ void pcibios_disable_device (struct pci_dev *dev)
735 pcibios_disable_irq(dev); 735 pcibios_disable_irq(dev);
736} 736}
737 737
738#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
739void pcibios_release_device(struct pci_dev *dev)
740{
741 if (atomic_dec_return(&dev->enable_cnt) >= 0)
742 pcibios_disable_device(dev);
743
744}
745#endif
746
738int pci_ext_cfg_avail(void) 747int pci_ext_cfg_avail(void)
739{ 748{
740 if (raw_pci_ext_ops) 749 if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index e1fb269c87af..292ab0364a89 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
234 return 1; 234 return 1;
235 235
236 for_each_pci_msi_entry(msidesc, dev) { 236 for_each_pci_msi_entry(msidesc, dev) {
237 __pci_read_msi_msg(msidesc, &msg); 237 pirq = xen_allocate_pirq_msi(dev, msidesc);
238 pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | 238 if (pirq < 0) {
239 ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); 239 irq = -ENODEV;
240 if (msg.data != XEN_PIRQ_MSI_DATA || 240 goto error;
241 xen_irq_from_pirq(pirq) < 0) {
242 pirq = xen_allocate_pirq_msi(dev, msidesc);
243 if (pirq < 0) {
244 irq = -ENODEV;
245 goto error;
246 }
247 xen_msi_compose_msg(dev, pirq, &msg);
248 __pci_write_msi_msg(msidesc, &msg);
249 dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
250 } else {
251 dev_dbg(&dev->dev,
252 "xen: msi already bound to pirq=%d\n", pirq);
253 } 241 }
242 xen_msi_compose_msg(dev, pirq, &msg);
243 __pci_write_msi_msg(msidesc, &msg);
244 dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
254 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 245 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
255 (type == PCI_CAP_ID_MSI) ? nvec : 1, 246 (type == PCI_CAP_ID_MSI) ? nvec : 1,
256 (type == PCI_CAP_ID_MSIX) ? 247 (type == PCI_CAP_ID_MSIX) ?
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 766d4d3529a1..f25982cdff90 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1847,7 +1847,6 @@ static void pq_init(int node, int pnode)
1847 1847
1848 ops.write_payload_first(pnode, first); 1848 ops.write_payload_first(pnode, first);
1849 ops.write_payload_last(pnode, last); 1849 ops.write_payload_last(pnode, last);
1850 ops.write_g_sw_ack(pnode, 0xffffUL);
1851 1850
1852 /* in effect, all msg_type's are set to MSG_NOOP */ 1851 /* in effect, all msg_type's are set to MSG_NOOP */
1853 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); 1852 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 25e068ba3382..470edad96bb9 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -10,21 +10,19 @@
10 * Version 2. See the file COPYING for more details. 10 * Version 2. See the file COPYING for more details.
11 */ 11 */
12 12
13#include <linux/bug.h>
14#include <asm/purgatory.h>
15
13#include "sha256.h" 16#include "sha256.h"
14#include "../boot/string.h" 17#include "../boot/string.h"
15 18
16struct sha_region { 19unsigned long purgatory_backup_dest __section(.kexec-purgatory);
17 unsigned long start; 20unsigned long purgatory_backup_src __section(.kexec-purgatory);
18 unsigned long len; 21unsigned long purgatory_backup_sz __section(.kexec-purgatory);
19};
20
21unsigned long backup_dest = 0;
22unsigned long backup_src = 0;
23unsigned long backup_sz = 0;
24 22
25u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 }; 23u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory);
26 24
27struct sha_region sha_regions[16] = {}; 25struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory);
28 26
29/* 27/*
 30 * On x86, second kernel requires first 640K of memory to boot. Copy 28 * On x86, second kernel requires first 640K of memory to boot. Copy
@@ -33,26 +31,28 @@ struct sha_region sha_regions[16] = {};
33 */ 31 */
34static int copy_backup_region(void) 32static int copy_backup_region(void)
35{ 33{
36 if (backup_dest) 34 if (purgatory_backup_dest) {
37 memcpy((void *)backup_dest, (void *)backup_src, backup_sz); 35 memcpy((void *)purgatory_backup_dest,
38 36 (void *)purgatory_backup_src, purgatory_backup_sz);
37 }
39 return 0; 38 return 0;
40} 39}
41 40
42int verify_sha256_digest(void) 41static int verify_sha256_digest(void)
43{ 42{
44 struct sha_region *ptr, *end; 43 struct kexec_sha_region *ptr, *end;
45 u8 digest[SHA256_DIGEST_SIZE]; 44 u8 digest[SHA256_DIGEST_SIZE];
46 struct sha256_state sctx; 45 struct sha256_state sctx;
47 46
48 sha256_init(&sctx); 47 sha256_init(&sctx);
49 end = &sha_regions[sizeof(sha_regions)/sizeof(sha_regions[0])]; 48 end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
50 for (ptr = sha_regions; ptr < end; ptr++) 49
50 for (ptr = purgatory_sha_regions; ptr < end; ptr++)
51 sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); 51 sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
52 52
53 sha256_final(&sctx, digest); 53 sha256_final(&sctx, digest);
54 54
55 if (memcmp(digest, sha256_digest, sizeof(digest))) 55 if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
56 return 1; 56 return 1;
57 57
58 return 0; 58 return 0;
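The purgatory.c changes rename the globals with a purgatory_ prefix and pin them into the .kexec-purgatory section so the kexec-file loader can locate them, and they replace the open-coded size division with ARRAY_SIZE() for the end-pointer walk. A hedged sketch of both idioms; the section attribute shown is what the kernel's __section() macro expands to on GCC/Clang, and the names are illustrative:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct region { unsigned long start, len; };

/* Section placement, as __section(.kexec-purgatory) expands to: */
unsigned long backup_dest __attribute__((section(".kexec-purgatory")));

/* Illustrative table; the kernel sizes it with KEXEC_SEGMENT_MAX. */
static struct region regions[16];

int main(void)
{
        struct region *ptr, *end;

        /* end-pointer idiom from the patch: base + ARRAY_SIZE(base) */
        end = regions + ARRAY_SIZE(regions);
        for (ptr = regions; ptr < end; ptr++)
                ptr->len = 0;

        printf("%zu regions walked\n", ARRAY_SIZE(regions));
        return 0;
}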
diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S
index fe3c91ba1bd0..dfae9b9e60b5 100644
--- a/arch/x86/purgatory/setup-x86_64.S
+++ b/arch/x86/purgatory/setup-x86_64.S
@@ -9,6 +9,7 @@
9 * This source code is licensed under the GNU General Public License, 9 * This source code is licensed under the GNU General Public License,
10 * Version 2. See the file COPYING for more details. 10 * Version 2. See the file COPYING for more details.
11 */ 11 */
12#include <asm/purgatory.h>
12 13
13 .text 14 .text
14 .globl purgatory_start 15 .globl purgatory_start
diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h
index bd15a4127735..2867d9825a57 100644
--- a/arch/x86/purgatory/sha256.h
+++ b/arch/x86/purgatory/sha256.h
@@ -10,7 +10,6 @@
10#ifndef SHA256_H 10#ifndef SHA256_H
11#define SHA256_H 11#define SHA256_H
12 12
13
14#include <linux/types.h> 13#include <linux/types.h>
15#include <crypto/sha.h> 14#include <crypto/sha.h>
16 15
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 8aa0e0d9cbb2..30dd5b2e4ad5 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -11,6 +11,7 @@
11#ifndef _XTENSA_PGTABLE_H 11#ifndef _XTENSA_PGTABLE_H
12#define _XTENSA_PGTABLE_H 12#define _XTENSA_PGTABLE_H
13 13
14#define __ARCH_USE_5LEVEL_HACK
14#include <asm-generic/pgtable-nopmd.h> 15#include <asm-generic/pgtable-nopmd.h>
15#include <asm/page.h> 16#include <asm/page.h>
16#include <asm/kmem_layout.h> 17#include <asm/kmem_layout.h>
diff --git a/block/bio.c b/block/bio.c
index 5eec5e08417f..e75878f8b14a 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
376 bio_list_init(&punt); 376 bio_list_init(&punt);
377 bio_list_init(&nopunt); 377 bio_list_init(&nopunt);
378 378
379 while ((bio = bio_list_pop(current->bio_list))) 379 while ((bio = bio_list_pop(&current->bio_list[0])))
380 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); 380 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
381 current->bio_list[0] = nopunt;
381 382
382 *current->bio_list = nopunt; 383 bio_list_init(&nopunt);
384 while ((bio = bio_list_pop(&current->bio_list[1])))
385 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
386 current->bio_list[1] = nopunt;
383 387
384 spin_lock(&bs->rescue_lock); 388 spin_lock(&bs->rescue_lock);
385 bio_list_merge(&bs->rescue_list, &punt); 389 bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
466 * we retry with the original gfp_flags. 470 * we retry with the original gfp_flags.
467 */ 471 */
468 472
469 if (current->bio_list && !bio_list_empty(current->bio_list)) 473 if (current->bio_list &&
474 (!bio_list_empty(&current->bio_list[0]) ||
475 !bio_list_empty(&current->bio_list[1])))
470 gfp_mask &= ~__GFP_DIRECT_RECLAIM; 476 gfp_mask &= ~__GFP_DIRECT_RECLAIM;
471 477
472 p = mempool_alloc(bs->bio_pool, gfp_mask); 478 p = mempool_alloc(bs->bio_pool, gfp_mask);
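With current->bio_list now a two-element array, punt_bios_to_rescuer() must drain and filter both lists, not just one. A user-space sketch of the per-list pop/filter/write-back pattern (toy singly linked lists, LIFO order for brevity; all names are stand-ins):

#include <stdio.h>

struct bio { struct bio *next; int pool; };
struct bio_list { struct bio *head; };

static struct bio *pop(struct bio_list *l)
{
        struct bio *b = l->head;
        if (b)
                l->head = b->next;
        return b;
}

static void add(struct bio_list *l, struct bio *b)
{
        b->next = l->head;
        l->head = b;
}

/* Split every list in the array into "punt" vs "keep", as the patch
 * now does for both bio_list[0] and bio_list[1]. */
static void drain(struct bio_list *lists, int n, int punt_pool,
                  struct bio_list *punt)
{
        for (int i = 0; i < n; i++) {
                struct bio_list keep = { 0 };
                struct bio *b;

                while ((b = pop(&lists[i])))
                        add(b->pool == punt_pool ? punt : &keep, b);
                lists[i] = keep;        /* write back what we kept */
        }
}

int main(void)
{
        struct bio a = { 0, 1 }, b = { 0, 2 };
        struct bio_list lists[2] = { { 0 }, { 0 } }, punt = { 0 };

        add(&lists[0], &a);
        add(&lists[1], &b);
        drain(lists, 2, 1, &punt);
        printf("punted pool-1 bio: %s\n", punt.head ? "yes" : "no");
        return 0;
}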
diff --git a/block/blk-core.c b/block/blk-core.c
index 1086dac8724c..d772c221cc17 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -578,8 +578,6 @@ void blk_cleanup_queue(struct request_queue *q)
578 q->queue_lock = &q->__queue_lock; 578 q->queue_lock = &q->__queue_lock;
579 spin_unlock_irq(lock); 579 spin_unlock_irq(lock);
580 580
581 put_disk_devt(q->disk_devt);
582
583 /* @q is and will stay empty, shutdown and put */ 581 /* @q is and will stay empty, shutdown and put */
584 blk_put_queue(q); 582 blk_put_queue(q);
585} 583}
@@ -1975,7 +1973,14 @@ end_io:
1975 */ 1973 */
1976blk_qc_t generic_make_request(struct bio *bio) 1974blk_qc_t generic_make_request(struct bio *bio)
1977{ 1975{
1978 struct bio_list bio_list_on_stack; 1976 /*
1977 * bio_list_on_stack[0] contains bios submitted by the current
1978 * make_request_fn.
1979 * bio_list_on_stack[1] contains bios that were submitted before
1980 * the current make_request_fn, but that haven't been processed
1981 * yet.
1982 */
1983 struct bio_list bio_list_on_stack[2];
1979 blk_qc_t ret = BLK_QC_T_NONE; 1984 blk_qc_t ret = BLK_QC_T_NONE;
1980 1985
1981 if (!generic_make_request_checks(bio)) 1986 if (!generic_make_request_checks(bio))
@@ -1992,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
1992 * should be added at the tail 1997 * should be added at the tail
1993 */ 1998 */
1994 if (current->bio_list) { 1999 if (current->bio_list) {
1995 bio_list_add(current->bio_list, bio); 2000 bio_list_add(&current->bio_list[0], bio);
1996 goto out; 2001 goto out;
1997 } 2002 }
1998 2003
@@ -2011,23 +2016,39 @@ blk_qc_t generic_make_request(struct bio *bio)
2011 * bio_list, and call into ->make_request() again. 2016 * bio_list, and call into ->make_request() again.
2012 */ 2017 */
2013 BUG_ON(bio->bi_next); 2018 BUG_ON(bio->bi_next);
2014 bio_list_init(&bio_list_on_stack); 2019 bio_list_init(&bio_list_on_stack[0]);
2015 current->bio_list = &bio_list_on_stack; 2020 current->bio_list = bio_list_on_stack;
2016 do { 2021 do {
2017 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2022 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2018 2023
2019 if (likely(blk_queue_enter(q, false) == 0)) { 2024 if (likely(blk_queue_enter(q, false) == 0)) {
2025 struct bio_list lower, same;
2026
2027 /* Create a fresh bio_list for all subordinate requests */
2028 bio_list_on_stack[1] = bio_list_on_stack[0];
2029 bio_list_init(&bio_list_on_stack[0]);
2020 ret = q->make_request_fn(q, bio); 2030 ret = q->make_request_fn(q, bio);
2021 2031
2022 blk_queue_exit(q); 2032 blk_queue_exit(q);
2023 2033
2024 bio = bio_list_pop(current->bio_list); 2034 /* sort new bios into those for a lower level
2035 * and those for the same level
2036 */
2037 bio_list_init(&lower);
2038 bio_list_init(&same);
2039 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
2040 if (q == bdev_get_queue(bio->bi_bdev))
2041 bio_list_add(&same, bio);
2042 else
2043 bio_list_add(&lower, bio);
2044 /* now assemble so we handle the lowest level first */
2045 bio_list_merge(&bio_list_on_stack[0], &lower);
2046 bio_list_merge(&bio_list_on_stack[0], &same);
2047 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
2025 } else { 2048 } else {
2026 struct bio *bio_next = bio_list_pop(current->bio_list);
2027
2028 bio_io_error(bio); 2049 bio_io_error(bio);
2029 bio = bio_next;
2030 } 2050 }
2051 bio = bio_list_pop(&bio_list_on_stack[0]);
2031 } while (bio); 2052 } while (bio);
2032 current->bio_list = NULL; /* deactivate */ 2053 current->bio_list = NULL; /* deactivate */
2033 2054
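The generic_make_request() rework gives each make_request_fn call a fresh on-stack list, then partitions whatever that call emitted into bios for a lower-level device and bios for the same device, re-merging them lowest-level first ahead of the previously pending work. That ordering is what bounds recursion for stacked drivers such as md/dm. A sketch of just the partition-and-merge ordering, with integer "queue ids" standing in for request queues:

#include <stdio.h>

#define MAX 16
struct list { int v[MAX]; int n; };

static void push(struct list *l, int v) { l->v[l->n++] = v; }
static void merge(struct list *dst, const struct list *src)
{
        for (int i = 0; i < src->n; i++)
                push(dst, src->v[i]);
}

int main(void)
{
        struct list pending = { { 3, 3 }, 2 };    /* bio_list_on_stack[1] */
        struct list fresh   = { { 2, 3, 2 }, 3 }; /* emitted by this call */
        struct list lower = { 0 }, same = { 0 }, out = { 0 };
        int cur = 3;                              /* current queue id */

        for (int i = 0; i < fresh.n; i++)
                push(fresh.v[i] == cur ? &same : &lower, fresh.v[i]);

        merge(&out, &lower);    /* handle the lowest level first */
        merge(&out, &same);
        merge(&out, &pending);

        for (int i = 0; i < out.n; i++)
                printf("%d ", out.v[i]);
        printf("\n");           /* prints: 2 2 3 3 3 */
        return 0;
}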
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 295e69670c39..d745ab81033a 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -17,6 +17,15 @@ static void blk_mq_sysfs_release(struct kobject *kobj)
17{ 17{
18} 18}
19 19
20static void blk_mq_hw_sysfs_release(struct kobject *kobj)
21{
22 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
23 kobj);
24 free_cpumask_var(hctx->cpumask);
25 kfree(hctx->ctxs);
26 kfree(hctx);
27}
28
20struct blk_mq_ctx_sysfs_entry { 29struct blk_mq_ctx_sysfs_entry {
21 struct attribute attr; 30 struct attribute attr;
22 ssize_t (*show)(struct blk_mq_ctx *, char *); 31 ssize_t (*show)(struct blk_mq_ctx *, char *);
@@ -200,7 +209,7 @@ static struct kobj_type blk_mq_ctx_ktype = {
200static struct kobj_type blk_mq_hw_ktype = { 209static struct kobj_type blk_mq_hw_ktype = {
201 .sysfs_ops = &blk_mq_hw_sysfs_ops, 210 .sysfs_ops = &blk_mq_hw_sysfs_ops,
202 .default_attrs = default_hw_ctx_attrs, 211 .default_attrs = default_hw_ctx_attrs,
203 .release = blk_mq_sysfs_release, 212 .release = blk_mq_hw_sysfs_release,
204}; 213};
205 214
206static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) 215static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -242,24 +251,15 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
242static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q) 251static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
243{ 252{
244 struct blk_mq_hw_ctx *hctx; 253 struct blk_mq_hw_ctx *hctx;
245 struct blk_mq_ctx *ctx; 254 int i;
246 int i, j;
247 255
248 queue_for_each_hw_ctx(q, hctx, i) { 256 queue_for_each_hw_ctx(q, hctx, i)
249 blk_mq_unregister_hctx(hctx); 257 blk_mq_unregister_hctx(hctx);
250 258
251 hctx_for_each_ctx(hctx, ctx, j)
252 kobject_put(&ctx->kobj);
253
254 kobject_put(&hctx->kobj);
255 }
256
257 blk_mq_debugfs_unregister_hctxs(q); 259 blk_mq_debugfs_unregister_hctxs(q);
258 260
259 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); 261 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
260 kobject_del(&q->mq_kobj); 262 kobject_del(&q->mq_kobj);
261 kobject_put(&q->mq_kobj);
262
263 kobject_put(&dev->kobj); 263 kobject_put(&dev->kobj);
264 264
265 q->mq_sysfs_init_done = false; 265 q->mq_sysfs_init_done = false;
@@ -277,7 +277,19 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
277 kobject_init(&hctx->kobj, &blk_mq_hw_ktype); 277 kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
278} 278}
279 279
280static void blk_mq_sysfs_init(struct request_queue *q) 280void blk_mq_sysfs_deinit(struct request_queue *q)
281{
282 struct blk_mq_ctx *ctx;
283 int cpu;
284
285 for_each_possible_cpu(cpu) {
286 ctx = per_cpu_ptr(q->queue_ctx, cpu);
287 kobject_put(&ctx->kobj);
288 }
289 kobject_put(&q->mq_kobj);
290}
291
292void blk_mq_sysfs_init(struct request_queue *q)
281{ 293{
282 struct blk_mq_ctx *ctx; 294 struct blk_mq_ctx *ctx;
283 int cpu; 295 int cpu;
@@ -297,8 +309,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
297 309
298 blk_mq_disable_hotplug(); 310 blk_mq_disable_hotplug();
299 311
300 blk_mq_sysfs_init(q);
301
302 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); 312 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
303 if (ret < 0) 313 if (ret < 0)
304 goto out; 314 goto out;
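blk_mq_hw_sysfs_release() now frees hctx->ctxs, the cpumask and the hctx itself from the kobject release callback, so the object lives exactly as long as its sysfs references and teardown paths only drop a reference. A minimal sketch of the container_of()-based release pattern (hand-rolled refcount in place of the kobject core):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj { int refcount; void (*release)(struct kobj *); };
struct hw_ctx { int *ctxs; struct kobj kobj; };

/* Runs only when the last reference is dropped. */
static void hw_ctx_release(struct kobj *k)
{
        struct hw_ctx *h = container_of(k, struct hw_ctx, kobj);

        free(h->ctxs);
        free(h);
        printf("hw_ctx freed\n");
}

static void kobj_put(struct kobj *k)
{
        if (--k->refcount == 0)
                k->release(k);
}

int main(void)
{
        struct hw_ctx *h = malloc(sizeof(*h));

        h->ctxs = malloc(4 * sizeof(int));
        h->kobj = (struct kobj){ .refcount = 2, .release = hw_ctx_release };

        kobj_put(&h->kobj);     /* sysfs still holds a ref: no free */
        kobj_put(&h->kobj);     /* last ref: release fires */
        return 0;
}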
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e48bc2c72615..9d97bfc4d465 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
295 for (i = 0; i < set->nr_hw_queues; i++) { 295 for (i = 0; i < set->nr_hw_queues; i++) {
296 struct blk_mq_tags *tags = set->tags[i]; 296 struct blk_mq_tags *tags = set->tags[i];
297 297
298 if (!tags)
299 continue;
300
298 for (j = 0; j < tags->nr_tags; j++) { 301 for (j = 0; j < tags->nr_tags; j++) {
299 if (!tags->static_rqs[j]) 302 if (!tags->static_rqs[j])
300 continue; 303 continue;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b2fd175e84d7..a4546f060e80 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1434 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); 1434 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1435} 1435}
1436 1436
1437static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie) 1437static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
1438 bool may_sleep)
1438{ 1439{
1439 struct request_queue *q = rq->q; 1440 struct request_queue *q = rq->q;
1440 struct blk_mq_queue_data bd = { 1441 struct blk_mq_queue_data bd = {
@@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
1475 } 1476 }
1476 1477
1477insert: 1478insert:
1478 blk_mq_sched_insert_request(rq, false, true, true, false); 1479 blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
1479} 1480}
1480 1481
1481/* 1482/*
@@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1569 1570
1570 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) { 1571 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1571 rcu_read_lock(); 1572 rcu_read_lock();
1572 blk_mq_try_issue_directly(old_rq, &cookie); 1573 blk_mq_try_issue_directly(old_rq, &cookie, false);
1573 rcu_read_unlock(); 1574 rcu_read_unlock();
1574 } else { 1575 } else {
1575 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu); 1576 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
1576 blk_mq_try_issue_directly(old_rq, &cookie); 1577 blk_mq_try_issue_directly(old_rq, &cookie, true);
1577 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx); 1578 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1578 } 1579 }
1579 goto done; 1580 goto done;
@@ -1955,16 +1956,6 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
1955 } 1956 }
1956} 1957}
1957 1958
1958static void blk_mq_free_hw_queues(struct request_queue *q,
1959 struct blk_mq_tag_set *set)
1960{
1961 struct blk_mq_hw_ctx *hctx;
1962 unsigned int i;
1963
1964 queue_for_each_hw_ctx(q, hctx, i)
1965 free_cpumask_var(hctx->cpumask);
1966}
1967
1968static int blk_mq_init_hctx(struct request_queue *q, 1959static int blk_mq_init_hctx(struct request_queue *q,
1969 struct blk_mq_tag_set *set, 1960 struct blk_mq_tag_set *set,
1970 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 1961 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
@@ -2045,7 +2036,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
2045 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); 2036 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2046 struct blk_mq_hw_ctx *hctx; 2037 struct blk_mq_hw_ctx *hctx;
2047 2038
2048 memset(__ctx, 0, sizeof(*__ctx));
2049 __ctx->cpu = i; 2039 __ctx->cpu = i;
2050 spin_lock_init(&__ctx->lock); 2040 spin_lock_init(&__ctx->lock);
2051 INIT_LIST_HEAD(&__ctx->rq_list); 2041 INIT_LIST_HEAD(&__ctx->rq_list);
@@ -2257,15 +2247,19 @@ void blk_mq_release(struct request_queue *q)
2257 queue_for_each_hw_ctx(q, hctx, i) { 2247 queue_for_each_hw_ctx(q, hctx, i) {
2258 if (!hctx) 2248 if (!hctx)
2259 continue; 2249 continue;
2260 kfree(hctx->ctxs); 2250 kobject_put(&hctx->kobj);
2261 kfree(hctx);
2262 } 2251 }
2263 2252
2264 q->mq_map = NULL; 2253 q->mq_map = NULL;
2265 2254
2266 kfree(q->queue_hw_ctx); 2255 kfree(q->queue_hw_ctx);
2267 2256
2268 /* ctx kobj stays in queue_ctx */ 2257 /*
2258 * release .mq_kobj and sw queue's kobject now because
2259 * both share lifetime with request queue.
2260 */
2261 blk_mq_sysfs_deinit(q);
2262
2269 free_percpu(q->queue_ctx); 2263 free_percpu(q->queue_ctx);
2270} 2264}
2271 2265
@@ -2330,10 +2324,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2330 if (hctx->tags) 2324 if (hctx->tags)
2331 blk_mq_free_map_and_requests(set, j); 2325 blk_mq_free_map_and_requests(set, j);
2332 blk_mq_exit_hctx(q, set, hctx, j); 2326 blk_mq_exit_hctx(q, set, hctx, j);
2333 free_cpumask_var(hctx->cpumask);
2334 kobject_put(&hctx->kobj); 2327 kobject_put(&hctx->kobj);
2335 kfree(hctx->ctxs);
2336 kfree(hctx);
2337 hctxs[j] = NULL; 2328 hctxs[j] = NULL;
2338 2329
2339 } 2330 }
@@ -2352,6 +2343,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2352 if (!q->queue_ctx) 2343 if (!q->queue_ctx)
2353 goto err_exit; 2344 goto err_exit;
2354 2345
2346 /* init q->mq_kobj and sw queues' kobjects */
2347 blk_mq_sysfs_init(q);
2348
2355 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)), 2349 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2356 GFP_KERNEL, set->numa_node); 2350 GFP_KERNEL, set->numa_node);
2357 if (!q->queue_hw_ctx) 2351 if (!q->queue_hw_ctx)
@@ -2442,7 +2436,6 @@ void blk_mq_free_queue(struct request_queue *q)
2442 blk_mq_del_queue_tag_set(q); 2436 blk_mq_del_queue_tag_set(q);
2443 2437
2444 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 2438 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2445 blk_mq_free_hw_queues(q, set);
2446} 2439}
2447 2440
2448/* Basically redo blk_mq_init_queue with queue frozen */ 2441/* Basically redo blk_mq_init_queue with queue frozen */
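blk_mq_try_issue_directly() grows a may_sleep flag because one caller holds rcu_read_lock() (sleeping forbidden) and the other srcu_read_lock() (sleeping allowed), and the insert fallback must honour that context. A toy sketch of threading the flag down to the fallback path:

#include <stdio.h>
#include <stdbool.h>

static void insert_request(bool may_sleep)
{
        /* The queue-insert fallback only blocks if the caller said
         * it is safe to do so. */
        printf("insert, %s\n", may_sleep ? "may block" : "atomic");
}

static void try_issue_directly(bool may_sleep)
{
        bool issued = false;    /* pretend the driver was busy */

        if (!issued)
                insert_request(may_sleep);
}

int main(void)
{
        try_issue_directly(false);  /* as if under rcu_read_lock() */
        try_issue_directly(true);   /* as if under srcu_read_lock() */
        return 0;
}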
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 088ced003c13..b79f9a7d8cf6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -77,6 +77,8 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
77/* 77/*
78 * sysfs helpers 78 * sysfs helpers
79 */ 79 */
80extern void blk_mq_sysfs_init(struct request_queue *q);
81extern void blk_mq_sysfs_deinit(struct request_queue *q);
80extern int blk_mq_sysfs_register(struct request_queue *q); 82extern int blk_mq_sysfs_register(struct request_queue *q);
81extern void blk_mq_sysfs_unregister(struct request_queue *q); 83extern void blk_mq_sysfs_unregister(struct request_queue *q);
82extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); 84extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
diff --git a/block/genhd.c b/block/genhd.c
index b26a5ea115d0..a9c516a8b37d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -572,20 +572,6 @@ exit:
572 disk_part_iter_exit(&piter); 572 disk_part_iter_exit(&piter);
573} 573}
574 574
575void put_disk_devt(struct disk_devt *disk_devt)
576{
577 if (disk_devt && atomic_dec_and_test(&disk_devt->count))
578 disk_devt->release(disk_devt);
579}
580EXPORT_SYMBOL(put_disk_devt);
581
582void get_disk_devt(struct disk_devt *disk_devt)
583{
584 if (disk_devt)
585 atomic_inc(&disk_devt->count);
586}
587EXPORT_SYMBOL(get_disk_devt);
588
589/** 575/**
590 * device_add_disk - add partitioning information to kernel list 576 * device_add_disk - add partitioning information to kernel list
591 * @parent: parent device for the disk 577 * @parent: parent device for the disk
@@ -626,13 +612,6 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
626 612
627 disk_alloc_events(disk); 613 disk_alloc_events(disk);
628 614
629 /*
630 * Take a reference on the devt and assign it to queue since it
631 * must not be reallocated while the bdi is registered
632 */
633 disk->queue->disk_devt = disk->disk_devt;
634 get_disk_devt(disk->disk_devt);
635
636 /* Register BDI before referencing it from bdev */ 615 /* Register BDI before referencing it from bdev */
637 bdi = disk->queue->backing_dev_info; 616 bdi = disk->queue->backing_dev_info;
638 bdi_register_owner(bdi, disk_to_dev(disk)); 617 bdi_register_owner(bdi, disk_to_dev(disk));
@@ -681,12 +660,16 @@ void del_gendisk(struct gendisk *disk)
681 disk->flags &= ~GENHD_FL_UP; 660 disk->flags &= ~GENHD_FL_UP;
682 661
683 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); 662 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
684 /* 663 if (disk->queue) {
685 * Unregister bdi before releasing device numbers (as they can get 664 /*
686 * reused and we'd get clashes in sysfs). 665 * Unregister bdi before releasing device numbers (as they can
687 */ 666 * get reused and we'd get clashes in sysfs).
688 bdi_unregister(disk->queue->backing_dev_info); 667 */
689 blk_unregister_queue(disk); 668 bdi_unregister(disk->queue->backing_dev_info);
669 blk_unregister_queue(disk);
670 } else {
671 WARN_ON(1);
672 }
690 blk_unregister_region(disk_devt(disk), disk->minors); 673 blk_unregister_region(disk_devt(disk), disk->minors);
691 674
692 part_stat_set_all(&disk->part0, 0); 675 part_stat_set_all(&disk->part0, 0);
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 1e18dca360fc..14035f826b5e 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -1023,7 +1023,6 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont)
1023 1023
1024static int gen_key(struct opal_dev *dev, void *data) 1024static int gen_key(struct opal_dev *dev, void *data)
1025{ 1025{
1026 const u8 *method;
1027 u8 uid[OPAL_UID_LENGTH]; 1026 u8 uid[OPAL_UID_LENGTH];
1028 int err = 0; 1027 int err = 0;
1029 1028
@@ -1031,7 +1030,6 @@ static int gen_key(struct opal_dev *dev, void *data)
1031 set_comid(dev, dev->comid); 1030 set_comid(dev, dev->comid);
1032 1031
1033 memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len)); 1032 memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));
1034 method = opalmethod[OPAL_GENKEY];
1035 kfree(dev->prev_data); 1033 kfree(dev->prev_data);
1036 dev->prev_data = NULL; 1034 dev->prev_data = NULL;
1037 1035
@@ -1669,7 +1667,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
1669static int lock_unlock_locking_range(struct opal_dev *dev, void *data) 1667static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
1670{ 1668{
1671 u8 lr_buffer[OPAL_UID_LENGTH]; 1669 u8 lr_buffer[OPAL_UID_LENGTH];
1672 const u8 *method;
1673 struct opal_lock_unlock *lkul = data; 1670 struct opal_lock_unlock *lkul = data;
1674 u8 read_locked = 1, write_locked = 1; 1671 u8 read_locked = 1, write_locked = 1;
1675 int err = 0; 1672 int err = 0;
@@ -1677,7 +1674,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
1677 clear_opal_cmd(dev); 1674 clear_opal_cmd(dev);
1678 set_comid(dev, dev->comid); 1675 set_comid(dev, dev->comid);
1679 1676
1680 method = opalmethod[OPAL_SET];
1681 if (build_locking_range(lr_buffer, sizeof(lr_buffer), 1677 if (build_locking_range(lr_buffer, sizeof(lr_buffer),
1682 lkul->session.opal_key.lr) < 0) 1678 lkul->session.opal_key.lr) < 0)
1683 return -ERANGE; 1679 return -ERANGE;
@@ -1733,14 +1729,12 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data)
1733{ 1729{
1734 u8 lr_buffer[OPAL_UID_LENGTH]; 1730 u8 lr_buffer[OPAL_UID_LENGTH];
1735 u8 read_locked = 1, write_locked = 1; 1731 u8 read_locked = 1, write_locked = 1;
1736 const u8 *method;
1737 struct opal_lock_unlock *lkul = data; 1732 struct opal_lock_unlock *lkul = data;
1738 int ret; 1733 int ret;
1739 1734
1740 clear_opal_cmd(dev); 1735 clear_opal_cmd(dev);
1741 set_comid(dev, dev->comid); 1736 set_comid(dev, dev->comid);
1742 1737
1743 method = opalmethod[OPAL_SET];
1744 if (build_locking_range(lr_buffer, sizeof(lr_buffer), 1738 if (build_locking_range(lr_buffer, sizeof(lr_buffer),
1745 lkul->session.opal_key.lr) < 0) 1739 lkul->session.opal_key.lr) < 0)
1746 return -ERANGE; 1740 return -ERANGE;
@@ -2133,7 +2127,7 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
2133 pr_err("Locking state was not RO or RW\n"); 2127 pr_err("Locking state was not RO or RW\n");
2134 return -EINVAL; 2128 return -EINVAL;
2135 } 2129 }
2136 if (lk_unlk->session.who < OPAL_USER1 && 2130 if (lk_unlk->session.who < OPAL_USER1 ||
2137 lk_unlk->session.who > OPAL_USER9) { 2131 lk_unlk->session.who > OPAL_USER9) {
2138 pr_err("Authority was not within the range of users: %d\n", 2132 pr_err("Authority was not within the range of users: %d\n",
2139 lk_unlk->session.who); 2133 lk_unlk->session.who);
@@ -2316,7 +2310,7 @@ static int opal_activate_user(struct opal_dev *dev,
2316 int ret; 2310 int ret;
2317 2311
2318 /* We can't activate Admin1 it's active as manufactured */ 2312 /* We can't activate Admin1 it's active as manufactured */
2319 if (opal_session->who < OPAL_USER1 && 2313 if (opal_session->who < OPAL_USER1 ||
2320 opal_session->who > OPAL_USER9) { 2314 opal_session->who > OPAL_USER9) {
2321 pr_err("Who was not a valid user: %d\n", opal_session->who); 2315 pr_err("Who was not a valid user: %d\n", opal_session->who);
2322 return -EINVAL; 2316 return -EINVAL;
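The sed-opal hunks fix a classic inverted range check: "who < OPAL_USER1 && who > OPAL_USER9" is a contradiction that can never be true, so every out-of-range authority slipped through; the condition must use ||. A tiny self-contained demonstration:

#include <stdio.h>

enum { OPAL_USER1 = 1, OPAL_USER9 = 9 };

static int check_user(int who)
{
        /* Out of range iff below the first OR above the last user.
         * The old "&&" version was always false and rejected nothing. */
        if (who < OPAL_USER1 || who > OPAL_USER9)
                return -1;
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", check_user(0), check_user(5),
               check_user(10));        /* prints: -1 0 -1 */
        return 0;
}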
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index f5e18c2a4852..690deca17c35 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -266,7 +266,7 @@ unlock:
266 return err; 266 return err;
267} 267}
268 268
269int af_alg_accept(struct sock *sk, struct socket *newsock) 269int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
270{ 270{
271 struct alg_sock *ask = alg_sk(sk); 271 struct alg_sock *ask = alg_sk(sk);
272 const struct af_alg_type *type; 272 const struct af_alg_type *type;
@@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
281 if (!type) 281 if (!type)
282 goto unlock; 282 goto unlock;
283 283
284 sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0); 284 sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
285 err = -ENOMEM; 285 err = -ENOMEM;
286 if (!sk2) 286 if (!sk2)
287 goto unlock; 287 goto unlock;
@@ -323,9 +323,10 @@ unlock:
323} 323}
324EXPORT_SYMBOL_GPL(af_alg_accept); 324EXPORT_SYMBOL_GPL(af_alg_accept);
325 325
326static int alg_accept(struct socket *sock, struct socket *newsock, int flags) 326static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
327 bool kern)
327{ 328{
328 return af_alg_accept(sock->sk, newsock); 329 return af_alg_accept(sock->sk, newsock, kern);
329} 330}
330 331
331static const struct proto_ops alg_proto_ops = { 332static const struct proto_ops alg_proto_ops = {
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 54fc90e8339c..5e92bd275ef3 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -239,7 +239,8 @@ unlock:
239 return err ?: len; 239 return err ?: len;
240} 240}
241 241
242static int hash_accept(struct socket *sock, struct socket *newsock, int flags) 242static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
243 bool kern)
243{ 244{
244 struct sock *sk = sock->sk; 245 struct sock *sk = sock->sk;
245 struct alg_sock *ask = alg_sk(sk); 246 struct alg_sock *ask = alg_sk(sk);
@@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
260 if (err) 261 if (err)
261 return err; 262 return err;
262 263
263 err = af_alg_accept(ask->parent, newsock); 264 err = af_alg_accept(ask->parent, newsock, kern);
264 if (err) 265 if (err)
265 return err; 266 return err;
266 267
@@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
378} 379}
379 380
380static int hash_accept_nokey(struct socket *sock, struct socket *newsock, 381static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
381 int flags) 382 int flags, bool kern)
382{ 383{
383 int err; 384 int err;
384 385
@@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
386 if (err) 387 if (err)
387 return err; 388 return err;
388 389
389 return hash_accept(sock, newsock, flags); 390 return hash_accept(sock, newsock, flags, kern);
390} 391}
391 392
392static struct proto_ops algif_hash_ops_nokey = { 393static struct proto_ops algif_hash_ops_nokey = {
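The af_alg/algif_hash changes thread a new kern flag through every accept wrapper down to sk_alloc(), so sockets accepted on behalf of the kernel get kernel-side accounting rather than the hardcoded 0. A thin sketch of that plumbing pattern, with stand-in functions:

#include <stdio.h>
#include <stdbool.h>

/* sk_alloc()'s kern flag decides kernel-vs-user socket accounting;
 * the patch merely forwards it through each accept wrapper. */
static int sk_alloc_sketch(bool kern)
{
        printf("alloc socket, kern=%d\n", kern);
        return 0;
}

static int af_accept(bool kern)   { return sk_alloc_sketch(kern); }
static int hash_accept(bool kern) { return af_accept(kern); }

int main(void)
{
        hash_accept(false);     /* ordinary user-space accept(2) */
        hash_accept(true);      /* in-kernel user of the socket API */
        return 0;
}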
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 219b90bc0922..f15900132912 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,8 +41,10 @@ void acpi_gpe_apply_masked_gpes(void);
41void acpi_container_init(void); 41void acpi_container_init(void);
42void acpi_memory_hotplug_init(void); 42void acpi_memory_hotplug_init(void);
43#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC 43#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
44void pci_ioapic_remove(struct acpi_pci_root *root);
44int acpi_ioapic_remove(struct acpi_pci_root *root); 45int acpi_ioapic_remove(struct acpi_pci_root *root);
45#else 46#else
47static inline void pci_ioapic_remove(struct acpi_pci_root *root) { return; }
46static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; } 48static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
47#endif 49#endif
48#ifdef CONFIG_ACPI_DOCK 50#ifdef CONFIG_ACPI_DOCK
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 6d7ce6e12aaa..1120dfd625b8 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -206,24 +206,34 @@ int acpi_ioapic_add(acpi_handle root_handle)
206 return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV; 206 return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
207} 207}
208 208
209int acpi_ioapic_remove(struct acpi_pci_root *root) 209void pci_ioapic_remove(struct acpi_pci_root *root)
210{ 210{
211 int retval = 0;
212 struct acpi_pci_ioapic *ioapic, *tmp; 211 struct acpi_pci_ioapic *ioapic, *tmp;
213 212
214 mutex_lock(&ioapic_list_lock); 213 mutex_lock(&ioapic_list_lock);
215 list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { 214 list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
216 if (root->device->handle != ioapic->root_handle) 215 if (root->device->handle != ioapic->root_handle)
217 continue; 216 continue;
218
219 if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
220 retval = -EBUSY;
221
222 if (ioapic->pdev) { 217 if (ioapic->pdev) {
223 pci_release_region(ioapic->pdev, 0); 218 pci_release_region(ioapic->pdev, 0);
224 pci_disable_device(ioapic->pdev); 219 pci_disable_device(ioapic->pdev);
225 pci_dev_put(ioapic->pdev); 220 pci_dev_put(ioapic->pdev);
226 } 221 }
222 }
223 mutex_unlock(&ioapic_list_lock);
224}
225
226int acpi_ioapic_remove(struct acpi_pci_root *root)
227{
228 int retval = 0;
229 struct acpi_pci_ioapic *ioapic, *tmp;
230
231 mutex_lock(&ioapic_list_lock);
232 list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
233 if (root->device->handle != ioapic->root_handle)
234 continue;
235 if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
236 retval = -EBUSY;
227 if (ioapic->res.flags && ioapic->res.parent) 237 if (ioapic->res.flags && ioapic->res.parent)
228 release_resource(&ioapic->res); 238 release_resource(&ioapic->res);
229 list_del(&ioapic->list); 239 list_del(&ioapic->list);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index bf601d4df8cf..919be0aa2578 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -648,12 +648,12 @@ static void acpi_pci_root_remove(struct acpi_device *device)
648 648
649 pci_stop_root_bus(root->bus); 649 pci_stop_root_bus(root->bus);
650 650
651 WARN_ON(acpi_ioapic_remove(root)); 651 pci_ioapic_remove(root);
652
653 device_set_run_wake(root->bus->bridge, false); 652 device_set_run_wake(root->bus->bridge, false);
654 pci_acpi_remove_bus_pm_notifier(device); 653 pci_acpi_remove_bus_pm_notifier(device);
655 654
656 pci_remove_root_bus(root->bus); 655 pci_remove_root_bus(root->bus);
656 WARN_ON(acpi_ioapic_remove(root));
657 657
658 dmar_device_remove(device->handle); 658 dmar_device_remove(device->handle);
659 659
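The ACPI hunks split IO-APIC removal into two phases: pci_ioapic_remove() releases the IO-APIC's PCI resources while the root bus still exists, and only after pci_remove_root_bus() does acpi_ioapic_remove() unregister the IO-APIC and free its resources. A trivial sketch of the ordering:

#include <stdio.h>

/* Two-phase teardown: release PCI resources while the bus is still
 * alive, unregister the IO-APIC only after the bus is gone. */
static void phase1_pci(void)    { printf("release PCI BARs, disable dev\n"); }
static void phase2_ioapic(void) { printf("unregister IO-APIC, free res\n"); }

int main(void)
{
        phase1_pci();                   /* pci_ioapic_remove(root) */
        printf("pci_remove_root_bus()\n");
        phase2_ioapic();                /* acpi_ioapic_remove(root) */
        return 0;
}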
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 85d833289f28..4c96f3ac4976 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
177 case AHCI_LS1043A: 177 case AHCI_LS1043A:
178 if (!qpriv->ecc_addr) 178 if (!qpriv->ecc_addr)
179 return -EINVAL; 179 return -EINVAL;
180 writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr); 180 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
181 qpriv->ecc_addr);
181 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 182 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
182 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 183 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
183 if (qpriv->is_dmacoherent) 184 if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
194 case AHCI_LS1046A: 195 case AHCI_LS1046A:
195 if (!qpriv->ecc_addr) 196 if (!qpriv->ecc_addr)
196 return -EINVAL; 197 return -EINVAL;
197 writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr); 198 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
199 qpriv->ecc_addr);
198 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 200 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
199 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 201 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
200 if (qpriv->is_dmacoherent) 202 if (qpriv->is_dmacoherent)
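The ahci_qoriq fix replaces a blind register write with a read-modify-write, so setting the channel-2 ECC-disable bit no longer clobbers whatever other bits the register already holds. A minimal sketch of the idiom against a fake register (the bit position here is illustrative, not the hardware's):

#include <stdio.h>
#include <stdint.h>

#define ECC_DIS_CH2 (1u << 14)  /* illustrative bit position */

static uint32_t fake_reg = 0x3; /* neighbouring bits already set */

static uint32_t readl(volatile uint32_t *r)          { return *r; }
static void     writel(uint32_t v, volatile uint32_t *r) { *r = v; }

int main(void)
{
        /* OR in our bit instead of overwriting the whole register,
         * exactly as the patch now does. */
        writel(readl(&fake_reg) | ECC_DIS_CH2, &fake_reg);
        printf("reg = 0x%x\n", (unsigned)fake_reg);
        return 0;
}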
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2bd92dca3e62..274d6d7193d7 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1482 break; 1482 break;
1483 1483
1484 default: 1484 default:
1485 WARN_ON_ONCE(1);
1486 return AC_ERR_SYSTEM; 1485 return AC_ERR_SYSTEM;
1487 } 1486 }
1488 1487
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 46698232e6bf..19e6e539a061 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
224 224
225static void ata_tport_release(struct device *dev) 225static void ata_tport_release(struct device *dev)
226{ 226{
227 put_device(dev->parent);
228} 227}
229 228
230/** 229/**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
284 device_initialize(dev); 283 device_initialize(dev);
285 dev->type = &ata_port_type; 284 dev->type = &ata_port_type;
286 285
287 dev->parent = get_device(parent); 286 dev->parent = parent;
288 dev->release = ata_tport_release; 287 dev->release = ata_tport_release;
289 dev_set_name(dev, "ata%d", ap->print_id); 288 dev_set_name(dev, "ata%d", ap->print_id);
290 transport_setup_device(dev); 289 transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
348 347
349static void ata_tlink_release(struct device *dev) 348static void ata_tlink_release(struct device *dev)
350{ 349{
351 put_device(dev->parent);
352} 350}
353 351
354/** 352/**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
410 int error; 408 int error;
411 409
412 device_initialize(dev); 410 device_initialize(dev);
413 dev->parent = get_device(&ap->tdev); 411 dev->parent = &ap->tdev;
414 dev->release = ata_tlink_release; 412 dev->release = ata_tlink_release;
415 if (ata_is_host_link(link)) 413 if (ata_is_host_link(link))
416 dev_set_name(dev, "link%d", ap->print_id); 414 dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
589 587
590static void ata_tdev_release(struct device *dev) 588static void ata_tdev_release(struct device *dev)
591{ 589{
592 put_device(dev->parent);
593} 590}
594 591
595/** 592/**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
662 int error; 659 int error;
663 660
664 device_initialize(dev); 661 device_initialize(dev);
665 dev->parent = get_device(&link->tdev); 662 dev->parent = &link->tdev;
666 dev->release = ata_tdev_release; 663 dev->release = ata_tdev_release;
667 if (ata_is_host_link(link)) 664 if (ata_is_host_link(link))
668 dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); 665 dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 684bda4d14a1..6bb60fb6a30b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
639 return restart_syscall(); 639 return restart_syscall();
640} 640}
641 641
642void assert_held_device_hotplug(void)
643{
644 lockdep_assert_held(&device_hotplug_lock);
645}
646
647#ifdef CONFIG_BLOCK 642#ifdef CONFIG_BLOCK
648static inline int device_is_not_partition(struct device *dev) 643static inline int device_is_not_partition(struct device *dev)
649{ 644{
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 10aed84244f5..939641d6e262 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -50,7 +50,7 @@
50 the slower the port i/o. In some cases, setting 50 the slower the port i/o. In some cases, setting
51 this to zero will speed up the device. (default -1) 51 this to zero will speed up the device. (default -1)
52 52
53 major You may use this parameter to overide the 53 major You may use this parameter to override the
54 default major number (46) that this driver 54 default major number (46) that this driver
55 will use. Be sure to change the device 55 will use. Be sure to change the device
56 name as well. 56 name as well.
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 644ba0888bd4..9cfd2e06a649 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -61,7 +61,7 @@
61 first drive found. 61 first drive found.
62 62
63 63
64 major You may use this parameter to overide the 64 major You may use this parameter to override the
65 default major number (45) that this driver 65 default major number (45) that this driver
66 will use. Be sure to change the device 66 will use. Be sure to change the device
67 name as well. 67 name as well.
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ed93e8badf56..14c5d32f5d8b 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -59,7 +59,7 @@
59 the slower the port i/o. In some cases, setting 59 the slower the port i/o. In some cases, setting
60 this to zero will speed up the device. (default -1) 60 this to zero will speed up the device. (default -1)
61 61
62 major You may use this parameter to overide the 62 major You may use this parameter to override the
63 default major number (47) that this driver 63 default major number (47) that this driver
64 will use. Be sure to change the device 64 will use. Be sure to change the device
65 name as well. 65 name as well.
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 5db955fe3a94..3b5882bfb736 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -84,7 +84,7 @@
84 the slower the port i/o. In some cases, setting 84 the slower the port i/o. In some cases, setting
85 this to zero will speed up the device. (default -1) 85 this to zero will speed up the device. (default -1)
86 86
87 major You may use this parameter to overide the 87 major You may use this parameter to override the
88 default major number (97) that this driver 88 default major number (97) that this driver
89 will use. Be sure to change the device 89 will use. Be sure to change the device
90 name as well. 90 name as well.
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 61fc6824299a..e815312a00ad 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -61,7 +61,7 @@
61 the slower the port i/o. In some cases, setting 61 the slower the port i/o. In some cases, setting
62 this to zero will speed up the device. (default -1) 62 this to zero will speed up the device. (default -1)
63 63
64 major You may use this parameter to overide the 64 major You may use this parameter to override the
65 default major number (96) that this driver 65 default major number (96) that this driver
66 will use. Be sure to change the device 66 will use. Be sure to change the device
67 name as well. 67 name as well.
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4d6807723798..517838b65964 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -120,10 +120,11 @@ static int atomic_dec_return_safe(atomic_t *v)
120 120
121/* Feature bits */ 121/* Feature bits */
122 122
123#define RBD_FEATURE_LAYERING (1<<0) 123#define RBD_FEATURE_LAYERING (1ULL<<0)
124#define RBD_FEATURE_STRIPINGV2 (1<<1) 124#define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
125#define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2) 125#define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
126#define RBD_FEATURE_DATA_POOL (1<<7) 126#define RBD_FEATURE_DATA_POOL (1ULL<<7)
127
127#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ 128#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
128 RBD_FEATURE_STRIPINGV2 | \ 129 RBD_FEATURE_STRIPINGV2 | \
129 RBD_FEATURE_EXCLUSIVE_LOCK | \ 130 RBD_FEATURE_EXCLUSIVE_LOCK | \
@@ -499,16 +500,23 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
499 return is_lock_owner; 500 return is_lock_owner;
500} 501}
501 502
503static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
504{
505 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
506}
507
502static BUS_ATTR(add, S_IWUSR, NULL, rbd_add); 508static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
503static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove); 509static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
504static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major); 510static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
505static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major); 511static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
512static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);
506 513
507static struct attribute *rbd_bus_attrs[] = { 514static struct attribute *rbd_bus_attrs[] = {
508 &bus_attr_add.attr, 515 &bus_attr_add.attr,
509 &bus_attr_remove.attr, 516 &bus_attr_remove.attr,
510 &bus_attr_add_single_major.attr, 517 &bus_attr_add_single_major.attr,
511 &bus_attr_remove_single_major.attr, 518 &bus_attr_remove_single_major.attr,
519 &bus_attr_supported_features.attr,
512 NULL, 520 NULL,
513}; 521};
514 522
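The rbd feature bits switch from 1<<n to 1ULL<<n because the feature masks are 64-bit: with a plain int constant, any bit at position 31 or above either overflows the int or sign-extends when widened to u64. A small demonstration of the failure mode, using an unsigned intermediate so the example itself stays well-defined:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t bit  = 1u << 31;               /* fine as unsigned */
        /* Widening through a signed 32-bit type sign-extends, which is
         * what an int-typed (1 << 31) would do in a u64 expression: */
        uint64_t bad  = (uint64_t)(int32_t)bit; /* 0xffffffff80000000 */
        uint64_t good = 1ULL << 31;             /* 0x0000000080000000 */

        printf("%llx vs %llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
}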
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e27d89a36c34..dceb5edd1e54 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1189,6 +1189,8 @@ static int zram_add(void)
1189 blk_queue_io_min(zram->disk->queue, PAGE_SIZE); 1189 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1190 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); 1190 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
1191 zram->disk->queue->limits.discard_granularity = PAGE_SIZE; 1191 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
1192 zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
1193 zram->disk->queue->limits.chunk_sectors = 0;
1192 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); 1194 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
1193 /* 1195 /*
1194 * zram_bio_discard() will clear all logical blocks if logical block 1196 * zram_bio_discard() will clear all logical blocks if logical block
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 3ad86fdf954e..b1ad12552b56 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
397 irq, err); 397 irq, err);
398 return err; 398 return err;
399 } 399 }
400 omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
401 400
402 priv->clk = of_clk_get(pdev->dev.of_node, 0); 401 priv->clk = devm_clk_get(&pdev->dev, NULL);
403 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER) 402 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
404 return -EPROBE_DEFER; 403 return -EPROBE_DEFER;
405 if (!IS_ERR(priv->clk)) { 404 if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
408 dev_err(&pdev->dev, "unable to enable the clk, " 407 dev_err(&pdev->dev, "unable to enable the clk, "
409 "err = %d\n", err); 408 "err = %d\n", err);
410 } 409 }
410
411 /*
412 * On OMAP4, enabling the shutdown_oflo interrupt is
413 * done in the interrupt mask register. There is no
414 * such register on EIP76, and it's enabled by the
415 * same bit in the control register
416 */
417 if (priv->pdata->regs[RNG_INTMASK_REG])
418 omap_rng_write(priv, RNG_INTMASK_REG,
419 RNG_SHUTDOWN_OFLO_MASK);
420 else
421 omap_rng_write(priv, RNG_CONTROL_REG,
422 RNG_SHUTDOWN_OFLO_MASK);
411 } 423 }
412 return 0; 424 return 0;
413} 425}
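Per the new comment in the omap-rng hunk, the shutdown-overflow interrupt enable lives in INTMASK on OMAP4 but in CONTROL on EIP76, so the driver keys off whether the per-variant register map defines INTMASK at all. A sketch of that register-map dispatch (offsets and names invented for illustration):

#include <stdio.h>

enum { REG_CONTROL, REG_INTMASK, REG_MAX };

struct variant { unsigned short regs[REG_MAX]; };

/* Offset 0 means "register not present on this IP". */
static const struct variant omap4 = { .regs = { [REG_CONTROL] = 0x04,
                                                [REG_INTMASK] = 0x10 } };
static const struct variant eip76 = { .regs = { [REG_CONTROL] = 0x08 } };

static void enable_oflo_irq(const struct variant *v)
{
        if (v->regs[REG_INTMASK])
                printf("write mask bit to INTMASK @0x%x\n",
                       v->regs[REG_INTMASK]);
        else
                printf("write mask bit to CONTROL @0x%x\n",
                       v->regs[REG_CONTROL]);
}

int main(void)
{
        enable_oflo_irq(&omap4);
        enable_oflo_irq(&eip76);
        return 0;
}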
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index a5b1eb276c0b..e6d0d271c58c 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -6,7 +6,7 @@
6 6
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/sched.h> 9#include <linux/sched/signal.h>
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/timer.h> 12#include <linux/timer.h>
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1ef26403bcc8..0ab024918907 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -313,13 +313,6 @@ static int random_read_wakeup_bits = 64;
313static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; 313static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
314 314
315/* 315/*
316 * The minimum number of seconds between urandom pool reseeding. We
317 * do this to limit the amount of entropy that can be drained from the
318 * input pool even if there are heavy demands on /dev/urandom.
319 */
320static int random_min_urandom_seed = 60;
321
322/*
323 * Originally, we used a primitive polynomial of degree .poolwords 316 * Originally, we used a primitive polynomial of degree .poolwords
324 * over GF(2). The taps for various sizes are defined below. They 317 * over GF(2). The taps for various sizes are defined below. They
325 * were chosen to be evenly spaced except for the last tap, which is 1 318 * were chosen to be evenly spaced except for the last tap, which is 1
@@ -409,7 +402,6 @@ static struct poolinfo {
409 */ 402 */
410static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); 403static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
411static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); 404static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
412static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
413static struct fasync_struct *fasync; 405static struct fasync_struct *fasync;
414 406
415static DEFINE_SPINLOCK(random_ready_list_lock); 407static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -467,7 +459,6 @@ struct entropy_store {
467 int entropy_count; 459 int entropy_count;
468 int entropy_total; 460 int entropy_total;
469 unsigned int initialized:1; 461 unsigned int initialized:1;
470 unsigned int limit:1;
471 unsigned int last_data_init:1; 462 unsigned int last_data_init:1;
472 __u8 last_data[EXTRACT_SIZE]; 463 __u8 last_data[EXTRACT_SIZE];
473}; 464};
@@ -485,7 +476,6 @@ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
485static struct entropy_store input_pool = { 476static struct entropy_store input_pool = {
486 .poolinfo = &poolinfo_table[0], 477 .poolinfo = &poolinfo_table[0],
487 .name = "input", 478 .name = "input",
488 .limit = 1,
489 .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), 479 .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
490 .pool = input_pool_data 480 .pool = input_pool_data
491}; 481};
@@ -493,7 +483,6 @@ static struct entropy_store input_pool = {
493static struct entropy_store blocking_pool = { 483static struct entropy_store blocking_pool = {
494 .poolinfo = &poolinfo_table[1], 484 .poolinfo = &poolinfo_table[1],
495 .name = "blocking", 485 .name = "blocking",
496 .limit = 1,
497 .pull = &input_pool, 486 .pull = &input_pool,
498 .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), 487 .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
499 .pool = blocking_pool_data, 488 .pool = blocking_pool_data,
@@ -855,13 +844,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
855 spin_unlock_irqrestore(&primary_crng.lock, flags); 844 spin_unlock_irqrestore(&primary_crng.lock, flags);
856} 845}
857 846
858static inline void maybe_reseed_primary_crng(void)
859{
860 if (crng_init > 2 &&
861 time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL))
862 crng_reseed(&primary_crng, &input_pool);
863}
864
865static inline void crng_wait_ready(void) 847static inline void crng_wait_ready(void)
866{ 848{
867 wait_event_interruptible(crng_init_wait, crng_ready()); 849 wait_event_interruptible(crng_init_wait, crng_ready());
@@ -1220,15 +1202,6 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
1220 r->entropy_count > r->poolinfo->poolfracbits) 1202 r->entropy_count > r->poolinfo->poolfracbits)
1221 return; 1203 return;
1222 1204
1223 if (r->limit == 0 && random_min_urandom_seed) {
1224 unsigned long now = jiffies;
1225
1226 if (time_before(now,
1227 r->last_pulled + random_min_urandom_seed * HZ))
1228 return;
1229 r->last_pulled = now;
1230 }
1231
1232 _xfer_secondary_pool(r, nbytes); 1205 _xfer_secondary_pool(r, nbytes);
1233} 1206}
1234 1207
@@ -1236,8 +1209,6 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
1236{ 1209{
1237 __u32 tmp[OUTPUT_POOL_WORDS]; 1210 __u32 tmp[OUTPUT_POOL_WORDS];
1238 1211
1239 /* For /dev/random's pool, always leave two wakeups' worth */
1240 int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
1241 int bytes = nbytes; 1212 int bytes = nbytes;
1242 1213
1243 /* pull at least as much as a wakeup */ 1214 /* pull at least as much as a wakeup */
@@ -1248,7 +1219,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
1248 trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, 1219 trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
1249 ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); 1220 ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
1250 bytes = extract_entropy(r->pull, tmp, bytes, 1221 bytes = extract_entropy(r->pull, tmp, bytes,
1251 random_read_wakeup_bits / 8, rsvd_bytes); 1222 random_read_wakeup_bits / 8, 0);
1252 mix_pool_bytes(r, tmp, bytes); 1223 mix_pool_bytes(r, tmp, bytes);
1253 credit_entropy_bits(r, bytes*8); 1224 credit_entropy_bits(r, bytes*8);
1254} 1225}
@@ -1276,7 +1247,7 @@ static void push_to_pool(struct work_struct *work)
1276static size_t account(struct entropy_store *r, size_t nbytes, int min, 1247static size_t account(struct entropy_store *r, size_t nbytes, int min,
1277 int reserved) 1248 int reserved)
1278{ 1249{
1279 int entropy_count, orig; 1250 int entropy_count, orig, have_bytes;
1280 size_t ibytes, nfrac; 1251 size_t ibytes, nfrac;
1281 1252
1282 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); 1253 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
@@ -1285,14 +1256,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
1285retry: 1256retry:
1286 entropy_count = orig = ACCESS_ONCE(r->entropy_count); 1257 entropy_count = orig = ACCESS_ONCE(r->entropy_count);
1287 ibytes = nbytes; 1258 ibytes = nbytes;
1288 /* If limited, never pull more than available */ 1259 /* never pull more than available */
1289 if (r->limit) { 1260 have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
1290 int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
1291 1261
1292 if ((have_bytes -= reserved) < 0) 1262 if ((have_bytes -= reserved) < 0)
1293 have_bytes = 0; 1263 have_bytes = 0;
1294 ibytes = min_t(size_t, ibytes, have_bytes); 1264 ibytes = min_t(size_t, ibytes, have_bytes);
1295 }
1296 if (ibytes < min) 1265 if (ibytes < min)
1297 ibytes = 0; 1266 ibytes = 0;
1298 1267
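With the limit flag gone, account() applies one rule to every pool: convert the fractional-bit entropy_count into whole bytes, subtract the reserve, and clamp the request. Below is a standalone sketch of that arithmetic, assuming the driver's ENTROPY_SHIFT of 3 (entropy is tracked in 1/8ths of a bit); sketch_account() and the inputs are illustrative, not the driver's entry points:

    #include <stdio.h>
    #include <stddef.h>

    #define ENTROPY_SHIFT 3 /* fractional-bit resolution, as in random.c */

    static size_t sketch_account(int entropy_count, size_t nbytes,
                                 size_t min, int reserved)
    {
        /* >> ENTROPY_SHIFT drops the fraction, >> 3 converts bits to bytes */
        int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
        size_t ibytes = nbytes;

        if ((have_bytes -= reserved) < 0)
            have_bytes = 0;
        if (ibytes > (size_t)have_bytes)
            ibytes = (size_t)have_bytes;
        if (ibytes < min)
            ibytes = 0;
        return ibytes;
    }

    int main(void)
    {
        /* 1024 credited bits => 128 bytes on hand; a 64-byte read fits */
        printf("%zu\n", sketch_account(1024 << ENTROPY_SHIFT, 64, 8, 0));
        return 0;
    }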
@@ -1912,6 +1881,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
1912static int min_read_thresh = 8, min_write_thresh; 1881static int min_read_thresh = 8, min_write_thresh;
1913static int max_read_thresh = OUTPUT_POOL_WORDS * 32; 1882static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
1914static int max_write_thresh = INPUT_POOL_WORDS * 32; 1883static int max_write_thresh = INPUT_POOL_WORDS * 32;
1884static int random_min_urandom_seed = 60;
1915static char sysctl_bootid[16]; 1885static char sysctl_bootid[16];
1916 1886
1917/* 1887/*
@@ -2042,63 +2012,64 @@ struct ctl_table random_table[] = {
2042}; 2012};
2043#endif /* CONFIG_SYSCTL */ 2013#endif /* CONFIG_SYSCTL */
2044 2014
2045static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; 2015struct batched_entropy {
2046 2016 union {
2047int random_int_secret_init(void) 2017 u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
2048{ 2018 u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
2049 get_random_bytes(random_int_secret, sizeof(random_int_secret)); 2019 };
2050 return 0; 2020 unsigned int position;
2051} 2021};
2052
2053static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
2054 __aligned(sizeof(unsigned long));
2055 2022
2056/* 2023/*
2057 * Get a random word for internal kernel use only. Similar to urandom but 2024 * Get a random word for internal kernel use only. The quality of the random
2058 * with the goal of minimal entropy pool depletion. As a result, the random 2025 * number is either as good as RDRAND or as good as /dev/urandom, with the
2059 * value is not cryptographically secure but for several uses the cost of 2026 * goal of being quite fast and not depleting entropy.
2060 * depleting entropy is too high
2061 */ 2027 */
2062unsigned int get_random_int(void) 2028static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
2029u64 get_random_u64(void)
2063{ 2030{
2064 __u32 *hash; 2031 u64 ret;
2065 unsigned int ret; 2032 struct batched_entropy *batch;
2066 2033
2067 if (arch_get_random_int(&ret)) 2034#if BITS_PER_LONG == 64
2035 if (arch_get_random_long((unsigned long *)&ret))
2068 return ret; 2036 return ret;
2037#else
2038 if (arch_get_random_long((unsigned long *)&ret) &&
2039 arch_get_random_long((unsigned long *)&ret + 1))
2040 return ret;
2041#endif
2069 2042
2070 hash = get_cpu_var(get_random_int_hash); 2043 batch = &get_cpu_var(batched_entropy_u64);
2071 2044 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2072 hash[0] += current->pid + jiffies + random_get_entropy(); 2045 extract_crng((u8 *)batch->entropy_u64);
2073 md5_transform(hash, random_int_secret); 2046 batch->position = 0;
2074 ret = hash[0]; 2047 }
2075 put_cpu_var(get_random_int_hash); 2048 ret = batch->entropy_u64[batch->position++];
2076 2049 put_cpu_var(batched_entropy_u64);
2077 return ret; 2050 return ret;
2078} 2051}
2079EXPORT_SYMBOL(get_random_int); 2052EXPORT_SYMBOL(get_random_u64);
2080 2053
2081/* 2054static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2082 * Same as get_random_int(), but returns unsigned long. 2055u32 get_random_u32(void)
2083 */
2084unsigned long get_random_long(void)
2085{ 2056{
2086 __u32 *hash; 2057 u32 ret;
2087 unsigned long ret; 2058 struct batched_entropy *batch;
2088 2059
2089 if (arch_get_random_long(&ret)) 2060 if (arch_get_random_int(&ret))
2090 return ret; 2061 return ret;
2091 2062
2092 hash = get_cpu_var(get_random_int_hash); 2063 batch = &get_cpu_var(batched_entropy_u32);
2093 2064 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2094 hash[0] += current->pid + jiffies + random_get_entropy(); 2065 extract_crng((u8 *)batch->entropy_u32);
2095 md5_transform(hash, random_int_secret); 2066 batch->position = 0;
2096 ret = *(unsigned long *)hash; 2067 }
2097 put_cpu_var(get_random_int_hash); 2068 ret = batch->entropy_u32[batch->position++];
2098 2069 put_cpu_var(batched_entropy_u32);
2099 return ret; 2070 return ret;
2100} 2071}
2101EXPORT_SYMBOL(get_random_long); 2072EXPORT_SYMBOL(get_random_u32);
2102 2073
2103/** 2074/**
2104 * randomize_page - Generate a random, page aligned address 2075 * randomize_page - Generate a random, page aligned address
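The batched helpers above replace the MD5-based get_random_int()/get_random_long() with a per-CPU buffer that is refilled from the ChaCha20 CRNG only when the cursor wraps. A minimal userspace sketch of the batching pattern; fill_block() is a hypothetical stand-in for extract_crng(), the 64-byte size mirrors CHACHA20_BLOCK_SIZE, and rand() is only a placeholder source, not a CSPRNG:

    #include <stdint.h>
    #include <stdlib.h>

    #define BLOCK_SIZE 64 /* assumed to match CHACHA20_BLOCK_SIZE */

    struct batched_entropy {
        uint64_t entropy_u64[BLOCK_SIZE / sizeof(uint64_t)];
        unsigned int position;
    };

    static void fill_block(uint8_t *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            buf[i] = (uint8_t)rand(); /* placeholder, not a CSPRNG */
    }

    static uint64_t get_batched_u64(struct batched_entropy *batch)
    {
        /* refill only when all eight words have been handed out */
        if (batch->position % (BLOCK_SIZE / sizeof(uint64_t)) == 0) {
            fill_block((uint8_t *)batch->entropy_u64, BLOCK_SIZE);
            batch->position = 0;
        }
        return batch->entropy_u64[batch->position++];
    }

In the kernel the batch lives in per-CPU storage, so get_cpu_var()/put_cpu_var() pin it to one CPU and no locking is needed.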
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a47543281864..38b9fdf854a4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2532,4 +2532,5 @@ static int __init cpufreq_core_init(void)
2532 2532
2533 return 0; 2533 return 0;
2534} 2534}
2535module_param(off, int, 0444);
2535core_initcall(cpufreq_core_init); 2536core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b1fbaa30ae04..3d37219a0dd7 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -377,6 +377,7 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
377 intel_pstate_init_limits(limits); 377 intel_pstate_init_limits(limits);
378 limits->min_perf_pct = 100; 378 limits->min_perf_pct = 100;
379 limits->min_perf = int_ext_tofp(1); 379 limits->min_perf = int_ext_tofp(1);
380 limits->min_sysfs_pct = 100;
380} 381}
381 382
382static DEFINE_MUTEX(intel_pstate_driver_lock); 383static DEFINE_MUTEX(intel_pstate_driver_lock);
@@ -968,11 +969,20 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
968} 969}
969 970
970static void intel_pstate_update_policies(void) 971static void intel_pstate_update_policies(void)
972 __releases(&intel_pstate_limits_lock)
973 __acquires(&intel_pstate_limits_lock)
971{ 974{
975 struct perf_limits *saved_limits = limits;
972 int cpu; 976 int cpu;
973 977
978 mutex_unlock(&intel_pstate_limits_lock);
979
974 for_each_possible_cpu(cpu) 980 for_each_possible_cpu(cpu)
975 cpufreq_update_policy(cpu); 981 cpufreq_update_policy(cpu);
982
983 mutex_lock(&intel_pstate_limits_lock);
984
985 limits = saved_limits;
976} 986}
977 987
978/************************** debugfs begin ************************/ 988/************************** debugfs begin ************************/
@@ -1180,10 +1190,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
1180 1190
1181 limits->no_turbo = clamp_t(int, input, 0, 1); 1191 limits->no_turbo = clamp_t(int, input, 0, 1);
1182 1192
1183 mutex_unlock(&intel_pstate_limits_lock);
1184
1185 intel_pstate_update_policies(); 1193 intel_pstate_update_policies();
1186 1194
1195 mutex_unlock(&intel_pstate_limits_lock);
1196
1187 mutex_unlock(&intel_pstate_driver_lock); 1197 mutex_unlock(&intel_pstate_driver_lock);
1188 1198
1189 return count; 1199 return count;
@@ -1217,10 +1227,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
1217 limits->max_perf_pct); 1227 limits->max_perf_pct);
1218 limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); 1228 limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
1219 1229
1220 mutex_unlock(&intel_pstate_limits_lock);
1221
1222 intel_pstate_update_policies(); 1230 intel_pstate_update_policies();
1223 1231
1232 mutex_unlock(&intel_pstate_limits_lock);
1233
1224 mutex_unlock(&intel_pstate_driver_lock); 1234 mutex_unlock(&intel_pstate_driver_lock);
1225 1235
1226 return count; 1236 return count;
@@ -1254,10 +1264,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
1254 limits->min_perf_pct); 1264 limits->min_perf_pct);
1255 limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); 1265 limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
1256 1266
1257 mutex_unlock(&intel_pstate_limits_lock);
1258
1259 intel_pstate_update_policies(); 1267 intel_pstate_update_policies();
1260 1268
1269 mutex_unlock(&intel_pstate_limits_lock);
1270
1261 mutex_unlock(&intel_pstate_driver_lock); 1271 mutex_unlock(&intel_pstate_driver_lock);
1262 1272
1263 return count; 1273 return count;
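All three store_*() paths above now hold intel_pstate_limits_lock across the policy update, while intel_pstate_update_policies() drops and retakes the lock around the per-CPU calls and restores the saved limits pointer afterwards. A compilable userspace sketch of that ordering, with pthread mutexes and stub names standing in for the driver's symbols:

    #include <pthread.h>

    struct perf_limits { int max_pct; };

    static pthread_mutex_t limits_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct perf_limits powersave = { .max_pct = 100 };
    static struct perf_limits *limits = &powersave;

    static void cpufreq_update_policy_stub(int cpu) { (void)cpu; }

    /* Called with limits_lock held; drops it around the per-CPU updates
     * (which may re-enter and take the lock) and restores the pointer
     * the caller was operating on. */
    static void update_policies(void)
    {
        struct perf_limits *saved = limits;

        pthread_mutex_unlock(&limits_lock);
        for (int cpu = 0; cpu < 4; cpu++)
            cpufreq_update_policy_stub(cpu);
        pthread_mutex_lock(&limits_lock);
        limits = saved;
    }

    static void store_path(int new_max)
    {
        pthread_mutex_lock(&limits_lock);
        limits->max_pct = new_max;
        update_policies(); /* the lock is now held across this call */
        pthread_mutex_unlock(&limits_lock);
    }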
@@ -1874,13 +1884,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
1874 1884
1875 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 1885 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
1876 pstate = clamp_t(int, pstate, min_perf, max_perf); 1886 pstate = clamp_t(int, pstate, min_perf, max_perf);
1877 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
1878 return pstate; 1887 return pstate;
1879} 1888}
1880 1889
1881static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) 1890static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
1882{ 1891{
1883 pstate = intel_pstate_prepare_request(cpu, pstate);
1884 if (pstate == cpu->pstate.current_pstate) 1892 if (pstate == cpu->pstate.current_pstate)
1885 return; 1893 return;
1886 1894
@@ -1900,6 +1908,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
1900 1908
1901 update_turbo_state(); 1909 update_turbo_state();
1902 1910
1911 target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
1912 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
1903 intel_pstate_update_pstate(cpu, target_pstate); 1913 intel_pstate_update_pstate(cpu, target_pstate);
1904 1914
1905 sample = &cpu->sample; 1915 sample = &cpu->sample;
@@ -2132,16 +2142,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2132 mutex_lock(&intel_pstate_limits_lock); 2142 mutex_lock(&intel_pstate_limits_lock);
2133 2143
2134 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 2144 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
2145 pr_debug("set performance\n");
2135 if (!perf_limits) { 2146 if (!perf_limits) {
2136 limits = &performance_limits; 2147 limits = &performance_limits;
2137 perf_limits = limits; 2148 perf_limits = limits;
2138 } 2149 }
2139 if (policy->max >= policy->cpuinfo.max_freq &&
2140 !limits->no_turbo) {
2141 pr_debug("set performance\n");
2142 intel_pstate_set_performance_limits(perf_limits);
2143 goto out;
2144 }
2145 } else { 2150 } else {
2146 pr_debug("set powersave\n"); 2151 pr_debug("set powersave\n");
2147 if (!perf_limits) { 2152 if (!perf_limits) {
@@ -2152,7 +2157,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2152 } 2157 }
2153 2158
2154 intel_pstate_update_perf_limits(policy, perf_limits); 2159 intel_pstate_update_perf_limits(policy, perf_limits);
2155 out: 2160
2156 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2161 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
2157 /* 2162 /*
2158 * NOHZ_FULL CPUs need this as the governor callback may not 2163 * NOHZ_FULL CPUs need this as the governor callback may not
@@ -2198,9 +2203,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
2198 unsigned int max_freq, min_freq; 2203 unsigned int max_freq, min_freq;
2199 2204
2200 max_freq = policy->cpuinfo.max_freq * 2205 max_freq = policy->cpuinfo.max_freq *
2201 limits->max_sysfs_pct / 100; 2206 perf_limits->max_sysfs_pct / 100;
2202 min_freq = policy->cpuinfo.max_freq * 2207 min_freq = policy->cpuinfo.max_freq *
2203 limits->min_sysfs_pct / 100; 2208 perf_limits->min_sysfs_pct / 100;
2204 cpufreq_verify_within_limits(policy, min_freq, max_freq); 2209 cpufreq_verify_within_limits(policy, min_freq, max_freq);
2205 } 2210 }
2206 2211
@@ -2243,13 +2248,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
2243 2248
2244 cpu = all_cpu_data[policy->cpu]; 2249 cpu = all_cpu_data[policy->cpu];
2245 2250
2246 /*
2247 * We need sane value in the cpu->perf_limits, so inherit from global
2248 * perf_limits limits, which are seeded with values based on the
2249 * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up.
2250 */
2251 if (per_cpu_limits) 2251 if (per_cpu_limits)
2252 memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits)); 2252 intel_pstate_init_limits(cpu->perf_limits);
2253 2253
2254 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; 2254 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
2255 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 2255 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2301,7 +2301,6 @@ static struct cpufreq_driver intel_pstate = {
2301static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) 2301static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
2302{ 2302{
2303 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2303 struct cpudata *cpu = all_cpu_data[policy->cpu];
2304 struct perf_limits *perf_limits = limits;
2305 2304
2306 update_turbo_state(); 2305 update_turbo_state();
2307 policy->cpuinfo.max_freq = limits->turbo_disabled ? 2306 policy->cpuinfo.max_freq = limits->turbo_disabled ?
@@ -2309,15 +2308,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
2309 2308
2310 cpufreq_verify_within_cpu_limits(policy); 2309 cpufreq_verify_within_cpu_limits(policy);
2311 2310
2312 if (per_cpu_limits)
2313 perf_limits = cpu->perf_limits;
2314
2315 mutex_lock(&intel_pstate_limits_lock);
2316
2317 intel_pstate_update_perf_limits(policy, perf_limits);
2318
2319 mutex_unlock(&intel_pstate_limits_lock);
2320
2321 return 0; 2311 return 0;
2322} 2312}
2323 2313
@@ -2370,6 +2360,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
2370 wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, 2360 wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
2371 pstate_funcs.get_val(cpu, target_pstate)); 2361 pstate_funcs.get_val(cpu, target_pstate));
2372 } 2362 }
2363 freqs.new = target_pstate * cpu->pstate.scaling;
2373 cpufreq_freq_transition_end(policy, &freqs, false); 2364 cpufreq_freq_transition_end(policy, &freqs, false);
2374 2365
2375 return 0; 2366 return 0;
@@ -2383,8 +2374,9 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
2383 2374
2384 target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); 2375 target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
2385 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2376 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
2377 target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2386 intel_pstate_update_pstate(cpu, target_pstate); 2378 intel_pstate_update_pstate(cpu, target_pstate);
2387 return target_freq; 2379 return target_pstate * cpu->pstate.scaling;
2388} 2380}
2389 2381
2390static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) 2382static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -2437,8 +2429,11 @@ static int intel_pstate_register_driver(void)
2437 2429
2438 intel_pstate_init_limits(&powersave_limits); 2430 intel_pstate_init_limits(&powersave_limits);
2439 intel_pstate_set_performance_limits(&performance_limits); 2431 intel_pstate_set_performance_limits(&performance_limits);
2440 limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ? 2432 if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
2441 &performance_limits : &powersave_limits; 2433 intel_pstate_driver == &intel_pstate)
2434 limits = &performance_limits;
2435 else
2436 limits = &powersave_limits;
2442 2437
2443 ret = cpufreq_register_driver(intel_pstate_driver); 2438 ret = cpufreq_register_driver(intel_pstate_driver);
2444 if (ret) { 2439 if (ret) {
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index dce1af0ce85c..1b9da3dc799b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
270 scatterwalk_done(&walk, out, 0); 270 scatterwalk_done(&walk, out, 0);
271} 271}
272 272
273static void s5p_aes_complete(struct s5p_aes_dev *dev, int err) 273static void s5p_sg_done(struct s5p_aes_dev *dev)
274{ 274{
275 if (dev->sg_dst_cpy) { 275 if (dev->sg_dst_cpy) {
276 dev_dbg(dev->dev, 276 dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
281 } 281 }
282 s5p_free_sg_cpy(dev, &dev->sg_src_cpy); 282 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
283 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); 283 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
284}
284 285
285 /* holding a lock outside */ 286/* Calls the completion. Cannot be called with dev->lock held. */
287static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
288{
286 dev->req->base.complete(&dev->req->base, err); 289 dev->req->base.complete(&dev->req->base, err);
287 dev->busy = false; 290 dev->busy = false;
288} 291}
@@ -368,51 +371,44 @@ exit:
368} 371}
369 372
370/* 373/*
371 * Returns true if new transmitting (output) data is ready and its 374 * Returns -ERRNO on error (mapping of new data failed).
372 * address+length have to be written to device (by calling 375 * On success returns:
373 * s5p_set_dma_outdata()). False otherwise. 376 * - 0 if there is no more data,
377 * - 1 if new transmitting (output) data is ready and its address+length
378 * have to be written to device (by calling s5p_set_dma_outdata()).
374 */ 379 */
375static bool s5p_aes_tx(struct s5p_aes_dev *dev) 380static int s5p_aes_tx(struct s5p_aes_dev *dev)
376{ 381{
377 int err = 0; 382 int ret = 0;
378 bool ret = false;
379 383
380 s5p_unset_outdata(dev); 384 s5p_unset_outdata(dev);
381 385
382 if (!sg_is_last(dev->sg_dst)) { 386 if (!sg_is_last(dev->sg_dst)) {
383 err = s5p_set_outdata(dev, sg_next(dev->sg_dst)); 387 ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
384 if (err) 388 if (!ret)
385 s5p_aes_complete(dev, err); 389 ret = 1;
386 else
387 ret = true;
388 } else {
389 s5p_aes_complete(dev, err);
390
391 dev->busy = true;
392 tasklet_schedule(&dev->tasklet);
393 } 390 }
394 391
395 return ret; 392 return ret;
396} 393}
397 394
398/* 395/*
399 * Returns true if new receiving (input) data is ready and its 396 * Returns -ERRNO on error (mapping of new data failed).
400 * address+length have to be written to device (by calling 397 * On success returns:
401 * s5p_set_dma_indata()). False otherwise. 398 * - 0 if there is no more data,
399 * - 1 if new receiving (input) data is ready and its address+length
400 * have to be written to device (by calling s5p_set_dma_indata()).
402 */ 401 */
403static bool s5p_aes_rx(struct s5p_aes_dev *dev) 402static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
404{ 403{
405 int err; 404 int ret = 0;
406 bool ret = false;
407 405
408 s5p_unset_indata(dev); 406 s5p_unset_indata(dev);
409 407
410 if (!sg_is_last(dev->sg_src)) { 408 if (!sg_is_last(dev->sg_src)) {
411 err = s5p_set_indata(dev, sg_next(dev->sg_src)); 409 ret = s5p_set_indata(dev, sg_next(dev->sg_src));
412 if (err) 410 if (!ret)
413 s5p_aes_complete(dev, err); 411 ret = 1;
414 else
415 ret = true;
416 } 412 }
417 413
418 return ret; 414 return ret;
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
422{ 418{
423 struct platform_device *pdev = dev_id; 419 struct platform_device *pdev = dev_id;
424 struct s5p_aes_dev *dev = platform_get_drvdata(pdev); 420 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
425 bool set_dma_tx = false; 421 int err_dma_tx = 0;
426 bool set_dma_rx = false; 422 int err_dma_rx = 0;
423 bool tx_end = false;
427 unsigned long flags; 424 unsigned long flags;
428 uint32_t status; 425 uint32_t status;
426 int err;
429 427
430 spin_lock_irqsave(&dev->lock, flags); 428 spin_lock_irqsave(&dev->lock, flags);
431 429
430 /*
431 * Handle an rx or tx interrupt. If there is still data (the scatterlist
432 * has not reached its end), map the next scatterlist entry.
433 * If such a mapping fails, s5p_aes_complete() should be called.
434 *
435 * If there is no more data in the tx scatterlist, call s5p_aes_complete()
436 * and schedule a new tasklet.
437 */
432 status = SSS_READ(dev, FCINTSTAT); 438 status = SSS_READ(dev, FCINTSTAT);
433 if (status & SSS_FCINTSTAT_BRDMAINT) 439 if (status & SSS_FCINTSTAT_BRDMAINT)
434 set_dma_rx = s5p_aes_rx(dev); 440 err_dma_rx = s5p_aes_rx(dev);
435 if (status & SSS_FCINTSTAT_BTDMAINT) 441
436 set_dma_tx = s5p_aes_tx(dev); 442 if (status & SSS_FCINTSTAT_BTDMAINT) {
443 if (sg_is_last(dev->sg_dst))
444 tx_end = true;
445 err_dma_tx = s5p_aes_tx(dev);
446 }
437 447
438 SSS_WRITE(dev, FCINTPEND, status); 448 SSS_WRITE(dev, FCINTPEND, status);
439 449
440 /* 450 if (err_dma_rx < 0) {
441 * Writing length of DMA block (either receiving or transmitting) 451 err = err_dma_rx;
442 * will start the operation immediately, so this should be done 452 goto error;
443 * at the end (even after clearing pending interrupts to not miss the 453 }
444 * interrupt). 454 if (err_dma_tx < 0) {
445 */ 455 err = err_dma_tx;
446 if (set_dma_tx) 456 goto error;
447 s5p_set_dma_outdata(dev, dev->sg_dst); 457 }
448 if (set_dma_rx) 458
449 s5p_set_dma_indata(dev, dev->sg_src); 459 if (tx_end) {
460 s5p_sg_done(dev);
461
462 spin_unlock_irqrestore(&dev->lock, flags);
463
464 s5p_aes_complete(dev, 0);
465 dev->busy = true;
466 tasklet_schedule(&dev->tasklet);
467 } else {
468 /*
469 * Writing length of DMA block (either receiving or
470 * transmitting) will start the operation immediately, so this
471 * should be done at the end (even after clearing pending
472 * interrupts to not miss the interrupt).
473 */
474 if (err_dma_tx == 1)
475 s5p_set_dma_outdata(dev, dev->sg_dst);
476 if (err_dma_rx == 1)
477 s5p_set_dma_indata(dev, dev->sg_src);
450 478
479 spin_unlock_irqrestore(&dev->lock, flags);
480 }
481
482 return IRQ_HANDLED;
483
484error:
485 s5p_sg_done(dev);
451 spin_unlock_irqrestore(&dev->lock, flags); 486 spin_unlock_irqrestore(&dev->lock, flags);
487 s5p_aes_complete(dev, err);
452 488
453 return IRQ_HANDLED; 489 return IRQ_HANDLED;
454} 490}
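s5p_aes_rx() and s5p_aes_tx() now share a tri-state return: a negative errno when mapping the next scatterlist entry fails, 0 when the list is exhausted, and 1 when a fresh entry must be programmed into the DMA engine; the interrupt handler turns those into the error, completion, and reprogram paths. A small standalone sketch of the convention (demo_rx() and demo_handle() are illustrative, not the driver's functions):

    #include <errno.h>

    /* -errno: mapping failed; 0: no more data; 1: program the next entry */
    static int demo_rx(int entries_left, int map_ok)
    {
        if (!map_ok)
            return -ENOMEM;
        return entries_left > 0 ? 1 : 0;
    }

    static int demo_handle(int entries_left, int map_ok)
    {
        int ret = demo_rx(entries_left, map_ok);

        if (ret < 0)
            return ret; /* error path: tear down and complete the request */
        if (ret == 1)
            ;           /* write address+length to start the next DMA block */
        return 0;       /* ret == 0: scatterlist exhausted, request done */
    }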
@@ -597,8 +633,9 @@ outdata_error:
597 s5p_unset_indata(dev); 633 s5p_unset_indata(dev);
598 634
599indata_error: 635indata_error:
600 s5p_aes_complete(dev, err); 636 s5p_sg_done(dev);
601 spin_unlock_irqrestore(&dev->lock, flags); 637 spin_unlock_irqrestore(&dev->lock, flags);
638 s5p_aes_complete(dev, err);
602} 639}
603 640
604static void s5p_tasklet_cb(unsigned long data) 641static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
805 dev_warn(dev, "feed control interrupt is not available.\n"); 842 dev_warn(dev, "feed control interrupt is not available.\n");
806 goto err_irq; 843 goto err_irq;
807 } 844 }
808 err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt, 845 err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
809 IRQF_SHARED, pdev->name, pdev); 846 s5p_aes_interrupt, IRQF_ONESHOT,
847 pdev->name, pdev);
810 if (err < 0) { 848 if (err < 0) {
811 dev_warn(dev, "feed control interrupt is not available.\n"); 849 dev_warn(dev, "feed control interrupt is not available.\n");
812 goto err_irq; 850 goto err_irq;
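Passing a NULL hard handler together with IRQF_ONESHOT makes the IRQ core install its default primary handler and keep the line masked until the threaded handler returns, so the crypto completion can run in sleepable context. A kernel-style sketch of the same request pattern under hypothetical demo_* names:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t demo_irq_thread(int irq, void *dev_id)
    {
        /* runs in a kthread; sleeping is allowed here */
        return IRQ_HANDLED;
    }

    static int demo_probe(struct platform_device *pdev)
    {
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
            return irq;
        return devm_request_threaded_irq(&pdev->dev, irq,
                                         NULL,            /* default hard handler */
                                         demo_irq_thread, /* threaded handler */
                                         IRQF_ONESHOT,
                                         dev_name(&pdev->dev), pdev);
    }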
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 43a0c8a26ab0..00a16ab601cb 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data,
82void cryp_flush_inoutfifo(struct cryp_device_data *device_data) 82void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
83{ 83{
84 /* 84 /*
85 * We always need to disble the hardware before trying to flush the 85 * We always need to disable the hardware before trying to flush the
86 * FIFO. This is something that isn't written in the design 86 * FIFO. This is something that isn't written in the design
87 * specification, but we have been informed by the hardware designers 87 * specification, but we have been informed by the hardware designers
88 * that this must be done. 88 * that this must be done.
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 349dc3e1e52e..974c5a31a005 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void)
65 bool systab_found; 65 bool systab_found;
66 66
67 efi_mm.pgd = pgd_alloc(&efi_mm); 67 efi_mm.pgd = pgd_alloc(&efi_mm);
68 mm_init_cpumask(&efi_mm);
68 init_new_context(NULL, &efi_mm); 69 init_new_context(NULL, &efi_mm);
69 70
70 systab_found = false; 71 systab_found = false;
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 6def402bf569..5da36e56b36a 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -45,6 +45,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
45 size = sizeof(secboot); 45 size = sizeof(secboot);
46 status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid, 46 status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
47 NULL, &size, &secboot); 47 NULL, &size, &secboot);
48 if (status == EFI_NOT_FOUND)
49 return efi_secureboot_mode_disabled;
48 if (status != EFI_SUCCESS) 50 if (status != EFI_SUCCESS)
49 goto out_efi_err; 51 goto out_efi_err;
50 52
@@ -78,7 +80,5 @@ secure_boot_enabled:
78 80
79out_efi_err: 81out_efi_err:
80 pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); 82 pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
81 if (status == EFI_NOT_FOUND)
82 return efi_secureboot_mode_disabled;
83 return efi_secureboot_mode_unknown; 83 return efi_secureboot_mode_unknown;
84} 84}
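Hoisting the EFI_NOT_FOUND test makes "variable absent" a definitive answer (Secure Boot disabled) before every other failure falls through to the unknown path. A standalone sketch of that control flow, with enum values standing in for the EFI status and mode constants:

    enum demo_status { DEMO_SUCCESS, DEMO_NOT_FOUND, DEMO_DEVICE_ERROR };
    enum demo_mode   { MODE_DISABLED, MODE_ENABLED, MODE_UNKNOWN };

    static enum demo_mode secureboot_mode(enum demo_status status, int secboot)
    {
        if (status == DEMO_NOT_FOUND)
            return MODE_DISABLED;   /* no variable: firmware lacks Secure Boot */
        if (status != DEMO_SUCCESS)
            return MODE_UNKNOWN;    /* any other failure stays ambiguous */
        return secboot ? MODE_ENABLED : MODE_DISABLED;
    }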
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8363cb57915b..8a08e81ee90d 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -3,6 +3,4 @@
3# of AMDSOC/AMDGPU drm driver. 3# of AMDSOC/AMDGPU drm driver.
4# It provides the HW control for ACP related functionalities. 4# It provides the HW control for ACP related functionalities.
5 5
6subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
7
8AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o 6AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d2d0f60ff36d..99424cb8020b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -240,6 +240,8 @@ free_partial_kdata:
240 for (; i >= 0; i--) 240 for (; i >= 0; i--)
241 drm_free_large(p->chunks[i].kdata); 241 drm_free_large(p->chunks[i].kdata);
242 kfree(p->chunks); 242 kfree(p->chunks);
243 p->chunks = NULL;
244 p->nchunks = 0;
243put_ctx: 245put_ctx:
244 amdgpu_ctx_put(p->ctx); 246 amdgpu_ctx_put(p->ctx);
245free_chunk: 247free_chunk:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6abb238b25c9..a3a105ec99e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2094,8 +2094,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2094 } 2094 }
2095 2095
2096 r = amdgpu_late_init(adev); 2096 r = amdgpu_late_init(adev);
2097 if (r) 2097 if (r) {
2098 if (fbcon)
2099 console_unlock();
2098 return r; 2100 return r;
2101 }
2099 2102
2100 /* pin cursors */ 2103 /* pin cursors */
2101 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2104 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2587,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2587 use_bank = 0; 2590 use_bank = 0;
2588 } 2591 }
2589 2592
2590 *pos &= 0x3FFFF; 2593 *pos &= (1UL << 22) - 1;
2591 2594
2592 if (use_bank) { 2595 if (use_bank) {
2593 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || 2596 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2663,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2663 use_bank = 0; 2666 use_bank = 0;
2664 } 2667 }
2665 2668
2666 *pos &= 0x3FFFF; 2669 *pos &= (1UL << 22) - 1;
2667 2670
2668 if (use_bank) { 2671 if (use_bank) {
2669 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || 2672 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
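The new mask widens the debugfs register window from 18 to 22 offset bits. A quick standalone check of the two constants:

    #include <assert.h>

    int main(void)
    {
        assert(0x3FFFF == (1UL << 18) - 1);     /* old window: 18 bits */
        assert((1UL << 22) - 1 == 0x3FFFFF);    /* new window: 22 bits */
        return 0;
    }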
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 75fc376ba735..f7adbace428a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -59,9 +59,10 @@
59 * - 3.7.0 - Add support for VCE clock list packet 59 * - 3.7.0 - Add support for VCE clock list packet
60 * - 3.8.0 - Add support raster config init in the kernel 60 * - 3.8.0 - Add support raster config init in the kernel
61 * - 3.9.0 - Add support for memory query info about VRAM and GTT. 61 * - 3.9.0 - Add support for memory query info about VRAM and GTT.
62 * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
62 */ 63 */
63#define KMS_DRIVER_MAJOR 3 64#define KMS_DRIVER_MAJOR 3
64#define KMS_DRIVER_MINOR 9 65#define KMS_DRIVER_MINOR 10
65#define KMS_DRIVER_PATCHLEVEL 0 66#define KMS_DRIVER_PATCHLEVEL 0
66 67
67int amdgpu_vram_limit = 0; 68int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 51d759463384..106cf83c2e6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
202 bool kernel = false; 202 bool kernel = false;
203 int r; 203 int r;
204 204
205 /* reject invalid gem flags */
206 if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
207 AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
208 AMDGPU_GEM_CREATE_CPU_GTT_USWC |
209 AMDGPU_GEM_CREATE_VRAM_CLEARED|
210 AMDGPU_GEM_CREATE_SHADOW |
211 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
212 r = -EINVAL;
213 goto error_unlock;
214 }
215 /* reject invalid gem domains */
216 if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
217 AMDGPU_GEM_DOMAIN_GTT |
218 AMDGPU_GEM_DOMAIN_VRAM |
219 AMDGPU_GEM_DOMAIN_GDS |
220 AMDGPU_GEM_DOMAIN_GWS |
221 AMDGPU_GEM_DOMAIN_OA)) {
222 r = -EINVAL;
223 goto error_unlock;
224 }
225
205 /* create a gem object to contain this object in */ 226 /* create a gem object to contain this object in */
206 if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | 227 if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
207 AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { 228 AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
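Both checks above are plain whitelist tests: OR every legal bit into one mask and refuse any request that sets a bit outside it. A standalone sketch with made-up flag values:

    #include <errno.h>
    #include <stdint.h>

    #define FLAG_CPU_ACCESS (1u << 0)
    #define FLAG_NO_CPU     (1u << 1)
    #define FLAG_USWC       (1u << 2)
    #define VALID_FLAGS     (FLAG_CPU_ACCESS | FLAG_NO_CPU | FLAG_USWC)

    /* Returns 0 for a clean request, -EINVAL if any unknown bit is set. */
    static int check_flags(uint32_t flags)
    {
        return (flags & ~VALID_FLAGS) ? -EINVAL : 0;
    }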
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 31375bdde6f1..011800f621c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
788 } 788 }
789 } 789 }
790 790
791 /* disble sdma engine before programing it */ 791 /* disable sdma engine before programming it */
792 sdma_v3_0_ctx_switch_enable(adev, false); 792 sdma_v3_0_ctx_switch_enable(adev, false);
793 sdma_v3_0_enable(adev, false); 793 sdma_v3_0_enable(adev, false);
794 794
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index f55e45b52fbc..33b504bafb88 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3464,6 +3464,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3464 (adev->pdev->device == 0x6667)) { 3464 (adev->pdev->device == 0x6667)) {
3465 max_sclk = 75000; 3465 max_sclk = 75000;
3466 } 3466 }
3467 } else if (adev->asic_type == CHIP_OLAND) {
3468 if ((adev->pdev->device == 0x6604) &&
3469 (adev->pdev->subsystem_vendor == 0x1028) &&
3470 (adev->pdev->subsystem_device == 0x066F)) {
3471 max_sclk = 75000;
3472 }
3467 } 3473 }
3468 3474
3469 if (rps->vce_active) { 3475 if (rps->vce_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 50bdb24ef8d6..4a785d6acfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
1051 /* rev0 hardware requires workarounds to support PG */ 1051 /* rev0 hardware requires workarounds to support PG */
1052 adev->pg_flags = 0; 1052 adev->pg_flags = 0;
1053 if (adev->rev_id != 0x00) { 1053 if (adev->rev_id != 0x00) {
1054 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | 1054 adev->pg_flags |=
1055 AMD_PG_SUPPORT_GFX_SMG | 1055 AMD_PG_SUPPORT_GFX_SMG |
1056 AMD_PG_SUPPORT_GFX_PIPELINE | 1056 AMD_PG_SUPPORT_GFX_PIPELINE |
1057 AMD_PG_SUPPORT_CP | 1057 AMD_PG_SUPPORT_CP |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 8cf71f3c6d0e..261b828ad590 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
178 if (bgate) { 178 if (bgate) {
179 cgs_set_powergating_state(hwmgr->device, 179 cgs_set_powergating_state(hwmgr->device,
180 AMD_IP_BLOCK_TYPE_VCE, 180 AMD_IP_BLOCK_TYPE_VCE,
181 AMD_PG_STATE_UNGATE); 181 AMD_PG_STATE_GATE);
182 cgs_set_clockgating_state(hwmgr->device, 182 cgs_set_clockgating_state(hwmgr->device,
183 AMD_IP_BLOCK_TYPE_VCE, 183 AMD_IP_BLOCK_TYPE_VCE,
184 AMD_CG_STATE_GATE); 184 AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 08e6a71f5d05..294b53697334 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
63 63
64 clk_prepare_enable(hwdev->pxlclk); 64 clk_prepare_enable(hwdev->pxlclk);
65 65
66 /* mclk needs to be set to the same or higher rate than pxlclk */ 66 /* We rely on firmware to set mclk to a sensible level. */
67 clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
68 clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); 67 clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
69 68
70 hwdev->modeset(hwdev, &vm); 69 hwdev->modeset(hwdev, &vm);
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 488aedf5b58d..9f5513006eee 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
83 { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, 83 { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
84 { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE }, 84 { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
85 { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, 85 { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
86 { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 }, 86 { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
87}; 87};
88 88
89#define MALIDP_DE_DEFAULT_PREFETCH_START 5 89#define MALIDP_DE_DEFAULT_PREFETCH_START 5
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 414aada10fe5..d5aec082294c 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -37,6 +37,8 @@
37#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) 37#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
38#define MALIDP_LAYER_COMP_SIZE 0x010 38#define MALIDP_LAYER_COMP_SIZE 0x010
39#define MALIDP_LAYER_OFFSET 0x014 39#define MALIDP_LAYER_OFFSET 0x014
40#define MALIDP550_LS_ENABLE 0x01c
41#define MALIDP550_LS_R1_IN_SIZE 0x020
40 42
41/* 43/*
42 * This 4-entry look-up-table is used to determine the full 8-bit alpha value 44 * This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
242 LAYER_V_VAL(plane->state->crtc_y), 244 LAYER_V_VAL(plane->state->crtc_y),
243 mp->layer->base + MALIDP_LAYER_OFFSET); 245 mp->layer->base + MALIDP_LAYER_OFFSET);
244 246
247 if (mp->layer->id == DE_SMART)
248 malidp_hw_write(mp->hwdev,
249 LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
250 mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
251
245 /* first clear the rotation bits */ 252 /* first clear the rotation bits */
246 val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); 253 val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
247 val &= ~LAYER_ROT_MASK; 254 val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
330 plane->hwdev = malidp->dev; 337 plane->hwdev = malidp->dev;
331 plane->layer = &map->layers[i]; 338 plane->layer = &map->layers[i];
332 339
333 /* Skip the features which the SMART layer doesn't have */ 340 if (id == DE_SMART) {
334 if (id == DE_SMART) 341 /*
342 * Enable the first rectangle in the SMART layer to be
343 * able to use it as a drm plane.
344 */
345 malidp_hw_write(malidp->dev, 1,
346 plane->layer->base + MALIDP550_LS_ENABLE);
347 /* Skip the features which the SMART layer doesn't have. */
335 continue; 348 continue;
349 }
336 350
337 drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags); 351 drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
338 malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT, 352 malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index aff6d4a84e99..b816067a65c5 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -84,6 +84,7 @@
84/* Stride register offsets relative to Lx_BASE */ 84/* Stride register offsets relative to Lx_BASE */
85#define MALIDP_DE_LG_STRIDE 0x18 85#define MALIDP_DE_LG_STRIDE 0x18
86#define MALIDP_DE_LV_STRIDE0 0x18 86#define MALIDP_DE_LV_STRIDE0 0x18
87#define MALIDP550_DE_LS_R1_STRIDE 0x28
87 88
88/* macros to set values into registers */ 89/* macros to set values into registers */
89#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) 90#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c8baab9bee0d..ba58f1b11d1e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -148,6 +148,9 @@ static const struct edid_quirk {
148 148
149 /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ 149 /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
150 { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, 150 { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
151
152 /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
153 { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
151}; 154};
152 155
153/* 156/*
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4a6a2ed65732..b7d7721e72fa 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -41,6 +41,54 @@ enum {
41 INTEL_GVT_PCI_BAR_MAX, 41 INTEL_GVT_PCI_BAR_MAX,
42}; 42};
43 43
44/* Per-byte bitmap of writable bits (RW or RW1C; the two kinds cannot
45 * co-exist within one byte) in the standard PCI configuration space
46 * (not the full 256 bytes).
47 */
48static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
49 [PCI_COMMAND] = 0xff, 0x07,
50 [PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */
51 [PCI_CACHE_LINE_SIZE] = 0xff,
52 [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
53 [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
54 [PCI_INTERRUPT_LINE] = 0xff,
55};
56
57/**
58 * vgpu_pci_cfg_mem_write - write virtual cfg space memory
59 *
60 * Use this function to write virtual cfg space memory.
61 * For standard cfg space, only RW bits can be changed,
62 * and we emulate the RW1C behavior of the PCI_STATUS register.
63 */
64static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
65 u8 *src, unsigned int bytes)
66{
67 u8 *cfg_base = vgpu_cfg_space(vgpu);
68 u8 mask, new, old;
69 int i = 0;
70
71 for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
72 mask = pci_cfg_space_rw_bmp[off + i];
73 old = cfg_base[off + i];
74 new = src[i] & mask;
75
76 /**
77 * The PCI_STATUS high byte has RW1C bits; here we
78 * emulate the clear-on-write-1 behavior of these bits.
79 * Writing a 0b to RW1C bits has no effect.
80 */
81 if (off + i == PCI_STATUS + 1)
82 new = (~new & old) & mask;
83
84 cfg_base[off + i] = (old & ~mask) | new;
85 }
86
87 /* For other configuration space directly copy as it is. */
88 if (i < bytes)
89 memcpy(cfg_base + off + i, src + i, bytes - i);
90}
91
44/** 92/**
45 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read 93 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
46 * 94 *
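The heart of the emulation is the RW1C update rule: within the masked byte, writing 1 clears a bit, writing 0 leaves it alone, and bits outside the mask keep their old value. A standalone sketch with illustrative values:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t rw1c_write(uint8_t old, uint8_t src, uint8_t mask)
    {
        uint8_t new = src & mask;

        new = (~new & old) & mask;      /* a written 1 clears the bit */
        return (old & ~mask) | new;     /* non-writable bits unchanged */
    }

    int main(void)
    {
        /* status 0xf0 with RW1C mask 0xf0: writing 0x90 clears bits 7, 4 */
        assert(rw1c_write(0xf0, 0x90, 0xf0) == 0x60);
        return 0;
    }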
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
123 u8 changed = old ^ new; 171 u8 changed = old ^ new;
124 int ret; 172 int ret;
125 173
126 memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); 174 vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
127 if (!(changed & PCI_COMMAND_MEMORY)) 175 if (!(changed & PCI_COMMAND_MEMORY))
128 return 0; 176 return 0;
129 177
@@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
237{ 285{
238 int ret; 286 int ret;
239 287
288 if (vgpu->failsafe)
289 return 0;
290
240 if (WARN_ON(bytes > 4)) 291 if (WARN_ON(bytes > 4))
241 return -EINVAL; 292 return -EINVAL;
242 293
@@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
274 if (ret) 325 if (ret)
275 return ret; 326 return ret;
276 327
277 memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); 328 vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
278 break; 329 break;
279 default: 330 default:
280 memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); 331 vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
281 break; 332 break;
282 } 333 }
283 return 0; 334 return 0;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b9c8e2407682..7ae6e2b241c8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
668 if (d_info == NULL) 668 if (d_info == NULL)
669 return; 669 return;
670 670
671 gvt_err("opcode=0x%x %s sub_ops:", 671 gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
672 cmd >> (32 - d_info->op_len), d_info->name); 672 cmd >> (32 - d_info->op_len), d_info->name);
673 673
674 for (i = 0; i < d_info->nr_sub_op; i++) 674 for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
693 int cnt = 0; 693 int cnt = 0;
694 int i; 694 int i;
695 695
696 gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" 696 gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
697 " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id, 697 " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
698 s->ring_id, s->ring_start, s->ring_start + s->ring_size, 698 s->ring_id, s->ring_start, s->ring_start + s->ring_size,
699 s->ring_head, s->ring_tail); 699 s->ring_head, s->ring_tail);
700 700
701 gvt_err(" %s %s ip_gma(%08lx) ", 701 gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
702 s->buf_type == RING_BUFFER_INSTRUCTION ? 702 s->buf_type == RING_BUFFER_INSTRUCTION ?
703 "RING_BUFFER" : "BATCH_BUFFER", 703 "RING_BUFFER" : "BATCH_BUFFER",
704 s->buf_addr_type == GTT_BUFFER ? 704 s->buf_addr_type == GTT_BUFFER ?
705 "GTT" : "PPGTT", s->ip_gma); 705 "GTT" : "PPGTT", s->ip_gma);
706 706
707 if (s->ip_va == NULL) { 707 if (s->ip_va == NULL) {
708 gvt_err(" ip_va(NULL)"); 708 gvt_dbg_cmd(" ip_va(NULL)");
709 return; 709 return;
710 } 710 }
711 711
712 gvt_err(" ip_va=%p: %08x %08x %08x %08x\n", 712 gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
713 s->ip_va, cmd_val(s, 0), cmd_val(s, 1), 713 s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
714 cmd_val(s, 2), cmd_val(s, 3)); 714 cmd_val(s, 2), cmd_val(s, 3));
715 715
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8fde880c39..5419ae6ec633 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
83 return 0; 83 return 0;
84} 84}
85 85
86static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
87 {
88/* EDID with 1024x768 as its resolution */
89 /*Header*/
90 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
91 /* Vendor & Product Identification */
92 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
93 /* Version & Revision */
94 0x01, 0x04,
95 /* Basic Display Parameters & Features */
96 0xa5, 0x34, 0x20, 0x78, 0x23,
97 /* Color Characteristics */
98 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
99 /* Established Timings: maximum resolution is 1024x768 */
100 0x21, 0x08, 0x00,
101 /* Standard Timings. All invalid */
102 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
103 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
104 /* 18 Byte Data Blocks 1: invalid */
105 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
106 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
107 /* 18 Byte Data Blocks 2: invalid */
108 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
109 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
110 /* 18 Byte Data Blocks 3: invalid */
111 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
112 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
113 /* 18 Byte Data Blocks 4: invalid */
114 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
115 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
116 /* Extension Block Count */
117 0x00,
118 /* Checksum */
119 0xef,
120 },
121 {
86/* EDID with 1920x1200 as its resolution */ 122/* EDID with 1920x1200 as its resolution */
87static unsigned char virtual_dp_monitor_edid[] = { 123 /*Header*/
88 /*Header*/ 124 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
89 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 125 /* Vendor & Product Identification */
90 /* Vendor & Product Identification */ 126 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
91 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, 127 /* Version & Revision */
92 /* Version & Revision */ 128 0x01, 0x04,
93 0x01, 0x04, 129 /* Basic Display Parameters & Features */
94 /* Basic Display Parameters & Features */ 130 0xa5, 0x34, 0x20, 0x78, 0x23,
95 0xa5, 0x34, 0x20, 0x78, 0x23, 131 /* Color Characteristics */
96 /* Color Characteristics */ 132 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
97 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, 133 /* Established Timings: maximum resolution is 1024x768 */
98 /* Established Timings: maximum resolution is 1024x768 */ 134 0x21, 0x08, 0x00,
99 0x21, 0x08, 0x00, 135 /*
100 /* 136 * Standard Timings.
101 * Standard Timings. 137 * below new resolutions can be supported:
102 * below new resolutions can be supported: 138 * 1920x1080, 1280x720, 1280x960, 1280x1024,
103 * 1920x1080, 1280x720, 1280x960, 1280x1024, 139 * 1440x900, 1600x1200, 1680x1050
104 * 1440x900, 1600x1200, 1680x1050 140 */
105 */ 141 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
106 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, 142 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
107 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, 143 /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
108 /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ 144 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
109 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, 145 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
110 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, 146 /* 18 Byte Data Blocks 2: invalid */
111 /* 18 Byte Data Blocks 2: invalid */ 147 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
112 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, 148 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
113 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 149 /* 18 Byte Data Blocks 3: invalid */
114 /* 18 Byte Data Blocks 3: invalid */ 150 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
115 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, 151 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
116 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, 152 /* 18 Byte Data Blocks 4: invalid */
117 /* 18 Byte Data Blocks 4: invalid */ 153 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
118 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, 154 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
119 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, 155 /* Extension Block Count */
120 /* Extension Block Count */ 156 0x00,
121 0x00, 157 /* Checksum */
122 /* Checksum */ 158 0x45,
123 0x45, 159 },
124}; 160};
125 161
126#define DPCD_HEADER_SIZE 0xb 162#define DPCD_HEADER_SIZE 0xb
@@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
140 vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | 176 vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
141 SDE_PORTE_HOTPLUG_SPT); 177 SDE_PORTE_HOTPLUG_SPT);
142 178
143 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) 179 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
144 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; 180 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
181 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
182 }
145 183
146 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) 184 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
147 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; 185 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
186 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
187 }
148 188
149 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) 189 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
150 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; 190 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
191 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
192 }
151 193
152 if (IS_SKYLAKE(dev_priv) && 194 if (IS_SKYLAKE(dev_priv) &&
153 intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { 195 intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
160 GEN8_PORT_DP_A_HOTPLUG; 202 GEN8_PORT_DP_A_HOTPLUG;
161 else 203 else
162 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; 204 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
205
206 vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
163 } 207 }
164} 208}
165 209
@@ -175,10 +219,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
175} 219}
176 220
177static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, 221static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
178 int type) 222 int type, unsigned int resolution)
179{ 223{
180 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); 224 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
181 225
226 if (WARN_ON(resolution >= GVT_EDID_NUM))
227 return -EINVAL;
228
182 port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); 229 port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
183 if (!port->edid) 230 if (!port->edid)
184 return -ENOMEM; 231 return -ENOMEM;
@@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
189 return -ENOMEM; 236 return -ENOMEM;
190 } 237 }
191 238
192 memcpy(port->edid->edid_block, virtual_dp_monitor_edid, 239 memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
193 EDID_SIZE); 240 EDID_SIZE);
194 port->edid->data_valid = true; 241 port->edid->data_valid = true;
195 242
@@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
322 * Zero on success, negative error code if failed. 369 * Zero on success, negative error code if failed.
323 * 370 *
324 */ 371 */
325int intel_vgpu_init_display(struct intel_vgpu *vgpu) 372int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
326{ 373{
327 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 374 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
328 375
329 intel_vgpu_init_i2c_edid(vgpu); 376 intel_vgpu_init_i2c_edid(vgpu);
330 377
331 if (IS_SKYLAKE(dev_priv)) 378 if (IS_SKYLAKE(dev_priv))
332 return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D); 379 return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
380 resolution);
333 else 381 else
334 return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); 382 return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
383 resolution);
335} 384}
336 385
337/** 386/**
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 8b234ea961f6..d73de22102e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -154,10 +154,28 @@ struct intel_vgpu_port {
154 int type; 154 int type;
155}; 155};
156 156
157enum intel_vgpu_edid {
158 GVT_EDID_1024_768,
159 GVT_EDID_1920_1200,
160 GVT_EDID_NUM,
161};
162
163static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
164{
165 switch (id) {
166 case GVT_EDID_1024_768:
167 return "1024x768";
168 case GVT_EDID_1920_1200:
169 return "1920x1200";
170 default:
171 return "";
172 }
173}
174
157void intel_gvt_emulate_vblank(struct intel_gvt *gvt); 175void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
158void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); 176void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
159 177
160int intel_vgpu_init_display(struct intel_vgpu *vgpu); 178int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
161void intel_vgpu_reset_display(struct intel_vgpu *vgpu); 179void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
162void intel_vgpu_clean_display(struct intel_vgpu *vgpu); 180void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
163 181
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 1cb29b2d7dc6..933a7c211a1c 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
80 int ret; 80 int ret;
81 81
82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; 82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
83 firmware = vmalloc(size); 83 firmware = vzalloc(size);
84 if (!firmware) 84 if (!firmware)
85 return -ENOMEM; 85 return -ENOMEM;
86 86
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 28c92346db0e..6a5ff23ded90 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1825 gma = g_gtt_index << GTT_PAGE_SHIFT; 1825 gma = g_gtt_index << GTT_PAGE_SHIFT;
1826 1826
1827 /* the VM may configure the whole GM space when ballooning is used */ 1827 /* the VM may configure the whole GM space when ballooning is used */
1828 if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma), 1828 if (!vgpu_gmadr_is_valid(vgpu, gma))
1829 "vgpu%d: found oob ggtt write, offset %x\n",
1830 vgpu->id, off)) {
1831 return 0; 1829 return 0;
1832 }
1833 1830
1834 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); 1831 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
1835 1832
@@ -2015,6 +2012,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2015 return create_scratch_page_tree(vgpu); 2012 return create_scratch_page_tree(vgpu);
2016} 2013}
2017 2014
2015static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
2016{
2017 struct list_head *pos, *n;
2018 struct intel_vgpu_mm *mm;
2019
2020 list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
2021 mm = container_of(pos, struct intel_vgpu_mm, list);
2022 if (mm->type == type) {
2023 vgpu->gvt->gtt.mm_free_page_table(mm);
2024 list_del(&mm->list);
2025 list_del(&mm->lru_list);
2026 kfree(mm);
2027 }
2028 }
2029}
2030
2018/** 2031/**
 2019 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization 2032
2020 * @vgpu: a vGPU 2033 * @vgpu: a vGPU
@@ -2027,19 +2040,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2027 */ 2040 */
2028void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) 2041void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2029{ 2042{
2030 struct list_head *pos, *n;
2031 struct intel_vgpu_mm *mm;
2032
2033 ppgtt_free_all_shadow_page(vgpu); 2043 ppgtt_free_all_shadow_page(vgpu);
2034 release_scratch_page_tree(vgpu); 2044 release_scratch_page_tree(vgpu);
2035 2045
2036 list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { 2046 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
2037 mm = container_of(pos, struct intel_vgpu_mm, list); 2047 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
2038 vgpu->gvt->gtt.mm_free_page_table(mm);
2039 list_del(&mm->list);
2040 list_del(&mm->lru_list);
2041 kfree(mm);
2042 }
2043} 2048}
2044 2049
2045static void clean_spt_oos(struct intel_gvt *gvt) 2050static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2322,6 +2327,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
2322 int i; 2327 int i;
2323 2328
2324 ppgtt_free_all_shadow_page(vgpu); 2329 ppgtt_free_all_shadow_page(vgpu);
2330
2331 /* Shadow pages are only created when there is no page
2332 * table tracking data, so remove page tracking data after
2333 * removing the shadow pages.
2334 */
2335 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
2336
2325 if (!dmlr) 2337 if (!dmlr)
2326 return; 2338 return;
2327 2339
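The new intel_vgpu_free_mm() helper open-codes list_for_each_safe() plus container_of(); an equivalent sketch using list_for_each_entry_safe() expresses the same type-filtered teardown more compactly:

	struct intel_vgpu_mm *mm, *tmp;

	list_for_each_entry_safe(mm, tmp, &vgpu->gtt.mm_list_head, list) {
		if (mm->type != type)
			continue;
		/* release page tables before unlinking, as in the helper */
		vgpu->gvt->gtt.mm_free_page_table(mm);
		list_del(&mm->list);
		list_del(&mm->lru_list);
		kfree(mm);
	}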
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index e227caf5859e..23791920ced1 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -143,6 +143,8 @@ struct intel_vgpu {
143 int id; 143 int id;
144 unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ 144 unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
145 bool active; 145 bool active;
146 bool pv_notified;
147 bool failsafe;
146 bool resetting; 148 bool resetting;
147 void *sched_data; 149 void *sched_data;
148 150
@@ -203,18 +205,18 @@ struct intel_gvt_firmware {
203}; 205};
204 206
205struct intel_gvt_opregion { 207struct intel_gvt_opregion {
206 void __iomem *opregion_va; 208 void *opregion_va;
207 u32 opregion_pa; 209 u32 opregion_pa;
208}; 210};
209 211
210#define NR_MAX_INTEL_VGPU_TYPES 20 212#define NR_MAX_INTEL_VGPU_TYPES 20
211struct intel_vgpu_type { 213struct intel_vgpu_type {
212 char name[16]; 214 char name[16];
213 unsigned int max_instance;
214 unsigned int avail_instance; 215 unsigned int avail_instance;
215 unsigned int low_gm_size; 216 unsigned int low_gm_size;
216 unsigned int high_gm_size; 217 unsigned int high_gm_size;
217 unsigned int fence; 218 unsigned int fence;
219 enum intel_vgpu_edid resolution;
218}; 220};
219 221
220struct intel_gvt { 222struct intel_gvt {
@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params {
317 __u64 low_gm_sz; /* in MB */ 319 __u64 low_gm_sz; /* in MB */
318 __u64 high_gm_sz; /* in MB */ 320 __u64 high_gm_sz; /* in MB */
319 __u64 fence_sz; 321 __u64 fence_sz;
322 __u64 resolution;
320 __s32 primary; 323 __s32 primary;
321 __u64 vgpu_id; 324 __u64 vgpu_id;
322}; 325};
@@ -449,6 +452,11 @@ struct intel_gvt_ops {
449}; 452};
450 453
451 454
455enum {
456 GVT_FAILSAFE_UNSUPPORTED_GUEST,
457 GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
458};
459
452#include "mpt.h" 460#include "mpt.h"
453 461
454#endif 462#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1d450627ff65..8e43395c748a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
121 info->size = size; 121 info->size = size;
122 info->length = (i + 4) < end ? 4 : (end - i); 122 info->length = (i + 4) < end ? 4 : (end - i);
123 info->addr_mask = addr_mask; 123 info->addr_mask = addr_mask;
124 info->ro_mask = ro_mask;
124 info->device = device; 125 info->device = device;
125 info->read = read ? read : intel_vgpu_default_mmio_read; 126 info->read = read ? read : intel_vgpu_default_mmio_read;
126 info->write = write ? write : intel_vgpu_default_mmio_write; 127 info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -150,15 +151,44 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
150#define fence_num_to_offset(num) \ 151#define fence_num_to_offset(num) \
151 (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) 152 (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
152 153
154
155static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
156{
157 switch (reason) {
158 case GVT_FAILSAFE_UNSUPPORTED_GUEST:
159 pr_err("Detected your guest driver doesn't support GVT-g.\n");
160 break;
161 case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
162 pr_err("Graphics resource is not enough for the guest\n");
163 default:
164 break;
165 }
166 pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
167 vgpu->failsafe = true;
168}
169
153static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, 170static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
154 unsigned int fence_num, void *p_data, unsigned int bytes) 171 unsigned int fence_num, void *p_data, unsigned int bytes)
155{ 172{
156 if (fence_num >= vgpu_fence_sz(vgpu)) { 173 if (fence_num >= vgpu_fence_sz(vgpu)) {
157 gvt_err("vgpu%d: found oob fence register access\n", 174
158 vgpu->id); 175 /* When guest access oob fence regs without access
159 gvt_err("vgpu%d: total fence num %d access fence num %d\n", 176 * pv_info first, we treat guest not supporting GVT,
160 vgpu->id, vgpu_fence_sz(vgpu), fence_num); 177 * and we will let vgpu enter failsafe mode.
178 */
179 if (!vgpu->pv_notified)
180 enter_failsafe_mode(vgpu,
181 GVT_FAILSAFE_UNSUPPORTED_GUEST);
182
183 if (!vgpu->mmio.disable_warn_untrack) {
184 gvt_err("vgpu%d: found oob fence register access\n",
185 vgpu->id);
186 gvt_err("vgpu%d: total fence %d, access fence %d\n",
187 vgpu->id, vgpu_fence_sz(vgpu),
188 fence_num);
189 }
161 memset(p_data, 0, bytes); 190 memset(p_data, 0, bytes);
191 return -EINVAL;
162 } 192 }
163 return 0; 193 return 0;
164} 194}
@@ -369,6 +399,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
369 return 0; 399 return 0;
370} 400}
371 401
 402/* sorted in ascending order */
403static i915_reg_t force_nonpriv_white_list[] = {
404 GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
405 GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
406 GEN8_CS_CHICKEN1,//_MMIO(0x2580)
407 _MMIO(0x2690),
408 _MMIO(0x2694),
409 _MMIO(0x2698),
410 _MMIO(0x4de0),
411 _MMIO(0x4de4),
412 _MMIO(0x4dfc),
413 GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
414 _MMIO(0x7014),
415 HDC_CHICKEN0,//_MMIO(0x7300)
416 GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
417 _MMIO(0x7700),
418 _MMIO(0x7704),
419 _MMIO(0x7708),
420 _MMIO(0x770c),
421 _MMIO(0xb110),
422 GEN8_L3SQCREG4,//_MMIO(0xb118)
423 _MMIO(0xe100),
424 _MMIO(0xe18c),
425 _MMIO(0xe48c),
426 _MMIO(0xe5f4),
427};
428
429/* a simple bsearch */
430static inline bool in_whitelist(unsigned int reg)
431{
432 int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
433 i915_reg_t *array = force_nonpriv_white_list;
434
435 while (left < right) {
436 int mid = (left + right)/2;
437
438 if (reg > array[mid].reg)
439 left = mid + 1;
440 else if (reg < array[mid].reg)
441 right = mid;
442 else
443 return true;
444 }
445 return false;
446}
447
448static int force_nonpriv_write(struct intel_vgpu *vgpu,
449 unsigned int offset, void *p_data, unsigned int bytes)
450{
451 u32 reg_nonpriv = *(u32 *)p_data;
452 int ret = -EINVAL;
453
454 if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
455 gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
456 vgpu->id, offset, bytes);
457 return ret;
458 }
459
460 if (in_whitelist(reg_nonpriv)) {
461 ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
462 bytes);
463 } else {
464 gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
465 vgpu->id, reg_nonpriv);
466 }
467 return ret;
468}
469
372static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 470static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
373 void *p_data, unsigned int bytes) 471 void *p_data, unsigned int bytes)
374{ 472{
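Since in_whitelist() only works while force_nonpriv_white_list stays sorted in ascending order, a one-time sanity check could catch a misplaced entry at init time; a sketch (hypothetical helper, not part of this patch):

	static void check_whitelist_sorted(void)
	{
		int i;

		/* the bsearch silently misses entries if this ever fails */
		for (i = 1; i < ARRAY_SIZE(force_nonpriv_white_list); i++)
			WARN_ON(force_nonpriv_white_list[i - 1].reg >=
				force_nonpriv_white_list[i].reg);
	}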
@@ -1001,6 +1099,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1001 if (invalid_read) 1099 if (invalid_read)
1002 gvt_err("invalid pvinfo read: [%x:%x] = %x\n", 1100 gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
1003 offset, bytes, *(u32 *)p_data); 1101 offset, bytes, *(u32 *)p_data);
1102 vgpu->pv_notified = true;
1004 return 0; 1103 return 0;
1005} 1104}
1006 1105
@@ -1039,7 +1138,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
1039 char vmid_str[20]; 1138 char vmid_str[20];
1040 char display_ready_str[20]; 1139 char display_ready_str[20];
1041 1140
1042 snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready); 1141 snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
1043 env[0] = display_ready_str; 1142 env[0] = display_ready_str;
1044 1143
1045 snprintf(vmid_str, 20, "VMID=%d", vgpu->id); 1144 snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
@@ -1078,6 +1177,9 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1078 case _vgtif_reg(execlist_context_descriptor_lo): 1177 case _vgtif_reg(execlist_context_descriptor_lo):
1079 case _vgtif_reg(execlist_context_descriptor_hi): 1178 case _vgtif_reg(execlist_context_descriptor_hi):
1080 break; 1179 break;
1180 case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
1181 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1182 break;
1081 default: 1183 default:
1082 gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", 1184 gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
1083 offset, bytes, data); 1185 offset, bytes, data);
@@ -1203,26 +1305,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
1203 u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA); 1305 u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
1204 1306
1205 switch (cmd) { 1307 switch (cmd) {
1206 case 0x6: 1308 case GEN9_PCODE_READ_MEM_LATENCY:
1207 /** 1309 if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
1208 * "Read memory latency" command on gen9. 1310 /**
1209 * Below memory latency values are read 1311 * "Read memory latency" command on gen9.
1210 * from skylake platform. 1312 * Below memory latency values are read
1211 */ 1313 * from skylake platform.
1212 if (!*data0) 1314 */
1213 *data0 = 0x1e1a1100; 1315 if (!*data0)
1214 else 1316 *data0 = 0x1e1a1100;
1215 *data0 = 0x61514b3d; 1317 else
1318 *data0 = 0x61514b3d;
1319 }
1320 break;
1321 case SKL_PCODE_CDCLK_CONTROL:
1322 if (IS_SKYLAKE(vgpu->gvt->dev_priv))
1323 *data0 = SKL_CDCLK_READY_FOR_CHANGE;
1216 break; 1324 break;
1217 case 0x5: 1325 case GEN6_PCODE_READ_RC6VIDS:
1218 *data0 |= 0x1; 1326 *data0 |= 0x1;
1219 break; 1327 break;
1220 } 1328 }
1221 1329
1222 gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n", 1330 gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
1223 vgpu->id, value, *data0); 1331 vgpu->id, value, *data0);
1224 1332 /**
 1225 value &= ~(1 << 31); 1333 * A clear PCODE_READY means the mailbox is ready for a pcode
 1334 * read/write, and a clear PCODE_ERROR_MASK means no error occurred.
 1335 * In GVT-g we always emulate pcode read/write success and readiness,
 1336 * since we don't touch real physical registers here.
1337 */
1338 value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
1226 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); 1339 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
1227} 1340}
1228 1341
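The reason for clearing GEN6_PCODE_READY is the guest-side handshake: the guest writes a command with the ready bit set, then polls until hardware clears it. A simplified sketch of that guest pattern (modeled on i915's pcode helpers; dev_priv and cmd are assumed to be in scope):

	/* guest side, simplified: issue the request ... */
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | cmd);
	/* ... then poll for completion; with the emulation above the
	 * ready bit is already clear, so this exits on the first read.
	 */
	while (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		cpu_relax();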
@@ -1318,6 +1431,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1318 bool enable_execlist; 1431 bool enable_execlist;
1319 1432
1320 write_vreg(vgpu, offset, p_data, bytes); 1433 write_vreg(vgpu, offset, p_data, bytes);
1434
 1435 /* When PPGTT mode is enabled, check whether the guest has
 1436 * accessed pvinfo; if not, treat it as a non-GVT-g-aware guest
 1437 * and stop emulating its cfg space, mmio, gtt, etc.
1438 */
1439 if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
1440 (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
1441 && !vgpu->pv_notified) {
1442 enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
1443 return 0;
1444 }
1321 if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) 1445 if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
1322 || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { 1446 || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
1323 enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); 1447 enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
@@ -1400,6 +1524,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
1400#define MMIO_GM(reg, d, r, w) \ 1524#define MMIO_GM(reg, d, r, w) \
1401 MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w) 1525 MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
1402 1526
1527#define MMIO_GM_RDR(reg, d, r, w) \
1528 MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
1529
1403#define MMIO_RO(reg, d, f, rm, r, w) \ 1530#define MMIO_RO(reg, d, f, rm, r, w) \
1404 MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w) 1531 MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
1405 1532
@@ -1419,6 +1546,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
1419#define MMIO_RING_GM(prefix, d, r, w) \ 1546#define MMIO_RING_GM(prefix, d, r, w) \
1420 MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w) 1547 MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
1421 1548
1549#define MMIO_RING_GM_RDR(prefix, d, r, w) \
1550 MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
1551
1422#define MMIO_RING_RO(prefix, d, f, rm, r, w) \ 1552#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
1423 MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) 1553 MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
1424 1554
@@ -1427,73 +1557,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1427 struct drm_i915_private *dev_priv = gvt->dev_priv; 1557 struct drm_i915_private *dev_priv = gvt->dev_priv;
1428 int ret; 1558 int ret;
1429 1559
1430 MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); 1560 MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
1561 intel_vgpu_reg_imr_handler);
1431 1562
1432 MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); 1563 MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
1433 MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); 1564 MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
1434 MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); 1565 MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
1435 MMIO_D(SDEISR, D_ALL); 1566 MMIO_D(SDEISR, D_ALL);
1436 1567
1437 MMIO_RING_D(RING_HWSTAM, D_ALL); 1568 MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
1438 1569
1439 MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1570 MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
1440 MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1571 MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
1441 MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1572 MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
1442 MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1573 MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
1443 1574
1444#define RING_REG(base) (base + 0x28) 1575#define RING_REG(base) (base + 0x28)
1445 MMIO_RING_D(RING_REG, D_ALL); 1576 MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
1446#undef RING_REG 1577#undef RING_REG
1447 1578
1448#define RING_REG(base) (base + 0x134) 1579#define RING_REG(base) (base + 0x134)
1449 MMIO_RING_D(RING_REG, D_ALL); 1580 MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
1450#undef RING_REG 1581#undef RING_REG
1451 1582
1452 MMIO_GM(0x2148, D_ALL, NULL, NULL); 1583 MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
1453 MMIO_GM(CCID, D_ALL, NULL, NULL); 1584 MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
1454 MMIO_GM(0x12198, D_ALL, NULL, NULL); 1585 MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
1455 MMIO_D(GEN7_CXT_SIZE, D_ALL); 1586 MMIO_D(GEN7_CXT_SIZE, D_ALL);
1456 1587
1457 MMIO_RING_D(RING_TAIL, D_ALL); 1588 MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
1458 MMIO_RING_D(RING_HEAD, D_ALL); 1589 MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
1459 MMIO_RING_D(RING_CTL, D_ALL); 1590 MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
1460 MMIO_RING_D(RING_ACTHD, D_ALL); 1591 MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
1461 MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); 1592 MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
1462 1593
1463 /* RING MODE */ 1594 /* RING MODE */
1464#define RING_REG(base) (base + 0x29c) 1595#define RING_REG(base) (base + 0x29c)
1465 MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write); 1596 MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
1597 ring_mode_mmio_write);
1466#undef RING_REG 1598#undef RING_REG
1467 1599
1468 MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL); 1600 MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
1469 MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL); 1601 NULL, NULL);
1602 MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
1603 NULL, NULL);
1470 MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, 1604 MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
1471 ring_timestamp_mmio_read, NULL); 1605 ring_timestamp_mmio_read, NULL);
1472 MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, 1606 MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
1473 ring_timestamp_mmio_read, NULL); 1607 ring_timestamp_mmio_read, NULL);
1474 1608
1475 MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL); 1609 MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1476 MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL); 1610 MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
1611 NULL, NULL);
1477 MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1612 MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1478 1613 MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1479 MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL); 1614 MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1480 MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL); 1615
1481 MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL); 1616 MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1482 MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL); 1617 MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1483 MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL); 1618 MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1484 MMIO_D(GAM_ECOCHK, D_ALL); 1619 MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1485 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL); 1620 MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1621 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
1622 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
1623 NULL, NULL);
1486 MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1624 MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1487 MMIO_D(0x9030, D_ALL); 1625 MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
1488 MMIO_D(0x20a0, D_ALL); 1626 MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
1489 MMIO_D(0x2420, D_ALL); 1627 MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
1490 MMIO_D(0x2430, D_ALL); 1628 MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
1491 MMIO_D(0x2434, D_ALL); 1629 MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
1492 MMIO_D(0x2438, D_ALL); 1630 MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
1493 MMIO_D(0x243c, D_ALL); 1631 MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
1494 MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL); 1632 MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1495 MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1633 MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1496 MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL); 1634 MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1497 1635
1498 /* display */ 1636 /* display */
1499 MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL); 1637 MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
@@ -2022,8 +2160,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2022 MMIO_D(FORCEWAKE_ACK, D_ALL); 2160 MMIO_D(FORCEWAKE_ACK, D_ALL);
2023 MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); 2161 MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
2024 MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL); 2162 MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
2025 MMIO_D(GTFIFODBG, D_ALL); 2163 MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
2026 MMIO_D(GTFIFOCTL, D_ALL); 2164 MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2027 MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); 2165 MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
2028 MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL); 2166 MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
2029 MMIO_D(ECOBUS, D_ALL); 2167 MMIO_D(ECOBUS, D_ALL);
@@ -2080,7 +2218,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2080 2218
2081 MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL); 2219 MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
2082 2220
2083 MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL); 2221 MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
2084 MMIO_D(GEN6_PCODE_DATA, D_ALL); 2222 MMIO_D(GEN6_PCODE_DATA, D_ALL);
2085 MMIO_D(0x13812c, D_ALL); 2223 MMIO_D(0x13812c, D_ALL);
2086 MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); 2224 MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2159,36 +2297,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2159 MMIO_D(0x1a054, D_ALL); 2297 MMIO_D(0x1a054, D_ALL);
2160 2298
2161 MMIO_D(0x44070, D_ALL); 2299 MMIO_D(0x44070, D_ALL);
2162 2300 MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
2163 MMIO_D(0x215c, D_HSW_PLUS);
2164 MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL); 2301 MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
2165 MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL); 2302 MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
2166 MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL); 2303 MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
2167 MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL); 2304 MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
2168 2305
2169 MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL); 2306 MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
2170 MMIO_D(GEN7_OACONTROL, D_HSW); 2307 MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
2171 MMIO_D(0x2b00, D_BDW_PLUS); 2308 MMIO_D(0x2b00, D_BDW_PLUS);
2172 MMIO_D(0x2360, D_BDW_PLUS); 2309 MMIO_D(0x2360, D_BDW_PLUS);
2173 MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL); 2310 MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2174 MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL); 2311 MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2175 MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL); 2312 MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2176 2313
2177 MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2314 MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2178 MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2315 MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2179 MMIO_D(BCS_SWCTRL, D_ALL); 2316 MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
2180 2317
2181 MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2318 MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2182 MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2319 MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2183 MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2320 MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2184 MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2321 MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2185 MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2322 MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2186 MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2323 MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2187 MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2324 MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2188 MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2325 MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2189 MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2326 MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2190 MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2327 MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2191 MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2328 MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
2192 MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); 2329 MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2193 MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); 2330 MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2194 MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); 2331 MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2196,6 +2333,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2196 MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); 2333 MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
2197 MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2334 MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2198 2335
2336 MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2337 MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
2338 MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
2339 MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
2340 MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
2341 MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
2342 MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
2343 MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2344 MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2345 MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2346 MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2199 return 0; 2347 return 0;
2200} 2348}
2201 2349
@@ -2204,7 +2352,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2204 struct drm_i915_private *dev_priv = gvt->dev_priv; 2352 struct drm_i915_private *dev_priv = gvt->dev_priv;
2205 int ret; 2353 int ret;
2206 2354
2207 MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, 2355 MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
2208 intel_vgpu_reg_imr_handler); 2356 intel_vgpu_reg_imr_handler);
2209 2357
2210 MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); 2358 MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2269,24 +2417,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2269 MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, 2417 MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
2270 intel_vgpu_reg_master_irq_handler); 2418 intel_vgpu_reg_master_irq_handler);
2271 2419
2272 MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2420 MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2273 MMIO_D(0x1c134, D_BDW_PLUS); 2421 F_CMD_ACCESS, NULL, NULL);
2274 2422 MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2275 MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2423
2276 MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2424 MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2277 MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); 2425 NULL, NULL);
2278 MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2426 MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2279 MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2427 F_CMD_ACCESS, NULL, NULL);
2280 MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2428 MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
2281 MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write); 2429 MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2282 MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, 2430 NULL, NULL);
2283 NULL, NULL); 2431 MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2284 MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, 2432 F_CMD_ACCESS, NULL, NULL);
2285 NULL, NULL); 2433 MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2434 F_CMD_ACCESS, NULL, NULL);
2435 MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
2436 ring_mode_mmio_write);
2437 MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2438 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2439 MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2440 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2286 MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, 2441 MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2287 ring_timestamp_mmio_read, NULL); 2442 ring_timestamp_mmio_read, NULL);
2288 2443
2289 MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS); 2444 MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2290 2445
2291#define RING_REG(base) (base + 0xd0) 2446#define RING_REG(base) (base + 0xd0)
2292 MMIO_RING_F(RING_REG, 4, F_RO, 0, 2447 MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2303,13 +2458,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2303#undef RING_REG 2458#undef RING_REG
2304 2459
2305#define RING_REG(base) (base + 0x234) 2460#define RING_REG(base) (base + 0x234)
2306 MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); 2461 MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
2307 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL); 2462 NULL, NULL);
2463 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
2464 ~0LL, D_BDW_PLUS, NULL, NULL);
2308#undef RING_REG 2465#undef RING_REG
2309 2466
2310#define RING_REG(base) (base + 0x244) 2467#define RING_REG(base) (base + 0x244)
2311 MMIO_RING_D(RING_REG, D_BDW_PLUS); 2468 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2312 MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2469 MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2470 NULL, NULL);
2313#undef RING_REG 2471#undef RING_REG
2314 2472
2315#define RING_REG(base) (base + 0x370) 2473#define RING_REG(base) (base + 0x370)
@@ -2331,6 +2489,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2331 MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); 2489 MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
2332 MMIO_D(0x1c054, D_BDW_PLUS); 2490 MMIO_D(0x1c054, D_BDW_PLUS);
2333 2491
2492 MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
2493
2334 MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); 2494 MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
2335 MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); 2495 MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
2336 2496
@@ -2341,14 +2501,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2341 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); 2501 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
2342#undef RING_REG 2502#undef RING_REG
2343 2503
2344 MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); 2504 MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
2345 MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL); 2505 MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
2346 2506
2347 MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2507 MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2348 2508
2349 MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW); 2509 MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
2350 MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW); 2510 MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
2351 MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW); 2511 MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
2352 2512
2353 MMIO_D(WM_MISC, D_BDW); 2513 MMIO_D(WM_MISC, D_BDW);
2354 MMIO_D(BDW_EDP_PSR_BASE, D_BDW); 2514 MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
@@ -2362,27 +2522,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2362 MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); 2522 MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
2363 MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); 2523 MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
2364 2524
2365 MMIO_D(0xfdc, D_BDW); 2525 MMIO_D(0xfdc, D_BDW_PLUS);
2366 MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2526 MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2367 MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS); 2527 NULL, NULL);
2368 MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS); 2528 MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
2529 NULL, NULL);
2530 MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2369 2531
2370 MMIO_D(0xb1f0, D_BDW); 2532 MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
2371 MMIO_D(0xb1c0, D_BDW); 2533 MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
2372 MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2534 MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2373 MMIO_D(0xb100, D_BDW); 2535 MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
2374 MMIO_D(0xb10c, D_BDW); 2536 MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
2375 MMIO_D(0xb110, D_BDW); 2537 MMIO_D(0xb110, D_BDW);
2376 2538
2377 MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2539 MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
2378 MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2540 NULL, force_nonpriv_write);
2379 MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2541
2380 MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2542 MMIO_D(0x22040, D_BDW_PLUS);
2543 MMIO_D(0x44484, D_BDW_PLUS);
2544 MMIO_D(0x4448c, D_BDW_PLUS);
2381 2545
2382 MMIO_D(0x83a4, D_BDW); 2546 MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
2383 MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); 2547 MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
2384 2548
2385 MMIO_D(0x8430, D_BDW); 2549 MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
2386 2550
2387 MMIO_D(0x110000, D_BDW_PLUS); 2551 MMIO_D(0x110000, D_BDW_PLUS);
2388 2552
@@ -2394,10 +2558,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2394 MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2558 MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2395 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2559 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2396 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2560 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2397 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); 2561 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2398 2562
2399 MMIO_D(0x2248, D_BDW); 2563 MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
2400 2564
2565 MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2566 MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2567 MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2568 MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2569 MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2570 MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2571 MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2572 MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2573 MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2401 return 0; 2574 return 0;
2402} 2575}
2403 2576
@@ -2420,7 +2593,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2420 MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); 2593 MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
2421 MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); 2594 MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
2422 2595
2423 MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
2424 MMIO_D(0xa210, D_SKL_PLUS); 2596 MMIO_D(0xa210, D_SKL_PLUS);
2425 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2597 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
2426 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2598 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2578,16 +2750,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2578 MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); 2750 MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
2579 2751
2580 MMIO_D(0xd08, D_SKL); 2752 MMIO_D(0xd08, D_SKL);
2581 MMIO_D(0x20e0, D_SKL); 2753 MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
2582 MMIO_D(0x20ec, D_SKL); 2754 MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2583 2755
2584 /* TRTT */ 2756 /* TRTT */
2585 MMIO_D(0x4de0, D_SKL); 2757 MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
2586 MMIO_D(0x4de4, D_SKL); 2758 MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
2587 MMIO_D(0x4de8, D_SKL); 2759 MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
2588 MMIO_D(0x4dec, D_SKL); 2760 MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
2589 MMIO_D(0x4df0, D_SKL); 2761 MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
2590 MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write); 2762 MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
2591 MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); 2763 MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
2592 2764
2593 MMIO_D(0x45008, D_SKL); 2765 MMIO_D(0x45008, D_SKL);
@@ -2611,7 +2783,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2611 MMIO_D(0x65f08, D_SKL); 2783 MMIO_D(0x65f08, D_SKL);
2612 MMIO_D(0x320f0, D_SKL); 2784 MMIO_D(0x320f0, D_SKL);
2613 2785
2614 MMIO_D(_REG_VCS2_EXCC, D_SKL); 2786 MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
2615 MMIO_D(0x70034, D_SKL); 2787 MMIO_D(0x70034, D_SKL);
2616 MMIO_D(0x71034, D_SKL); 2788 MMIO_D(0x71034, D_SKL);
2617 MMIO_D(0x72034, D_SKL); 2789 MMIO_D(0x72034, D_SKL);
@@ -2624,6 +2796,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2624 MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); 2796 MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
2625 2797
2626 MMIO_D(0x44500, D_SKL); 2798 MMIO_D(0x44500, D_SKL);
2799 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2800 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
2801 NULL, NULL);
2627 return 0; 2802 return 0;
2628} 2803}
2629 2804
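Most of the handler changes above add F_CMD_ACCESS; the flag marks a register as legal for access from GPU command buffers, so the GVT-g command scanner accepts LRI-style writes targeting it rather than failing the batch. For example, from this patch:

	/* default handlers, all devices, now also reachable from commands */
	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);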
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0f7f5d97f582..84d801638ede 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
96 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 96 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
97 dma_addr_t daddr; 97 dma_addr_t daddr;
98 98
99 page = pfn_to_page(pfn); 99 if (unlikely(!pfn_valid(pfn)))
100 if (is_error_page(page))
101 return -EFAULT; 100 return -EFAULT;
102 101
102 page = pfn_to_page(pfn);
103 daddr = dma_map_page(dev, page, 0, PAGE_SIZE, 103 daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
104 PCI_DMA_BIDIRECTIONAL); 104 PCI_DMA_BIDIRECTIONAL);
105 if (dma_mapping_error(dev, daddr)) 105 if (dma_mapping_error(dev, daddr))
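The reordering matters because pfn_to_page() on an invalid pfn is undefined (notably under sparse memory models), so validity must be established before a struct page is ever derived. The fixed pattern:

	if (unlikely(!pfn_valid(pfn)))
		return -EFAULT;
	page = pfn_to_page(pfn);	/* safe: pfn known to be valid */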
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
295 return 0; 295 return 0;
296 296
297 return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" 297 return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
298 "fence: %d\n", 298 "fence: %d\nresolution: %s\n",
299 BYTES_TO_MB(type->low_gm_size), 299 BYTES_TO_MB(type->low_gm_size),
300 BYTES_TO_MB(type->high_gm_size), 300 BYTES_TO_MB(type->high_gm_size),
301 type->fence); 301 type->fence, vgpu_edid_str(type->resolution));
302} 302}
303 303
304static MDEV_TYPE_ATTR_RO(available_instances); 304static MDEV_TYPE_ATTR_RO(available_instances);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 4df078bc5d04..60b698cb8365 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
57 (reg >= gvt->device_info.gtt_start_offset \ 57 (reg >= gvt->device_info.gtt_start_offset \
58 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) 58 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
59 59
60static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
61 void *p_data, unsigned int bytes, bool read)
62{
63 struct intel_gvt *gvt = NULL;
64 void *pt = NULL;
65 unsigned int offset = 0;
66
67 if (!vgpu || !p_data)
68 return;
69
70 gvt = vgpu->gvt;
71 mutex_lock(&gvt->lock);
72 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
73 if (reg_is_mmio(gvt, offset)) {
74 if (read)
75 intel_vgpu_default_mmio_read(vgpu, offset, p_data,
76 bytes);
77 else
78 intel_vgpu_default_mmio_write(vgpu, offset, p_data,
79 bytes);
80 } else if (reg_is_gtt(gvt, offset) &&
81 vgpu->gtt.ggtt_mm->virtual_page_table) {
82 offset -= gvt->device_info.gtt_start_offset;
83 pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
84 if (read)
85 memcpy(p_data, pt, bytes);
86 else
87 memcpy(pt, p_data, bytes);
88
89 } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
90 struct intel_vgpu_guest_page *gp;
91
 92 /* Since we enter failsafe mode early during guest boot, the
 93 * guest may not have had a chance to set up its ppgtt table, so
 94 * there should not be any wp pages for the guest. Keep the wp
 95 * related code here in case we need to handle it in the future.
96 */
97 gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
98 if (gp) {
 99 /* remove write protection to prevent future traps */
100 intel_vgpu_clean_guest_page(vgpu, gp);
101 if (read)
102 intel_gvt_hypervisor_read_gpa(vgpu, pa,
103 p_data, bytes);
104 else
105 intel_gvt_hypervisor_write_gpa(vgpu, pa,
106 p_data, bytes);
107 }
108 }
109 mutex_unlock(&gvt->lock);
110}
111
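In failsafe mode the per-register handler tables are bypassed entirely; the helper above routes each access by address class only, under gvt->lock. Schematically:

	/*
	 * pa -> mmio offset --+-- reg_is_mmio(): default vreg read/write
	 *                     +-- reg_is_gtt():  memcpy to/from the
	 *                     |                  virtual GTT page table
	 *                     +-- write-protected guest page: drop the
	 *                         protection, then pass the GPA through
	 *
	 * No tracked-register handlers run and no warnings are logged.
	 */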
60/** 112/**
61 * intel_vgpu_emulate_mmio_read - emulate MMIO read 113 * intel_vgpu_emulate_mmio_read - emulate MMIO read
62 * @vgpu: a vGPU 114 * @vgpu: a vGPU
@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
75 unsigned int offset = 0; 127 unsigned int offset = 0;
76 int ret = -EINVAL; 128 int ret = -EINVAL;
77 129
130
131 if (vgpu->failsafe) {
132 failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
133 return 0;
134 }
78 mutex_lock(&gvt->lock); 135 mutex_lock(&gvt->lock);
79 136
80 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { 137 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -188,6 +245,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
188 u32 old_vreg = 0, old_sreg = 0; 245 u32 old_vreg = 0, old_sreg = 0;
189 int ret = -EINVAL; 246 int ret = -EINVAL;
190 247
248 if (vgpu->failsafe) {
249 failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
250 return 0;
251 }
252
191 mutex_lock(&gvt->lock); 253 mutex_lock(&gvt->lock);
192 254
193 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { 255 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
236 298
237 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 299 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
238 if (!mmio && !vgpu->mmio.disable_warn_untrack) 300 if (!mmio && !vgpu->mmio.disable_warn_untrack)
239 gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n", 301 gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
240 vgpu->id, offset, bytes, *(u32 *)p_data); 302 vgpu->id, offset, bytes, *(u32 *)p_data);
241 303
242 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { 304 if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
322 384
323 /* set the bit 0:2(Core C-State ) to C0 */ 385 /* set the bit 0:2(Core C-State ) to C0 */
324 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 386 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
387
388 vgpu->mmio.disable_warn_untrack = false;
325} 389}
326 390
327/** 391/**
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d9fb41ab7119..5d1caf9daba9 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -27,7 +27,6 @@
27 27
28static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) 28static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
29{ 29{
30 void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
31 u8 *buf; 30 u8 *buf;
32 int i; 31 int i;
33 32
@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
43 if (!vgpu_opregion(vgpu)->va) 42 if (!vgpu_opregion(vgpu)->va)
44 return -ENOMEM; 43 return -ENOMEM;
45 44
46 memcpy_fromio(vgpu_opregion(vgpu)->va, host_va, 45 memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
47 INTEL_GVT_OPREGION_SIZE); 46 INTEL_GVT_OPREGION_SIZE);
48 47
49 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) 48 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
50 vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; 49 vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2b3a642284b6..73f052a4f424 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = {
53 {RCS, _MMIO(0x24d4), 0, false}, 53 {RCS, _MMIO(0x24d4), 0, false},
54 {RCS, _MMIO(0x24d8), 0, false}, 54 {RCS, _MMIO(0x24d8), 0, false},
55 {RCS, _MMIO(0x24dc), 0, false}, 55 {RCS, _MMIO(0x24dc), 0, false},
56 {RCS, _MMIO(0x24e0), 0, false},
57 {RCS, _MMIO(0x24e4), 0, false},
58 {RCS, _MMIO(0x24e8), 0, false},
59 {RCS, _MMIO(0x24ec), 0, false},
60 {RCS, _MMIO(0x24f0), 0, false},
61 {RCS, _MMIO(0x24f4), 0, false},
62 {RCS, _MMIO(0x24f8), 0, false},
63 {RCS, _MMIO(0x24fc), 0, false},
56 {RCS, _MMIO(0x7004), 0xffff, true}, 64 {RCS, _MMIO(0x7004), 0xffff, true},
57 {RCS, _MMIO(0x7008), 0xffff, true}, 65 {RCS, _MMIO(0x7008), 0xffff, true},
58 {RCS, _MMIO(0x7000), 0xffff, true}, 66 {RCS, _MMIO(0x7000), 0xffff, true},
@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = {
76 {RCS, _MMIO(0x24d4), 0, false}, 84 {RCS, _MMIO(0x24d4), 0, false},
77 {RCS, _MMIO(0x24d8), 0, false}, 85 {RCS, _MMIO(0x24d8), 0, false},
78 {RCS, _MMIO(0x24dc), 0, false}, 86 {RCS, _MMIO(0x24dc), 0, false},
87 {RCS, _MMIO(0x24e0), 0, false},
88 {RCS, _MMIO(0x24e4), 0, false},
89 {RCS, _MMIO(0x24e8), 0, false},
90 {RCS, _MMIO(0x24ec), 0, false},
91 {RCS, _MMIO(0x24f0), 0, false},
92 {RCS, _MMIO(0x24f4), 0, false},
93 {RCS, _MMIO(0x24f8), 0, false},
94 {RCS, _MMIO(0x24fc), 0, false},
79 {RCS, _MMIO(0x7004), 0xffff, true}, 95 {RCS, _MMIO(0x7004), 0xffff, true},
80 {RCS, _MMIO(0x7008), 0xffff, true}, 96 {RCS, _MMIO(0x7008), 0xffff, true},
81 {RCS, _MMIO(0x7000), 0xffff, true}, 97 {RCS, _MMIO(0x7000), 0xffff, true},
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d6b6d0efdd1a..d3a56c949025 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -139,6 +139,9 @@ static int shadow_context_status_change(struct notifier_block *nb,
139 struct intel_vgpu_workload *workload = 139 struct intel_vgpu_workload *workload =
140 scheduler->current_workload[req->engine->id]; 140 scheduler->current_workload[req->engine->id];
141 141
142 if (unlikely(!workload))
143 return NOTIFY_OK;
144
142 switch (action) { 145 switch (action) {
143 case INTEL_CONTEXT_SCHEDULE_IN: 146 case INTEL_CONTEXT_SCHEDULE_IN:
144 intel_gvt_load_render_mmio(workload->vgpu, 147 intel_gvt_load_render_mmio(workload->vgpu,
@@ -148,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
148 case INTEL_CONTEXT_SCHEDULE_OUT: 151 case INTEL_CONTEXT_SCHEDULE_OUT:
149 intel_gvt_restore_render_mmio(workload->vgpu, 152 intel_gvt_restore_render_mmio(workload->vgpu,
150 workload->ring_id); 153 workload->ring_id);
 154 /* A status of -EINPROGRESS means the workload hit no
 155 * issue during dispatch, so when SCHEDULE_OUT arrives
 156 * we set the status to zero for good. Any status other
 157 * than -EINPROGRESS means something went wrong during
 158 * dispatch, and in that case the status must not be
 159 * overwritten with zero.
 160 */
161 if (workload->status == -EINPROGRESS)
162 workload->status = 0;
151 atomic_set(&workload->shadow_ctx_active, 0); 163 atomic_set(&workload->shadow_ctx_active, 0);
152 break; 164 break;
153 default: 165 default:
@@ -359,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
359 workload = scheduler->current_workload[ring_id]; 371 workload = scheduler->current_workload[ring_id];
360 vgpu = workload->vgpu; 372 vgpu = workload->vgpu;
361 373
 362 if (!workload->status && !vgpu->resetting) { 374 /* A workload with a request needs to wait for the context
 375 * switch to make sure the request has completed; a workload
 376 * without a request is completed directly.
377 */
378 if (workload->req) {
363 wait_event(workload->shadow_ctx_status_wq, 379 wait_event(workload->shadow_ctx_status_wq,
364 !atomic_read(&workload->shadow_ctx_active)); 380 !atomic_read(&workload->shadow_ctx_active));
365 381
366 update_guest_context(workload); 382 i915_gem_request_put(fetch_and_zero(&workload->req));
383
384 if (!workload->status && !vgpu->resetting) {
385 update_guest_context(workload);
367 386
368 for_each_set_bit(event, workload->pending_events, 387 for_each_set_bit(event, workload->pending_events,
369 INTEL_GVT_EVENT_MAX) 388 INTEL_GVT_EVENT_MAX)
370 intel_vgpu_trigger_virtual_event(vgpu, event); 389 intel_vgpu_trigger_virtual_event(vgpu, event);
390 }
371 } 391 }
372 392
373 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 393 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -397,7 +417,6 @@ static int workload_thread(void *priv)
397 int ring_id = p->ring_id; 417 int ring_id = p->ring_id;
398 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 418 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
399 struct intel_vgpu_workload *workload = NULL; 419 struct intel_vgpu_workload *workload = NULL;
400 long lret;
401 int ret; 420 int ret;
402 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); 421 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
403 DEFINE_WAIT_FUNC(wait, woken_wake_function); 422 DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -446,23 +465,24 @@ static int workload_thread(void *priv)
446 465
447 gvt_dbg_sched("ring id %d wait workload %p\n", 466 gvt_dbg_sched("ring id %d wait workload %p\n",
448 workload->ring_id, workload); 467 workload->ring_id, workload);
449 468retry:
450 lret = i915_wait_request(workload->req, 469 i915_wait_request(workload->req,
451 0, MAX_SCHEDULE_TIMEOUT); 470 0, MAX_SCHEDULE_TIMEOUT);
 452 if (lret < 0) { 471 /* i915 has a replay mechanism: a request is replayed after an
 453 workload->status = lret; 472 * i915 reset, so its seqno will eventually be updated. If the
 454 gvt_err("fail to wait workload, skip\n"); 473 * seqno is still not updated after waiting, the replay may
 455 } else { 474 * still be in progress, so we wait again.
456 workload->status = 0; 475 */
476 if (!i915_gem_request_completed(workload->req)) {
477 gvt_dbg_sched("workload %p not completed, wait again\n",
478 workload);
479 goto retry;
457 } 480 }
458 481
459complete: 482complete:
460 gvt_dbg_sched("will complete workload %p, status: %d\n", 483 gvt_dbg_sched("will complete workload %p, status: %d\n",
461 workload, workload->status); 484 workload, workload->status);
462 485
463 if (workload->req)
464 i915_gem_request_put(fetch_and_zero(&workload->req));
465
466 complete_current_workload(gvt, ring_id); 486 complete_current_workload(gvt, ring_id);
467 487
468 if (need_force_wake) 488 if (need_force_wake)
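The retry label replaces the old error path: rather than failing the workload when the wait returns, the thread keeps waiting until the request's seqno actually signals, because an i915 reset can replay the request and move the seqno. The loop is equivalent to:

	do {
		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
	} while (!i915_gem_request_completed(workload->req));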
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 95a97aa0051e..41cfa5ccae84 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
64 WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 64 WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
65} 65}
66 66
67static struct {
68 unsigned int low_mm;
69 unsigned int high_mm;
70 unsigned int fence;
71 enum intel_vgpu_edid edid;
72 char *name;
73} vgpu_types[] = {
74/* Fixed vGPU type table */
75 { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
76 { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
77 { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
78 { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
79};
80
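With the fixed table above, the instances available per type follow min(low_avail / low_mm, high_avail / high_mm). A worked example with hypothetical aperture sizes:

	/*
	 * Assume low_avail = 512MB and high_avail = 4096MB (hypothetical):
	 *   "8" type: min(512 / 64,  4096 / 512)  = min(8, 8) = 8
	 *   "4" type: min(512 / 128, 4096 / 512)  = min(4, 8) = 4
	 *   "2" type: min(512 / 256, 4096 / 1024) = min(2, 4) = 2
	 *   "1" type: min(512 / 512, 4096 / 2048) = min(1, 2) = 1
	 * matching the instance counts encoded in the type names.
	 */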
67/** 81/**
68 * intel_gvt_init_vgpu_types - initialize vGPU type list 82 * intel_gvt_init_vgpu_types - initialize vGPU type list
69 * @gvt : GVT device 83 * @gvt : GVT device
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
78 unsigned int min_low; 92 unsigned int min_low;
79 93
 80 /* vGPU type name is defined as GVTg_Vx_y which contains 94 * physical GPU generation type (e.g. V4 as BDW server, V5 as
81 * physical GPU generation type and 'y' means maximum vGPU 95 * physical GPU generation type (e.g V4 as BDW server, V5 as
82 * instances user can create on one physical GPU for this 96 * SKL server).
83 * type.
84 * 97 *
85 * Depend on physical SKU resource, might see vGPU types like 98 * Depend on physical SKU resource, might see vGPU types like
86 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create 99 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
92 */ 105 */
93 low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; 106 low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
94 high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; 107 high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
95 num_types = 4; 108 num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
96 109
97 gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), 110 gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
98 GFP_KERNEL); 111 GFP_KERNEL);
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
101 114
102 min_low = MB_TO_BYTES(32); 115 min_low = MB_TO_BYTES(32);
103 for (i = 0; i < num_types; ++i) { 116 for (i = 0; i < num_types; ++i) {
104 if (low_avail / min_low == 0) 117 if (low_avail / vgpu_types[i].low_mm == 0)
105 break; 118 break;
106 gvt->types[i].low_gm_size = min_low; 119
107 gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); 120 gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
108 gvt->types[i].fence = 4; 121 gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
109 gvt->types[i].max_instance = min(low_avail / min_low, 122 gvt->types[i].fence = vgpu_types[i].fence;
110 high_avail / gvt->types[i].high_gm_size); 123 gvt->types[i].resolution = vgpu_types[i].edid;
111 gvt->types[i].avail_instance = gvt->types[i].max_instance; 124 gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
125 high_avail / vgpu_types[i].high_mm);
112 126
113 if (IS_GEN8(gvt->dev_priv)) 127 if (IS_GEN8(gvt->dev_priv))
114 sprintf(gvt->types[i].name, "GVTg_V4_%u", 128 sprintf(gvt->types[i].name, "GVTg_V4_%s",
115 gvt->types[i].max_instance); 129 vgpu_types[i].name);
116 else if (IS_GEN9(gvt->dev_priv)) 130 else if (IS_GEN9(gvt->dev_priv))
117 sprintf(gvt->types[i].name, "GVTg_V5_%u", 131 sprintf(gvt->types[i].name, "GVTg_V5_%s",
118 gvt->types[i].max_instance); 132 vgpu_types[i].name);
119 133
120 min_low <<= 1; 134 gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
121 gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n", 135 i, gvt->types[i].name,
122 i, gvt->types[i].name, gvt->types[i].max_instance,
123 gvt->types[i].avail_instance, 136 gvt->types[i].avail_instance,
124 gvt->types[i].low_gm_size, 137 gvt->types[i].low_gm_size,
125 gvt->types[i].high_gm_size, gvt->types[i].fence); 138 gvt->types[i].high_gm_size, gvt->types[i].fence,
139 vgpu_edid_str(gvt->types[i].resolution));
126 } 140 }
127 141
128 gvt->num_types = i; 142 gvt->num_types = i;
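The fixed type table above trades the old doubling heuristic for explicit per-type budgets; avail_instance then falls out as the minimum of what the low and high GM pools can each carry. A minimal standalone sketch of that arithmetic, with hypothetical pool sizes standing in for gvt_aperture_sz()/gvt_hidden_sz() minus the host's share:

#include <stdio.h>
#include <stddef.h>

#define MB_TO_BYTES(mb) ((unsigned long long)(mb) << 20)

struct vgpu_type {
    unsigned long long low_mm, high_mm;
    unsigned int fence;
    const char *name;
};

static const struct vgpu_type types[] = {
    { MB_TO_BYTES(64),  MB_TO_BYTES(512),  4, "8" },
    { MB_TO_BYTES(128), MB_TO_BYTES(512),  4, "4" },
    { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, "2" },
    { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, "1" },
};

int main(void)
{
    /* Hypothetical pools: 448 MB of aperture and 3584 MB of hidden GM
     * left over after the host reservation. */
    unsigned long long low_avail = MB_TO_BYTES(448);
    unsigned long long high_avail = MB_TO_BYTES(3584);

    for (size_t i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
        unsigned long long lo = low_avail / types[i].low_mm;
        unsigned long long hi = high_avail / types[i].high_mm;

        printf("GVTg_V4_%s: avail_instance=%llu\n",
               types[i].name, lo < hi ? lo : hi);
    }
    return 0;
}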
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
138{ 152{
139 int i; 153 int i;
140 unsigned int low_gm_avail, high_gm_avail, fence_avail; 154 unsigned int low_gm_avail, high_gm_avail, fence_avail;
141 unsigned int low_gm_min, high_gm_min, fence_min, total_min; 155 unsigned int low_gm_min, high_gm_min, fence_min;
142 156
143 /* Need to depend on maximum hw resource size but keep on 157
144 * static config for now. 158 * static config for now.
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
154 low_gm_min = low_gm_avail / gvt->types[i].low_gm_size; 168 low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
155 high_gm_min = high_gm_avail / gvt->types[i].high_gm_size; 169 high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
156 fence_min = fence_avail / gvt->types[i].fence; 170 fence_min = fence_avail / gvt->types[i].fence;
157 total_min = min(min(low_gm_min, high_gm_min), fence_min); 171 gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
158 gvt->types[i].avail_instance = min(gvt->types[i].max_instance, 172 fence_min);
159 total_min);
160 173
161 gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n", 174 gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
162 i, gvt->types[i].name, gvt->types[i].max_instance, 175 i, gvt->types[i].name,
163 gvt->types[i].avail_instance, gvt->types[i].low_gm_size, 176 gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
164 gvt->types[i].high_gm_size, gvt->types[i].fence); 177 gvt->types[i].high_gm_size, gvt->types[i].fence);
165 } 178 }
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
248 if (ret) 261 if (ret)
249 goto out_detach_hypervisor_vgpu; 262 goto out_detach_hypervisor_vgpu;
250 263
251 ret = intel_vgpu_init_display(vgpu); 264 ret = intel_vgpu_init_display(vgpu, param->resolution);
252 if (ret) 265 if (ret)
253 goto out_clean_gtt; 266 goto out_clean_gtt;
254 267
@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
312 param.low_gm_sz = type->low_gm_size; 325 param.low_gm_sz = type->low_gm_size;
313 param.high_gm_sz = type->high_gm_size; 326 param.high_gm_sz = type->high_gm_size;
314 param.fence_sz = type->fence; 327 param.fence_sz = type->fence;
328 param.resolution = type->resolution;
315 329
316 /* XXX current param based on MB */ 330 /* XXX current param based on MB */
317 param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); 331 param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
387 populate_pvinfo_page(vgpu); 401 populate_pvinfo_page(vgpu);
388 intel_vgpu_reset_display(vgpu); 402 intel_vgpu_reset_display(vgpu);
389 403
390 if (dmlr) 404 if (dmlr) {
391 intel_vgpu_reset_cfg_space(vgpu); 405 intel_vgpu_reset_cfg_space(vgpu);
 406 /* only reset the failsafe mode on a dmlr reset */
407 vgpu->failsafe = false;
408 vgpu->pv_notified = false;
409 }
392 } 410 }
393 411
394 vgpu->resetting = false; 412 vgpu->resetting = false;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4b42d31391..7febe6eecf72 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -293,6 +293,7 @@ enum plane_id {
293 PLANE_PRIMARY, 293 PLANE_PRIMARY,
294 PLANE_SPRITE0, 294 PLANE_SPRITE0,
295 PLANE_SPRITE1, 295 PLANE_SPRITE1,
296 PLANE_SPRITE2,
296 PLANE_CURSOR, 297 PLANE_CURSOR,
297 I915_MAX_PLANES, 298 I915_MAX_PLANES,
298}; 299};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6908123162d1..10777da73039 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1434 1434
1435 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1435 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1436 1436
1437 ret = -ENODEV;
1438 if (obj->ops->pwrite)
1439 ret = obj->ops->pwrite(obj, args);
1440 if (ret != -ENODEV)
1441 goto err;
1442
1437 ret = i915_gem_object_wait(obj, 1443 ret = i915_gem_object_wait(obj,
1438 I915_WAIT_INTERRUPTIBLE | 1444 I915_WAIT_INTERRUPTIBLE |
1439 I915_WAIT_ALL, 1445 I915_WAIT_ALL,
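The added lines above implement an optional fast path: a backend hook is tried first, -ENODEV means "not applicable here, fall through to the generic path", and any other return value, success included, is final. A hedged sketch of that convention, with illustrative names (do_write, fast_write and generic_write are not the driver's):

#include <errno.h>

struct write_args { unsigned long long offset, size; const void *data; };
struct object;
struct object_ops {
    int (*fast_write)(struct object *obj, const struct write_args *args);
};
struct object { const struct object_ops *ops; };

static int generic_write(struct object *obj, const struct write_args *args)
{
    (void)obj; (void)args;
    return 0;                       /* stand-in for the slow path */
}

static int do_write(struct object *obj, const struct write_args *args)
{
    int ret = -ENODEV;

    if (obj->ops->fast_write)
        ret = obj->ops->fast_write(obj, args);
    if (ret != -ENODEV)
        return ret;                 /* handled (or failed) for real */

    return generic_write(obj, args);
}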
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2119 */ 2125 */
2120 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2126 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2121 obj->mm.madv = __I915_MADV_PURGED; 2127 obj->mm.madv = __I915_MADV_PURGED;
2128 obj->mm.pages = ERR_PTR(-EFAULT);
2122} 2129}
2123 2130
2124/* Try to discard unwanted pages */ 2131/* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2218 2225
2219 __i915_gem_object_reset_page_iter(obj); 2226 __i915_gem_object_reset_page_iter(obj);
2220 2227
2221 obj->ops->put_pages(obj, pages); 2228 if (!IS_ERR(pages))
2229 obj->ops->put_pages(obj, pages);
2230
2222unlock: 2231unlock:
2223 mutex_unlock(&obj->mm.lock); 2232 mutex_unlock(&obj->mm.lock);
2224} 2233}
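Storing ERR_PTR(-EFAULT) in obj->mm.pages turns the field into a tri-state: NULL for never populated, an error pointer for purged, a valid pointer for present. That is why the page-lookup checks in the following hunks switch to IS_ERR_OR_NULL() and why put_pages above is skipped for the sentinel. A kernel-style sketch of the idiom (function names are illustrative):

#include <linux/err.h>
#include <linux/types.h>

struct sg_table;

/* Tri-state pointer: NULL = never allocated, ERR_PTR(-EFAULT) = purged
 * backing store that must not be dereferenced, otherwise = present. */
static void mark_purged(struct sg_table **pages)
{
    *pages = ERR_PTR(-EFAULT);
}

static bool needs_get_pages(const struct sg_table *pages)
{
    return IS_ERR_OR_NULL(pages);   /* covers both empty and purged */
}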
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2437 if (err) 2446 if (err)
2438 return err; 2447 return err;
2439 2448
2440 if (unlikely(!obj->mm.pages)) { 2449 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
2441 err = ____i915_gem_object_get_pages(obj); 2450 err = ____i915_gem_object_get_pages(obj);
2442 if (err) 2451 if (err)
2443 goto unlock; 2452 goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2515 2524
2516 pinned = true; 2525 pinned = true;
2517 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2526 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2518 if (unlikely(!obj->mm.pages)) { 2527 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
2519 ret = ____i915_gem_object_get_pages(obj); 2528 ret = ____i915_gem_object_get_pages(obj);
2520 if (ret) 2529 if (ret)
2521 goto err_unlock; 2530 goto err_unlock;
@@ -2563,6 +2572,75 @@ err_unlock:
2563 goto out_unlock; 2572 goto out_unlock;
2564} 2573}
2565 2574
2575static int
2576i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2577 const struct drm_i915_gem_pwrite *arg)
2578{
2579 struct address_space *mapping = obj->base.filp->f_mapping;
2580 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2581 u64 remain, offset;
2582 unsigned int pg;
2583
2584 /* Before we instantiate/pin the backing store for our use, we
2585 * can prepopulate the shmemfs filp efficiently using a write into
2586 * the pagecache. We avoid the penalty of instantiating all the
2587 * pages, important if the user is just writing to a few and never
2588 * uses the object on the GPU, and using a direct write into shmemfs
2589 * allows it to avoid the cost of retrieving a page (either swapin
2590 * or clearing-before-use) before it is overwritten.
2591 */
2592 if (READ_ONCE(obj->mm.pages))
2593 return -ENODEV;
2594
2595 /* Before the pages are instantiated the object is treated as being
2596 * in the CPU domain. The pages will be clflushed as required before
2597 * use, and we can freely write into the pages directly. If userspace
 2598 * races pwrite with any other operation, corruption will ensue -
2599 * that is userspace's prerogative!
2600 */
2601
2602 remain = arg->size;
2603 offset = arg->offset;
2604 pg = offset_in_page(offset);
2605
2606 do {
2607 unsigned int len, unwritten;
2608 struct page *page;
2609 void *data, *vaddr;
2610 int err;
2611
2612 len = PAGE_SIZE - pg;
2613 if (len > remain)
2614 len = remain;
2615
2616 err = pagecache_write_begin(obj->base.filp, mapping,
2617 offset, len, 0,
2618 &page, &data);
2619 if (err < 0)
2620 return err;
2621
2622 vaddr = kmap(page);
2623 unwritten = copy_from_user(vaddr + pg, user_data, len);
2624 kunmap(page);
2625
2626 err = pagecache_write_end(obj->base.filp, mapping,
2627 offset, len, len - unwritten,
2628 page, data);
2629 if (err < 0)
2630 return err;
2631
2632 if (unwritten)
2633 return -EFAULT;
2634
2635 remain -= len;
2636 user_data += len;
2637 offset += len;
2638 pg = 0;
2639 } while (remain);
2640
2641 return 0;
2642}
2643
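The new path only engages while the object still has no pages, so it pays off for freshly created buffers that are filled once and never touched by the GPU. A hedged userspace sketch of driving it through the existing pwrite ioctl (assumes an already-open i915 DRM fd and a valid GEM handle; error handling elided):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Fill a freshly created, still unpopulated GEM object; the kernel can
 * then service this via the shmemfs pagecache path added above. */
static int pwrite_fresh(int drm_fd, uint32_t handle,
                        const void *buf, uint64_t len)
{
    struct drm_i915_gem_pwrite arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = handle;
    arg.offset = 0;
    arg.size = len;
    arg.data_ptr = (uint64_t)(uintptr_t)buf;

    return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &arg);
}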
2566static bool ban_context(const struct i915_gem_context *ctx) 2644static bool ban_context(const struct i915_gem_context *ctx)
2567{ 2645{
2568 return (i915_gem_context_is_bannable(ctx) && 2646 return (i915_gem_context_is_bannable(ctx) &&
@@ -3029,6 +3107,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3029 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 3107 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3030 if (args->timeout_ns < 0) 3108 if (args->timeout_ns < 0)
3031 args->timeout_ns = 0; 3109 args->timeout_ns = 0;
3110
3111 /*
3112 * Apparently ktime isn't accurate enough and occasionally has a
3113 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3114 * things up to make the test happy. We allow up to 1 jiffy.
3115 *
3116 * This is a regression from the timespec->ktime conversion.
3117 */
3118 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3119 args->timeout_ns = 0;
3032 } 3120 }
3033 3121
3034 i915_gem_object_put(obj); 3122 i915_gem_object_put(obj);
@@ -3974,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
3974static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4062static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3975 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 4063 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
3976 I915_GEM_OBJECT_IS_SHRINKABLE, 4064 I915_GEM_OBJECT_IS_SHRINKABLE,
4065
3977 .get_pages = i915_gem_object_get_pages_gtt, 4066 .get_pages = i915_gem_object_get_pages_gtt,
3978 .put_pages = i915_gem_object_put_pages_gtt, 4067 .put_pages = i915_gem_object_put_pages_gtt,
4068
4069 .pwrite = i915_gem_object_pwrite_gtt,
3979}; 4070};
3980 4071
3981struct drm_i915_gem_object * 4072struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c181b1bb3d2c..3be2503aa042 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
293 * those as well to make room for our guard pages. 293 * those as well to make room for our guard pages.
294 */ 294 */
295 if (check_color) { 295 if (check_color) {
296 if (vma->node.start + vma->node.size == node->start) { 296 if (node->start + node->size == target->start) {
297 if (vma->node.color == node->color) 297 if (node->color == target->color)
298 continue; 298 continue;
299 } 299 }
300 if (vma->node.start == node->start + node->size) { 300 if (node->start == target->start + target->size) {
301 if (vma->node.color == node->color) 301 if (node->color == target->color)
302 continue; 302 continue;
303 } 303 }
304 } 304 }
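The fix compares the scanned node against the target node itself instead of against the vma the loop variable happens to belong to. The rule being enforced: same-color neighbours may touch, different-color neighbours need a guard page, so a touching different-color node must be evicted too. A self-contained sketch of the corrected test (mm_node is a stand-in for drm_mm_node):

struct mm_node { unsigned long start, size, color; };

/* Return nonzero when 'node' abuts 'target' with the same cache color
 * and can therefore stay; everything else in the scanned range gets
 * evicted to make room for the guard pages. */
static int can_keep_neighbour(const struct mm_node *node,
                              const struct mm_node *target)
{
    if (node->start + node->size == target->start)   /* abuts from below */
        return node->color == target->color;
    if (node->start == target->start + target->size) /* abuts from above */
        return node->color == target->color;
    return 0;
}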
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index bf90b07163d1..76b80a0be797 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
54 struct sg_table *(*get_pages)(struct drm_i915_gem_object *); 54 struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
55 void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); 55 void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
56 56
57 int (*pwrite)(struct drm_i915_gem_object *,
58 const struct drm_i915_gem_pwrite *);
59
57 int (*dmabuf_export)(struct drm_i915_gem_object *); 60 int (*dmabuf_export)(struct drm_i915_gem_object *);
58 void (*release)(struct drm_i915_gem_object *); 61 void (*release)(struct drm_i915_gem_object *);
59}; 62};
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 155906e84812..df20e9bc1c0f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -512,10 +512,36 @@ err_unpin:
512 return ret; 512 return ret;
513} 513}
514 514
515static void
516i915_vma_remove(struct i915_vma *vma)
517{
518 struct drm_i915_gem_object *obj = vma->obj;
519
520 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
521 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
522
523 drm_mm_remove_node(&vma->node);
524 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
525
526 /* Since the unbound list is global, only move to that list if
527 * no more VMAs exist.
528 */
529 if (--obj->bind_count == 0)
530 list_move_tail(&obj->global_link,
531 &to_i915(obj->base.dev)->mm.unbound_list);
532
533 /* And finally now the object is completely decoupled from this vma,
534 * we can drop its hold on the backing storage and allow it to be
535 * reaped by the shrinker.
536 */
537 i915_gem_object_unpin_pages(obj);
538 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
539}
540
515int __i915_vma_do_pin(struct i915_vma *vma, 541int __i915_vma_do_pin(struct i915_vma *vma,
516 u64 size, u64 alignment, u64 flags) 542 u64 size, u64 alignment, u64 flags)
517{ 543{
518 unsigned int bound = vma->flags; 544 const unsigned int bound = vma->flags;
519 int ret; 545 int ret;
520 546
521 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 547 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
524 550
525 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { 551 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
526 ret = -EBUSY; 552 ret = -EBUSY;
527 goto err; 553 goto err_unpin;
528 } 554 }
529 555
530 if ((bound & I915_VMA_BIND_MASK) == 0) { 556 if ((bound & I915_VMA_BIND_MASK) == 0) {
531 ret = i915_vma_insert(vma, size, alignment, flags); 557 ret = i915_vma_insert(vma, size, alignment, flags);
532 if (ret) 558 if (ret)
533 goto err; 559 goto err_unpin;
534 } 560 }
535 561
536 ret = i915_vma_bind(vma, vma->obj->cache_level, flags); 562 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
537 if (ret) 563 if (ret)
538 goto err; 564 goto err_remove;
539 565
540 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) 566 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
541 __i915_vma_set_map_and_fenceable(vma); 567 __i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
544 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); 570 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
545 return 0; 571 return 0;
546 572
547err: 573err_remove:
574 if ((bound & I915_VMA_BIND_MASK) == 0) {
575 GEM_BUG_ON(vma->pages);
576 i915_vma_remove(vma);
577 }
578err_unpin:
548 __i915_vma_unpin(vma); 579 __i915_vma_unpin(vma);
549 return ret; 580 return ret;
550} 581}
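Splitting the single err: label into err_remove/err_unpin makes the unwind mirror the setup exactly: only what this call inserted gets removed, and the pin taken on entry is always dropped last. A generic, self-contained sketch of the idiom (all names hypothetical):

struct thing { int pinned, inserted, bound; };

static void take_pin(struct thing *t)  { t->pinned++; }
static void drop_pin(struct thing *t)  { t->pinned--; }
static int  do_insert(struct thing *t) { t->inserted = 1; return 0; }
static void do_remove(struct thing *t) { t->inserted = 0; }
static int  do_bind(struct thing *t)   { t->bound = 1; return 0; }

static int setup(struct thing *t, int need_insert)
{
    int ret;

    take_pin(t);

    if (need_insert) {
        ret = do_insert(t);
        if (ret)
            goto err_unpin;     /* nothing inserted yet */
    }

    ret = do_bind(t);
    if (ret)
        goto err_remove;        /* undo our insert before the pin */

    return 0;

err_remove:
    if (need_insert)
        do_remove(t);
err_unpin:
    drop_pin(t);
    return ret;
}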
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
657 } 688 }
658 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); 689 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
659 690
660 drm_mm_remove_node(&vma->node);
661 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
662
663 if (vma->pages != obj->mm.pages) { 691 if (vma->pages != obj->mm.pages) {
664 GEM_BUG_ON(!vma->pages); 692 GEM_BUG_ON(!vma->pages);
665 sg_free_table(vma->pages); 693 sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
667 } 695 }
668 vma->pages = NULL; 696 vma->pages = NULL;
669 697
670 /* Since the unbound list is global, only move to that list if 698 i915_vma_remove(vma);
671 * no more VMAs exist. */
672 if (--obj->bind_count == 0)
673 list_move_tail(&obj->global_link,
674 &to_i915(obj->base.dev)->mm.unbound_list);
675
676 /* And finally now the object is completely decoupled from this vma,
677 * we can drop its hold on the backing storage and allow it to be
678 * reaped by the shrinker.
679 */
680 i915_gem_object_unpin_pages(obj);
681 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
682 699
683destroy: 700destroy:
684 if (unlikely(i915_vma_is_closed(vma))) 701 if (unlikely(i915_vma_is_closed(vma)))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 01341670738f..3282b0f4b134 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
3669 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 3669 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3670 crtc->base.mode = crtc->base.state->mode; 3670 crtc->base.mode = crtc->base.state->mode;
3671 3671
3672 DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3673 old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3674 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3675
3676 /* 3672 /*
3677 * Update pipe size and adjust fitter if needed: the reason for this is 3673 * Update pipe size and adjust fitter if needed: the reason for this is
3678 * that in compute_mode_changes we check the native mode (not the pfit 3674 * that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
4796 struct intel_crtc_scaler_state *scaler_state = 4792 struct intel_crtc_scaler_state *scaler_state =
4797 &crtc->config->scaler_state; 4793 &crtc->config->scaler_state;
4798 4794
4799 DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4800
4801 if (crtc->config->pch_pfit.enabled) { 4795 if (crtc->config->pch_pfit.enabled) {
4802 int id; 4796 int id;
4803 4797
4804 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) { 4798 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
4805 DRM_ERROR("Requesting pfit without getting a scaler first\n");
4806 return; 4799 return;
4807 }
4808 4800
4809 id = scaler_state->scaler_id; 4801 id = scaler_state->scaler_id;
4810 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 4802 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4811 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 4803 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4812 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 4804 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4813 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 4805 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4814
4815 DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4816 } 4806 }
4817} 4807}
4818 4808
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
14379 } while (progress); 14369 } while (progress);
14380} 14370}
14381 14371
14372static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14373{
14374 struct intel_atomic_state *state, *next;
14375 struct llist_node *freed;
14376
14377 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14378 llist_for_each_entry_safe(state, next, freed, freed)
14379 drm_atomic_state_put(&state->base);
14380}
14381
14382static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14383{
14384 struct drm_i915_private *dev_priv =
14385 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14386
14387 intel_atomic_helper_free_state(dev_priv);
14388}
14389
14382static void intel_atomic_commit_tail(struct drm_atomic_state *state) 14390static void intel_atomic_commit_tail(struct drm_atomic_state *state)
14383{ 14391{
14384 struct drm_device *dev = state->dev; 14392 struct drm_device *dev = state->dev;
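Extracting the drain into intel_gvt-independent intel_atomic_helper_free_state() lets commit_tail reap freed states synchronously (see the following hunk) while the worker keeps covering the asynchronous case. The underlying pattern is the kernel's lock-free llist: producers push from any context, one consumer detaches the whole list atomically and walks it unlocked. A sketch (payload and names illustrative):

#include <linux/llist.h>
#include <linux/slab.h>

struct pending_state {
    struct llist_node freed;
    /* ... payload ... */
};

/* Producers: llist_add(&state->freed, free_list) from any context.
 * Consumer: detach everything in one atomic op, then walk unlocked. */
static void drain_free_list(struct llist_head *free_list)
{
    struct pending_state *state, *next;
    struct llist_node *freed;

    freed = llist_del_all(free_list);
    llist_for_each_entry_safe(state, next, freed, freed)
        kfree(state);
}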
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
14545 * can happen also when the device is completely off. 14553 * can happen also when the device is completely off.
14546 */ 14554 */
14547 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 14555 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
14556
14557 intel_atomic_helper_free_state(dev_priv);
14548} 14558}
14549 14559
14550static void intel_atomic_commit_work(struct work_struct *work) 14560static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14946 to_intel_atomic_state(old_crtc_state->state); 14956 to_intel_atomic_state(old_crtc_state->state);
14947 bool modeset = needs_modeset(crtc->state); 14957 bool modeset = needs_modeset(crtc->state);
14948 14958
14959 if (!modeset &&
14960 (intel_cstate->base.color_mgmt_changed ||
14961 intel_cstate->update_pipe)) {
14962 intel_color_set_csc(crtc->state);
14963 intel_color_load_luts(crtc->state);
14964 }
14965
14949 /* Perform vblank evasion around commit operation */ 14966 /* Perform vblank evasion around commit operation */
14950 intel_pipe_update_start(intel_crtc); 14967 intel_pipe_update_start(intel_crtc);
14951 14968
14952 if (modeset) 14969 if (modeset)
14953 goto out; 14970 goto out;
14954 14971
14955 if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
14956 intel_color_set_csc(crtc->state);
14957 intel_color_load_luts(crtc->state);
14958 }
14959
14960 if (intel_cstate->update_pipe) 14972 if (intel_cstate->update_pipe)
14961 intel_update_pipe_config(intel_crtc, old_intel_cstate); 14973 intel_update_pipe_config(intel_crtc, old_intel_cstate);
14962 else if (INTEL_GEN(dev_priv) >= 9) 14974 else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ fail:
16599 drm_modeset_acquire_fini(&ctx); 16611 drm_modeset_acquire_fini(&ctx);
16600} 16612}
16601 16613
16602static void intel_atomic_helper_free_state(struct work_struct *work)
16603{
16604 struct drm_i915_private *dev_priv =
16605 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
16606 struct intel_atomic_state *state, *next;
16607 struct llist_node *freed;
16608
16609 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
16610 llist_for_each_entry_safe(state, next, freed, freed)
16611 drm_atomic_state_put(&state->base);
16612}
16613
16614int intel_modeset_init(struct drm_device *dev) 16614int intel_modeset_init(struct drm_device *dev)
16615{ 16615{
16616 struct drm_i915_private *dev_priv = to_i915(dev); 16616 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
16631 dev->mode_config.funcs = &intel_mode_funcs; 16631 dev->mode_config.funcs = &intel_mode_funcs;
16632 16632
16633 INIT_WORK(&dev_priv->atomic_helper.free_work, 16633 INIT_WORK(&dev_priv->atomic_helper.free_work,
16634 intel_atomic_helper_free_state); 16634 intel_atomic_helper_free_state_worker);
16635 16635
16636 intel_init_quirks(dev); 16636 intel_init_quirks(dev);
16637 16637
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 1b8ba2e77539..2d449fb5d1d2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
357 bool *enabled, int width, int height) 357 bool *enabled, int width, int height)
358{ 358{
359 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); 359 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
360 unsigned long conn_configured, mask; 360 unsigned long conn_configured, conn_seq, mask;
361 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); 361 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
362 int i, j; 362 int i, j;
363 bool *save_enabled; 363 bool *save_enabled;
364 bool fallback = true; 364 bool fallback = true;
365 int num_connectors_enabled = 0; 365 int num_connectors_enabled = 0;
366 int num_connectors_detected = 0; 366 int num_connectors_detected = 0;
367 int pass = 0;
368 367
369 save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); 368 save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
370 if (!save_enabled) 369 if (!save_enabled)
@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
374 mask = BIT(count) - 1; 373 mask = BIT(count) - 1;
375 conn_configured = 0; 374 conn_configured = 0;
376retry: 375retry:
376 conn_seq = conn_configured;
377 for (i = 0; i < count; i++) { 377 for (i = 0; i < count; i++) {
378 struct drm_fb_helper_connector *fb_conn; 378 struct drm_fb_helper_connector *fb_conn;
379 struct drm_connector *connector; 379 struct drm_connector *connector;
@@ -387,7 +387,7 @@ retry:
387 if (conn_configured & BIT(i)) 387 if (conn_configured & BIT(i))
388 continue; 388 continue;
389 389
390 if (pass == 0 && !connector->has_tile) 390 if (conn_seq == 0 && !connector->has_tile)
391 continue; 391 continue;
392 392
393 if (connector->status == connector_status_connected) 393 if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ retry:
498 conn_configured |= BIT(i); 498 conn_configured |= BIT(i);
499 } 499 }
500 500
501 if ((conn_configured & mask) != mask) { 501 if ((conn_configured & mask) != mask && conn_configured != conn_seq)
502 pass++;
503 goto retry; 502 goto retry;
504 }
505 503
506 /* 504 /*
507 * If the BIOS didn't enable everything it could, fall back to have the 505 * If the BIOS didn't enable everything it could, fall back to have the
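Replacing the pass counter with a snapshot of the configured mask changes "retry exactly once" into "retry while a pass still makes progress", which both picks up connectors unlocked by earlier choices and guarantees termination. A distilled, self-contained sketch of that loop shape (try_one is a stand-in for the per-connector logic):

static unsigned long configure_all(unsigned int count,
                                   int (*try_one)(unsigned int))
{
    unsigned long mask = (count >= 8 * sizeof(unsigned long)) ?
                         ~0UL : (1UL << count) - 1;
    unsigned long configured = 0, seq;

    do {
        seq = configured;       /* snapshot before this pass */
        for (unsigned int i = 0; i < count; i++) {
            if (configured & (1UL << i))
                continue;       /* already done on an earlier pass */
            if (try_one(i))
                configured |= 1UL << i;
        }
        /* stop when complete or when a full pass added nothing */
    } while ((configured & mask) != mask && configured != seq);

    return configured;
}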
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 249623d45be0..940bab22d464 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4891 break; 4891 break;
4892 } 4892 }
4893 4893
 4894 /* When BYT can survive dynamic sw freq adjustments without
 4895 * a system hang, this restriction can be lifted.
4896 */
4897 if (IS_VALLEYVIEW(dev_priv))
4898 goto skip_hw_write;
4899
4894 I915_WRITE(GEN6_RP_UP_EI, 4900 I915_WRITE(GEN6_RP_UP_EI,
4895 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 4901 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4896 I915_WRITE(GEN6_RP_UP_THRESHOLD, 4902 I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4911 GEN6_RP_UP_BUSY_AVG | 4917 GEN6_RP_UP_BUSY_AVG |
4912 GEN6_RP_DOWN_IDLE_AVG); 4918 GEN6_RP_DOWN_IDLE_AVG);
4913 4919
4920skip_hw_write:
4914 dev_priv->rps.power = new_power; 4921 dev_priv->rps.power = new_power;
4915 dev_priv->rps.up_threshold = threshold_up; 4922 dev_priv->rps.up_threshold = threshold_up;
4916 dev_priv->rps.down_threshold = threshold_down; 4923 dev_priv->rps.down_threshold = threshold_down;
@@ -7916,10 +7923,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
7916 * @timeout_base_ms: timeout for polling with preemption enabled 7923 * @timeout_base_ms: timeout for polling with preemption enabled
7917 * 7924 *
7918 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE 7925 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
7919 * reports an error or an overall timeout of @timeout_base_ms+10 ms expires. 7926 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
7920 * The request is acknowledged once the PCODE reply dword equals @reply after 7927 * The request is acknowledged once the PCODE reply dword equals @reply after
7921 * applying @reply_mask. Polling is first attempted with preemption enabled 7928 * applying @reply_mask. Polling is first attempted with preemption enabled
7922 * for @timeout_base_ms and if this times out for another 10 ms with 7929 * for @timeout_base_ms and if this times out for another 50 ms with
7923 * preemption disabled. 7930 * preemption disabled.
7924 * 7931 *
7925 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some 7932 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7962,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
7955 * worst case) _and_ PCODE was busy for some reason even after a 7962 * worst case) _and_ PCODE was busy for some reason even after a
7956 * (queued) request and @timeout_base_ms delay. As a workaround retry 7963 * (queued) request and @timeout_base_ms delay. As a workaround retry
7957 * the poll with preemption disabled to maximize the number of 7964 * the poll with preemption disabled to maximize the number of
7958 * requests. Increase the timeout from @timeout_base_ms to 10ms to 7965 * requests. Increase the timeout from @timeout_base_ms to 50ms to
7959 * account for interrupts that could reduce the number of these 7966 * account for interrupts that could reduce the number of these
7960 * requests. 7967 * requests, and for any quirks of the PCODE firmware that delay
7968 * the request completion.
7961 */ 7969 */
7962 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); 7970 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
7963 WARN_ON_ONCE(timeout_base_ms > 3); 7971 WARN_ON_ONCE(timeout_base_ms > 3);
7964 preempt_disable(); 7972 preempt_disable();
7965 ret = wait_for_atomic(COND, 10); 7973 ret = wait_for_atomic(COND, 50);
7966 preempt_enable(); 7974 preempt_enable();
7967 7975
7968out: 7976out:
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9ef54688872a..9481ca9a3ae7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
254 int scaler_id = plane_state->scaler_id; 254 int scaler_id = plane_state->scaler_id;
255 const struct intel_scaler *scaler; 255 const struct intel_scaler *scaler;
256 256
257 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
258 plane_id, PS_PLANE_SEL(plane_id));
259
260 scaler = &crtc_state->scaler_state.scalers[scaler_id]; 257 scaler = &crtc_state->scaler_state.scalers[scaler_id];
261 258
262 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), 259 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abe08885a5ba..b7ff592b14f5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
119 119
120 for_each_fw_domain_masked(d, fw_domains, dev_priv) 120 for_each_fw_domain_masked(d, fw_domains, dev_priv)
121 fw_domain_wait_ack(d); 121 fw_domain_wait_ack(d);
122
123 dev_priv->uncore.fw_domains_active |= fw_domains;
122} 124}
123 125
124static void 126static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
130 fw_domain_put(d); 132 fw_domain_put(d);
131 fw_domain_posting_read(d); 133 fw_domain_posting_read(d);
132 } 134 }
135
136 dev_priv->uncore.fw_domains_active &= ~fw_domains;
133} 137}
134 138
135static void 139static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
240 if (WARN_ON(domain->wake_count == 0)) 244 if (WARN_ON(domain->wake_count == 0))
241 domain->wake_count++; 245 domain->wake_count++;
242 246
243 if (--domain->wake_count == 0) { 247 if (--domain->wake_count == 0)
244 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); 248 dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
245 dev_priv->uncore.fw_domains_active &= ~domain->mask;
246 }
247 249
248 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 250 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
249 251
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
454 fw_domains &= ~domain->mask; 456 fw_domains &= ~domain->mask;
455 } 457 }
456 458
457 if (fw_domains) { 459 if (fw_domains)
458 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 460 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
459 dev_priv->uncore.fw_domains_active |= fw_domains;
460 }
461} 461}
462 462
463/** 463/**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
968 fw_domain_arm_timer(domain); 968 fw_domain_arm_timer(domain);
969 969
970 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 970 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
971 dev_priv->uncore.fw_domains_active |= fw_domains;
972} 971}
973 972
974static inline void __force_wake_auto(struct drm_i915_private *dev_priv, 973static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
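All three removed call-site updates collapse into the two lowest-level helpers, so fw_domains_active can no longer drift out of sync with the hardware because some caller forgot the bookkeeping. The shape of the change, as a tiny self-contained sketch:

struct uncore { unsigned int fw_domains_active; };

/* The active mask is maintained in exactly one place per direction. */
static void domains_get(struct uncore *u, unsigned int mask)
{
    /* ... wake every domain in 'mask' and wait for the acks ... */
    u->fw_domains_active |= mask;
}

static void domains_put(struct uncore *u, unsigned int mask)
{
    /* ... release every domain in 'mask' ... */
    u->fw_domains_active &= ~mask;
}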
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index e10a4eda4078..1144e0c9e894 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -65,13 +65,11 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
65 switch (format) { 65 switch (format) {
66 case DRM_FORMAT_RGB565: 66 case DRM_FORMAT_RGB565:
67 dev_dbg(drm->dev, "Setting up RGB565 mode\n"); 67 dev_dbg(drm->dev, "Setting up RGB565 mode\n");
68 ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
69 ctrl |= CTRL_SET_WORD_LENGTH(0); 68 ctrl |= CTRL_SET_WORD_LENGTH(0);
70 ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf); 69 ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
71 break; 70 break;
72 case DRM_FORMAT_XRGB8888: 71 case DRM_FORMAT_XRGB8888:
73 dev_dbg(drm->dev, "Setting up XRGB8888 mode\n"); 72 dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
74 ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
75 ctrl |= CTRL_SET_WORD_LENGTH(3); 73 ctrl |= CTRL_SET_WORD_LENGTH(3);
76 /* Do not use packed pixels = one pixel per word instead. */ 74 /* Do not use packed pixels = one pixel per word instead. */
77 ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7); 75 ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
@@ -87,6 +85,36 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
87 return 0; 85 return 0;
88} 86}
89 87
88static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
89{
90 struct drm_crtc *crtc = &mxsfb->pipe.crtc;
91 struct drm_device *drm = crtc->dev;
92 u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
93 u32 reg;
94
95 reg = readl(mxsfb->base + LCDC_CTRL);
96
97 if (mxsfb->connector.display_info.num_bus_formats)
98 bus_format = mxsfb->connector.display_info.bus_formats[0];
99
100 reg &= ~CTRL_BUS_WIDTH_MASK;
101 switch (bus_format) {
102 case MEDIA_BUS_FMT_RGB565_1X16:
103 reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
104 break;
105 case MEDIA_BUS_FMT_RGB666_1X18:
106 reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT);
107 break;
108 case MEDIA_BUS_FMT_RGB888_1X24:
109 reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
110 break;
111 default:
112 dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
113 break;
114 }
115 writel(reg, mxsfb->base + LCDC_CTRL);
116}
117
90static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb) 118static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
91{ 119{
92 u32 reg; 120 u32 reg;
@@ -168,13 +196,22 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
168 vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; 196 vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
169 if (m->flags & DRM_MODE_FLAG_PVSYNC) 197 if (m->flags & DRM_MODE_FLAG_PVSYNC)
170 vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; 198 vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
171 if (bus_flags & DRM_BUS_FLAG_DE_HIGH) 199 /* Make sure Data Enable is active high by default */
200 if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
172 vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; 201 vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
173 if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) 202 /*
 203 * The DRM_BUS_FLAG_PIXDATA_ defines are controller-centric,
 204 * while the controller's VDCTRL0_DOTCLK is display-centric.
205 * Drive on positive edge -> display samples on falling edge
206 * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
207 */
208 if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
174 vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING; 209 vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
175 210
176 writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0); 211 writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
177 212
213 mxsfb_set_bus_fmt(mxsfb);
214
178 /* Frame length in lines. */ 215 /* Frame length in lines. */
179 writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1); 216 writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
180 217
@@ -184,8 +221,8 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
184 VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal), 221 VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
185 mxsfb->base + LCDC_VDCTRL2); 222 mxsfb->base + LCDC_VDCTRL2);
186 223
187 writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) | 224 writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
188 SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end), 225 SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
189 mxsfb->base + LCDC_VDCTRL3); 226 mxsfb->base + LCDC_VDCTRL3);
190 227
191 writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay), 228 writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index cdfbe0284635..ff6d6a6f842e 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -102,14 +102,18 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
102{ 102{
103 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); 103 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
104 104
105 drm_panel_prepare(mxsfb->panel);
105 mxsfb_crtc_enable(mxsfb); 106 mxsfb_crtc_enable(mxsfb);
107 drm_panel_enable(mxsfb->panel);
106} 108}
107 109
108static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe) 110static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
109{ 111{
110 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); 112 struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
111 113
114 drm_panel_disable(mxsfb->panel);
112 mxsfb_crtc_disable(mxsfb); 115 mxsfb_crtc_disable(mxsfb);
116 drm_panel_unprepare(mxsfb->panel);
113} 117}
114 118
115static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe, 119static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index fa8d17399407..b8e81422d4e2 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -112,6 +112,7 @@ static int mxsfb_attach_endpoint(struct drm_device *drm,
112 112
113int mxsfb_create_output(struct drm_device *drm) 113int mxsfb_create_output(struct drm_device *drm)
114{ 114{
115 struct mxsfb_drm_private *mxsfb = drm->dev_private;
115 struct device_node *ep_np = NULL; 116 struct device_node *ep_np = NULL;
116 struct of_endpoint ep; 117 struct of_endpoint ep;
117 int ret; 118 int ret;
@@ -127,5 +128,8 @@ int mxsfb_create_output(struct drm_device *drm)
127 } 128 }
128 } 129 }
129 130
131 if (!mxsfb->panel)
132 return -EPROBE_DEFER;
133
130 return 0; 134 return 0;
131} 135}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
index 31d62cd0d3d7..66a6ba9ec533 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
@@ -44,6 +44,7 @@
44#define CTRL_DATA_SELECT (1 << 16) 44#define CTRL_DATA_SELECT (1 << 16)
45#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10) 45#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
46#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3) 46#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
47#define CTRL_BUS_WIDTH_MASK (0x3 << 10)
47#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8) 48#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
48#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3) 49#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
49#define CTRL_MASTER (1 << 5) 50#define CTRL_MASTER (1 << 5)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index af267c35d813..ee5883f59be5 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
147 struct drm_gem_object *obj = buffer->priv; 147 struct drm_gem_object *obj = buffer->priv;
148 int ret = 0; 148 int ret = 0;
149 149
150 if (WARN_ON(!obj->filp))
151 return -EINVAL;
152
153 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); 150 ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
154 if (ret < 0) 151 if (ret < 0)
155 return ret; 152 return ret;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d12b8978142f..72e1588580a1 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2984,6 +2984,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2984 (rdev->pdev->device == 0x6667)) { 2984 (rdev->pdev->device == 0x6667)) {
2985 max_sclk = 75000; 2985 max_sclk = 75000;
2986 } 2986 }
2987 } else if (rdev->family == CHIP_OLAND) {
2988 if ((rdev->pdev->device == 0x6604) &&
2989 (rdev->pdev->subsystem_vendor == 0x1028) &&
2990 (rdev->pdev->subsystem_device == 0x066F)) {
2991 max_sclk = 75000;
2992 }
2987 } 2993 }
2988 2994
2989 if (rps->vce_active) { 2995 if (rps->vce_active) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index b5bfbe50bd87..b0ff304ce3dc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -32,6 +32,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
32{ 32{
33 const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; 33 const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
34 struct rcar_du_device *rcdu = crtc->group->dev; 34 struct rcar_du_device *rcdu = crtc->group->dev;
35 struct vsp1_du_lif_config cfg = {
36 .width = mode->hdisplay,
37 .height = mode->vdisplay,
38 };
35 struct rcar_du_plane_state state = { 39 struct rcar_du_plane_state state = {
36 .state = { 40 .state = {
37 .crtc = &crtc->crtc, 41 .crtc = &crtc->crtc,
@@ -66,12 +70,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
66 */ 70 */
67 crtc->group->need_restart = true; 71 crtc->group->need_restart = true;
68 72
69 vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay); 73 vsp1_du_setup_lif(crtc->vsp->vsp, &cfg);
70} 74}
71 75
72void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) 76void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
73{ 77{
74 vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0); 78 vsp1_du_setup_lif(crtc->vsp->vsp, NULL);
75} 79}
76 80
77void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) 81void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index f80bf9385e41..d745e8b50fb8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
464{ 464{
465 struct drm_device *dev = crtc->dev; 465 struct drm_device *dev = crtc->dev;
466 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 466 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
467 unsigned long flags;
467 468
468 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 469 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
469 mutex_lock(&tilcdc_crtc->enable_lock); 470 mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
484 tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, 485 tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
485 LCDC_PALETTE_LOAD_MODE(DATA_ONLY), 486 LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
486 LCDC_PALETTE_LOAD_MODE_MASK); 487 LCDC_PALETTE_LOAD_MODE_MASK);
488
 489 /* There is no real chance for a race here as the timestamp
 490 * is taken before the raster DMA is started. The spin-lock is
 491 * taken to provide a memory barrier after taking the timestamp
 492 * and to avoid a context switch between taking the timestamp and
 493 * enabling the raster.
494 */
495 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
496 tilcdc_crtc->last_vblank = ktime_get();
487 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); 497 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
498 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
488 499
489 drm_crtc_vblank_on(crtc); 500 drm_crtc_vblank_on(crtc);
490 501
@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
539 } 550 }
540 551
541 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); 552 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
542 tilcdc_crtc->last_vblank = 0;
543 553
544 tilcdc_crtc->enabled = false; 554 tilcdc_crtc->enabled = false;
545 mutex_unlock(&tilcdc_crtc->enable_lock); 555 mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
602{ 612{
603 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 613 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
604 struct drm_device *dev = crtc->dev; 614 struct drm_device *dev = crtc->dev;
605 unsigned long flags;
606 615
607 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 616 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
608 617
@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
614 drm_framebuffer_reference(fb); 623 drm_framebuffer_reference(fb);
615 624
616 crtc->primary->fb = fb; 625 crtc->primary->fb = fb;
626 tilcdc_crtc->event = event;
617 627
618 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); 628 mutex_lock(&tilcdc_crtc->enable_lock);
619 629
620 if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) { 630 if (tilcdc_crtc->enabled) {
631 unsigned long flags;
621 ktime_t next_vblank; 632 ktime_t next_vblank;
622 s64 tdiff; 633 s64 tdiff;
623 634
624 next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, 635 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
625 1000000 / crtc->hwmode.vrefresh);
626 636
637 next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
638 1000000 / crtc->hwmode.vrefresh);
627 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); 639 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
628 640
629 if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) 641 if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
630 tilcdc_crtc->next_fb = fb; 642 tilcdc_crtc->next_fb = fb;
631 } 643 else
632 644 set_scanout(crtc, fb);
633 if (tilcdc_crtc->next_fb != fb)
634 set_scanout(crtc, fb);
635 645
636 tilcdc_crtc->event = event; 646 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
647 }
637 648
638 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 649 mutex_unlock(&tilcdc_crtc->enable_lock);
639 650
640 return 0; 651 return 0;
641} 652}
@@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)
1036 1047
1037fail: 1048fail:
1038 tilcdc_crtc_destroy(crtc); 1049 tilcdc_crtc_destroy(crtc);
1039 return -ENOMEM; 1050 return ret;
1040} 1051}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 81a80c82f1bd..bd0d1988feb2 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -543,7 +543,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
543 /* 543 /*
544 * In case a device driver's probe() fails (e.g., 544 * In case a device driver's probe() fails (e.g.,
545 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is 545 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
546 * rescinded later (e.g., we dynamically disble an Integrated Service 546 * rescinded later (e.g., we dynamically disable an Integrated Service
547 * in Hyper-V Manager), the driver's remove() invokes vmbus_close(): 547 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
548 * here we should skip most of the below cleanup work. 548 * here we should skip most of the below cleanup work.
549 */ 549 */
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 0652281662a8..78792b4d6437 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
465 u8 *tmp_buf; 465 u8 *tmp_buf;
466 int len = 0; 466 int len = 0;
467 int xfersz = brcmstb_i2c_get_xfersz(dev); 467 int xfersz = brcmstb_i2c_get_xfersz(dev);
468 u32 cond, cond_per_msg;
468 469
469 if (dev->is_suspended) 470 if (dev->is_suspended)
470 return -EBUSY; 471 return -EBUSY;
@@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
481 pmsg->buf ? pmsg->buf[0] : '0', pmsg->len); 482 pmsg->buf ? pmsg->buf[0] : '0', pmsg->len);
482 483
483 if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) 484 if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART))
484 brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP)); 485 cond = ~COND_START_STOP;
485 else 486 else
486 brcmstb_set_i2c_start_stop(dev, 487 cond = COND_RESTART | COND_NOSTOP;
487 COND_RESTART | COND_NOSTOP); 488
489 brcmstb_set_i2c_start_stop(dev, cond);
488 490
489 /* Send slave address */ 491 /* Send slave address */
490 if (!(pmsg->flags & I2C_M_NOSTART)) { 492 if (!(pmsg->flags & I2C_M_NOSTART)) {
@@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
497 } 499 }
498 } 500 }
499 501
502 cond_per_msg = cond;
503
500 /* Perform data transfer */ 504 /* Perform data transfer */
501 while (len) { 505 while (len) {
502 bytes_to_xfer = min(len, xfersz); 506 bytes_to_xfer = min(len, xfersz);
503 507
504 if (len <= xfersz && i == (num - 1)) 508 if (len <= xfersz) {
505 brcmstb_set_i2c_start_stop(dev, 509 if (i == (num - 1))
506 ~(COND_START_STOP)); 510 cond_per_msg = cond_per_msg &
511 ~(COND_RESTART | COND_NOSTOP);
512 else
513 cond_per_msg = cond;
514 } else {
515 cond_per_msg = (cond_per_msg & ~COND_RESTART) |
516 COND_NOSTOP;
517 }
518
519 brcmstb_set_i2c_start_stop(dev, cond_per_msg);
507 520
508 rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, 521 rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf,
509 bytes_to_xfer, pmsg); 522 bytes_to_xfer, pmsg);
@@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
512 525
513 len -= bytes_to_xfer; 526 len -= bytes_to_xfer;
514 tmp_buf += bytes_to_xfer; 527 tmp_buf += bytes_to_xfer;
528
529 cond_per_msg = COND_NOSTART | COND_NOSTOP;
515 } 530 }
516 } 531 }
517 532
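The per-chunk condition logic above can be read as three rules: only the final chunk of the final message may generate STOP, an intermediate chunk of a long message must neither restart nor stop, and every chunk after the first needs no START. A self-contained sketch of that decision (flag values and names are illustrative, not the driver's):

#define COND_RESTART 0x1
#define COND_NOSTOP  0x2
#define COND_NOSTART 0x4

/* msg_cond is the condition chosen for the message as a whole; 'first',
 * 'last_chunk' and 'last_msg' locate the chunk within the transfer. */
static unsigned int chunk_cond(unsigned int msg_cond, int first,
                               int last_chunk, int last_msg)
{
    unsigned int cond = first ? msg_cond : (COND_NOSTART | COND_NOSTOP);

    if (!last_chunk)            /* mid-message: keep the bus held */
        return (cond & ~COND_RESTART) | COND_NOSTOP;
    if (last_msg)               /* end of transfer: let STOP go out */
        return cond & ~(COND_RESTART | COND_NOSTOP);
    return msg_cond;            /* boundary to the next message */
}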
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index c1db3a5a340f..d9aaf1790e0e 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -88,6 +88,7 @@ struct dw_i2c_dev {
88 void __iomem *base; 88 void __iomem *base;
89 struct completion cmd_complete; 89 struct completion cmd_complete;
90 struct clk *clk; 90 struct clk *clk;
91 struct reset_control *rst;
91 u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev); 92 u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
92 struct dw_pci_controller *controller; 93 struct dw_pci_controller *controller;
93 int cmd_err; 94 int cmd_err;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 6ce431323125..79c4b4ea0539 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -38,6 +38,7 @@
38#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
39#include <linux/property.h> 39#include <linux/property.h>
40#include <linux/io.h> 40#include <linux/io.h>
41#include <linux/reset.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/acpi.h> 43#include <linux/acpi.h>
43#include <linux/platform_data/i2c-designware.h> 44#include <linux/platform_data/i2c-designware.h>
@@ -199,6 +200,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
199 dev->irq = irq; 200 dev->irq = irq;
200 platform_set_drvdata(pdev, dev); 201 platform_set_drvdata(pdev, dev);
201 202
203 dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
204 if (IS_ERR(dev->rst)) {
205 if (PTR_ERR(dev->rst) == -EPROBE_DEFER)
206 return -EPROBE_DEFER;
207 } else {
208 reset_control_deassert(dev->rst);
209 }
210
202 if (pdata) { 211 if (pdata) {
203 dev->clk_freq = pdata->i2c_scl_freq; 212 dev->clk_freq = pdata->i2c_scl_freq;
204 } else { 213 } else {
@@ -235,12 +244,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
235 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { 244 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
236 dev_err(&pdev->dev, 245 dev_err(&pdev->dev,
237 "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); 246 "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
238 return -EINVAL; 247 r = -EINVAL;
248 goto exit_reset;
239 } 249 }
240 250
241 r = i2c_dw_eval_lock_support(dev); 251 r = i2c_dw_eval_lock_support(dev);
242 if (r) 252 if (r)
243 return r; 253 goto exit_reset;
244 254
245 dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; 255 dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
246 256
@@ -286,10 +296,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
286 } 296 }
287 297
288 r = i2c_dw_probe(dev); 298 r = i2c_dw_probe(dev);
289 if (r && !dev->pm_runtime_disabled) 299 if (r)
290 pm_runtime_disable(&pdev->dev); 300 goto exit_probe;
291 301
292 return r; 302 return r;
303
304exit_probe:
305 if (!dev->pm_runtime_disabled)
306 pm_runtime_disable(&pdev->dev);
307exit_reset:
308 if (!IS_ERR_OR_NULL(dev->rst))
309 reset_control_assert(dev->rst);
310 return r;
293} 311}
294 312
295static int dw_i2c_plat_remove(struct platform_device *pdev) 313static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -306,6 +324,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
306 pm_runtime_put_sync(&pdev->dev); 324 pm_runtime_put_sync(&pdev->dev);
307 if (!dev->pm_runtime_disabled) 325 if (!dev->pm_runtime_disabled)
308 pm_runtime_disable(&pdev->dev); 326 pm_runtime_disable(&pdev->dev);
327 if (!IS_ERR_OR_NULL(dev->rst))
328 reset_control_assert(dev->rst);
309 329
310 return 0; 330 return 0;
311} 331}
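The reset handling above follows the optional-resource pattern: a missing reset line is tolerated (the error pointer is simply carried around and guarded with IS_ERR_OR_NULL), but -EPROBE_DEFER must be propagated so the probe reruns once the reset controller appears. A kernel-style sketch of the acquire side:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/reset.h>

/* Acquire an optional reset: absence is fine, a deferred provider is not. */
static int acquire_optional_reset(struct device *dev,
                                  struct reset_control **rst)
{
    *rst = devm_reset_control_get_optional_exclusive(dev, NULL);
    if (IS_ERR(*rst)) {
        if (PTR_ERR(*rst) == -EPROBE_DEFER)
            return -EPROBE_DEFER;
        return 0;       /* no reset line wired up: carry on without */
    }

    reset_control_deassert(*rst);
    return 0;
}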
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index cbd93ce0661f..736a82472101 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -457,7 +457,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
457 457
458 int_status = readl(i2c->regs + HSI2C_INT_STATUS); 458 int_status = readl(i2c->regs + HSI2C_INT_STATUS);
459 writel(int_status, i2c->regs + HSI2C_INT_STATUS); 459 writel(int_status, i2c->regs + HSI2C_INT_STATUS);
460 trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
461 460
462 /* handle interrupt related to the transfer status */ 461 /* handle interrupt related to the transfer status */
463 if (i2c->variant->hw == HSI2C_EXYNOS7) { 462 if (i2c->variant->hw == HSI2C_EXYNOS7) {
@@ -482,11 +481,13 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
482 goto stop; 481 goto stop;
483 } 482 }
484 483
484 trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
485 if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) { 485 if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) {
486 i2c->state = -EAGAIN; 486 i2c->state = -EAGAIN;
487 goto stop; 487 goto stop;
488 } 488 }
489 } else if (int_status & HSI2C_INT_I2C) { 489 } else if (int_status & HSI2C_INT_I2C) {
490 trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
490 if (trans_status & HSI2C_NO_DEV_ACK) { 491 if (trans_status & HSI2C_NO_DEV_ACK) {
491 dev_dbg(i2c->dev, "No ACK from device\n"); 492 dev_dbg(i2c->dev, "No ACK from device\n");
492 i2c->state = -ENXIO; 493 i2c->state = -ENXIO;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 2aa61bbbd307..73b97c71a484 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
175 wdata1 |= *buf++ << ((i - 4) * 8); 175 wdata1 |= *buf++ << ((i - 4) * 8);
176 176
177 writel(wdata0, i2c->regs + REG_TOK_WDATA0); 177 writel(wdata0, i2c->regs + REG_TOK_WDATA0);
178 writel(wdata0, i2c->regs + REG_TOK_WDATA1); 178 writel(wdata1, i2c->regs + REG_TOK_WDATA1);
179 179
180 dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__, 180 dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
181 wdata0, wdata1, len); 181 wdata0, wdata1, len);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 4a7d9bc2142b..45d61714c81b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -172,14 +172,6 @@ static const struct i2c_adapter_quirks mt6577_i2c_quirks = {
172 .max_comb_2nd_msg_len = 31, 172 .max_comb_2nd_msg_len = 31,
173}; 173};
174 174
175static const struct i2c_adapter_quirks mt8173_i2c_quirks = {
176 .max_num_msgs = 65535,
177 .max_write_len = 65535,
178 .max_read_len = 65535,
179 .max_comb_1st_msg_len = 65535,
180 .max_comb_2nd_msg_len = 65535,
181};
182
183static const struct mtk_i2c_compatible mt6577_compat = { 175static const struct mtk_i2c_compatible mt6577_compat = {
184 .quirks = &mt6577_i2c_quirks, 176 .quirks = &mt6577_i2c_quirks,
185 .pmic_i2c = 0, 177 .pmic_i2c = 0,
@@ -199,7 +191,6 @@ static const struct mtk_i2c_compatible mt6589_compat = {
199}; 191};
200 192
201static const struct mtk_i2c_compatible mt8173_compat = { 193static const struct mtk_i2c_compatible mt8173_compat = {
202 .quirks = &mt8173_i2c_quirks,
203 .pmic_i2c = 0, 194 .pmic_i2c = 0,
204 .dcm = 1, 195 .dcm = 1,
205 .auto_restart = 1, 196 .auto_restart = 1,
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 8f11d347b3ec..c811af4c8d81 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
218 } 218 }
219 219
220 if (riic->is_last || riic->err) { 220 if (riic->is_last || riic->err) {
221 riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); 221 riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
222 writeb(ICCR2_SP, riic->base + RIIC_ICCR2); 222 writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
223 } else {
224 /* Transfer is complete, but do not send STOP */
225 riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
226 complete(&riic->msg_done);
223 } 227 }
224 228
225 return IRQ_HANDLED; 229 return IRQ_HANDLED;
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 83768e85a919..2178266bca79 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
429 while (muxc->num_adapters) { 429 while (muxc->num_adapters) {
430 struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters]; 430 struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
431 struct i2c_mux_priv *priv = adap->algo_data; 431 struct i2c_mux_priv *priv = adap->algo_data;
432 struct device_node *np = adap->dev.of_node;
432 433
433 muxc->adapter[muxc->num_adapters] = NULL; 434 muxc->adapter[muxc->num_adapters] = NULL;
434 435
@@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
438 439
439 sysfs_remove_link(&priv->adap.dev.kobj, "mux_device"); 440 sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
440 i2c_del_adapter(adap); 441 i2c_del_adapter(adap);
442 of_node_put(np);
441 kfree(priv); 443 kfree(priv);
442 } 444 }
443} 445}
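The i2c-mux change above caches adap->dev.of_node before deleting the adapter: i2c_del_adapter() is the last point at which adap may be dereferenced, yet the OF node reference taken at registration still has to be dropped afterwards. The ordering, condensed into a sketch:

	/* Sketch of the teardown ordering used above. */
	struct device_node *np = adap->dev.of_node;	/* cache while adap is alive */

	i2c_del_adapter(adap);		/* adap must not be touched after this */
	of_node_put(np);		/* drop the ref without reading adap */
	kfree(priv);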
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 1eef56a89b1f..f96601268f71 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
198 198
199static int __init crossbar_of_init(struct device_node *node) 199static int __init crossbar_of_init(struct device_node *node)
200{ 200{
201 int i, size, max = 0, reserved = 0, entry; 201 u32 max = 0, entry, reg_size;
202 int i, size, reserved = 0;
202 const __be32 *irqsr; 203 const __be32 *irqsr;
203 int ret = -ENOMEM; 204 int ret = -ENOMEM;
204 205
@@ -275,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node)
275 if (!cb->register_offsets) 276 if (!cb->register_offsets)
276 goto err_irq_map; 277 goto err_irq_map;
277 278
278 of_property_read_u32(node, "ti,reg-size", &size); 279 of_property_read_u32(node, "ti,reg-size", &reg_size);
279 280
280 switch (size) { 281 switch (reg_size) {
281 case 1: 282 case 1:
282 cb->write = crossbar_writeb; 283 cb->write = crossbar_writeb;
283 break; 284 break;
@@ -303,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node)
303 continue; 304 continue;
304 305
305 cb->register_offsets[i] = reserved; 306 cb->register_offsets[i] = reserved;
306 reserved += size; 307 reserved += reg_size;
307 } 308 }
308 309
309 of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map); 310 of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
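The crossbar fix above addresses a type hazard: of_property_read_u32() writes through a u32 *, so handing it the int variable `size`, which was already serving as a different property's length, invites sign and aliasing trouble and clobbers the earlier value. The safe shape is one correctly typed variable per property, as in this sketch (property names hypothetical):

	u32 reg_size, max_irqs;

	/* Each property gets its own u32 destination; never pun through an int. */
	if (of_property_read_u32(node, "ti,reg-size", &reg_size))
		return -EINVAL;
	if (of_property_read_u32(node, "ti,max-irqs", &max_irqs))
		return -EINVAL;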
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 23201004fd7a..f77f840d2b5f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1601,6 +1601,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
1601 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; 1601 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
1602} 1602}
1603 1603
1604static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
1605{
1606 struct its_node *its = data;
1607
1608 /* On QDF2400, the size of the ITE is 16 bytes */
1609 its->ite_size = 16;
1610}
1611
1604static const struct gic_quirk its_quirks[] = { 1612static const struct gic_quirk its_quirks[] = {
1605#ifdef CONFIG_CAVIUM_ERRATUM_22375 1613#ifdef CONFIG_CAVIUM_ERRATUM_22375
1606 { 1614 {
@@ -1618,6 +1626,14 @@ static const struct gic_quirk its_quirks[] = {
1618 .init = its_enable_quirk_cavium_23144, 1626 .init = its_enable_quirk_cavium_23144,
1619 }, 1627 },
1620#endif 1628#endif
1629#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
1630 {
1631 .desc = "ITS: QDF2400 erratum 0065",
1632 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
1633 .mask = 0xffffffff,
1634 .init = its_enable_quirk_qdf2400_e0065,
1635 },
1636#endif
1621 { 1637 {
1622 } 1638 }
1623}; 1639};
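The new entry is matched the same way as the Cavium ones: the probe code reads the ITS IIDR and runs the init hook of every table entry whose masked value matches. Roughly, assuming the gic_quirk layout that the table above populates:

	struct gic_quirk {		/* assumed layout, mirroring the table */
		const char *desc;
		u32 iidr;
		u32 mask;
		void (*init)(void *data);
	};

	static void apply_quirks(void *data, u32 iidr,
				 const struct gic_quirk *quirks)
	{
		for (; quirks->desc; quirks++) {
			if ((iidr & quirks->mask) != quirks->iidr)
				continue;
			quirks->init(data);
			pr_info("ITS: enabling workaround for %s\n",
				quirks->desc);
		}
	}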
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 11e13c56126f..2da3ff650e1d 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
2317 return -ENODEV; 2317 return -ENODEV;
2318 } 2318 }
2319 2319
2320 if (hostif->desc.bNumEndpoints < 1)
2321 return -ENODEV;
2322
2320 dev_info(&udev->dev, 2323 dev_info(&udev->dev,
2321 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", 2324 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
2322 __func__, le16_to_cpu(udev->descriptor.idVendor), 2325 __func__, le16_to_cpu(udev->descriptor.idVendor),
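The one-line gigaset check belongs to a family of USB probe hardening fixes: descriptor contents come from the device, so bNumEndpoints must be validated before any hostif->endpoint[n] access. A sketch of the idiom, with hypothetical foo_* names:

	#include <linux/usb.h>

	static int foo_probe(struct usb_interface *interface,
			     const struct usb_device_id *id)
	{
		struct usb_host_interface *hostif = interface->cur_altsetting;

		/* Never trust device-supplied counts. */
		if (hostif->desc.bNumEndpoints < 1)
			return -ENODEV;

		/* endpoint[0] is now known to exist. */
		return foo_setup_endpoint(&hostif->endpoint[0].desc);	/* hypothetical */
	}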
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 409849165838..f64a36007800 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
239 } 239 }
240 } 240 }
241 } else { 241 } else {
242 // Disble B channel interrupts 242 // Disable B channel interrupts
243 st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL); 243 st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);
244 244
245 // Disable B channel FIFOs 245 // Disable B channel FIFOs
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 3f041b187033..f757cef293f8 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
392 * To get all the fields, copy all archdata 392 * To get all the fields, copy all archdata
393 */ 393 */
394 dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata; 394 dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
395 dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
395#endif /* CONFIG_PCI */ 396#endif /* CONFIG_PCI */
396 397
397#ifdef DEBUG 398#ifdef DEBUG
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index a126919ed102..5d13930f0f22 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -4,7 +4,6 @@
4 4
5#include <linux/blkdev.h> 5#include <linux/blkdev.h>
6#include <linux/errno.h> 6#include <linux/errno.h>
7#include <linux/blkdev.h>
8#include <linux/kernel.h> 7#include <linux/kernel.h>
9#include <linux/sched/clock.h> 8#include <linux/sched/clock.h>
10#include <linux/llist.h> 9#include <linux/llist.h>
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f4ffd1eb8f44..dfb75979e455 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
989 struct dm_offload *o = container_of(cb, struct dm_offload, cb); 989 struct dm_offload *o = container_of(cb, struct dm_offload, cb);
990 struct bio_list list; 990 struct bio_list list;
991 struct bio *bio; 991 struct bio *bio;
992 int i;
992 993
993 INIT_LIST_HEAD(&o->cb.list); 994 INIT_LIST_HEAD(&o->cb.list);
994 995
995 if (unlikely(!current->bio_list)) 996 if (unlikely(!current->bio_list))
996 return; 997 return;
997 998
998 list = *current->bio_list; 999 for (i = 0; i < 2; i++) {
999 bio_list_init(current->bio_list); 1000 list = current->bio_list[i];
1000 1001 bio_list_init(&current->bio_list[i]);
1001 while ((bio = bio_list_pop(&list))) { 1002
1002 struct bio_set *bs = bio->bi_pool; 1003 while ((bio = bio_list_pop(&list))) {
1003 if (unlikely(!bs) || bs == fs_bio_set) { 1004 struct bio_set *bs = bio->bi_pool;
1004 bio_list_add(current->bio_list, bio); 1005 if (unlikely(!bs) || bs == fs_bio_set) {
1005 continue; 1006 bio_list_add(&current->bio_list[i], bio);
1007 continue;
1008 }
1009
1010 spin_lock(&bs->rescue_lock);
1011 bio_list_add(&bs->rescue_list, bio);
1012 queue_work(bs->rescue_workqueue, &bs->rescue_work);
1013 spin_unlock(&bs->rescue_lock);
1006 } 1014 }
1007
1008 spin_lock(&bs->rescue_lock);
1009 bio_list_add(&bs->rescue_list, bio);
1010 queue_work(bs->rescue_workqueue, &bs->rescue_work);
1011 spin_unlock(&bs->rescue_lock);
1012 } 1015 }
1013} 1016}
1014 1017
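The dm.c rework tracks a block-core change in which current->bio_list became a two-element array of bio lists rather than a single list, so any code that snapshots and drains the per-task list must now loop over both entries. The drain skeleton, with a hypothetical per-bio handler standing in for the requeue-or-rescue decision above:

	struct bio_list list;
	struct bio *bio;
	int i;

	if (unlikely(!current->bio_list))
		return;

	for (i = 0; i < 2; i++) {
		list = current->bio_list[i];		/* snapshot one list */
		bio_list_init(&current->bio_list[i]);	/* and reset the original */

		while ((bio = bio_list_pop(&list)))
			handle_one_bio(bio);		/* hypothetical */
	}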
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 2b13117fb918..321ecac23027 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
777 bm_lockres->flags |= DLM_LKF_NOQUEUE; 777 bm_lockres->flags |= DLM_LKF_NOQUEUE;
778 ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); 778 ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
779 if (ret == -EAGAIN) { 779 if (ret == -EAGAIN) {
780 memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
781 s = read_resync_info(mddev, bm_lockres); 780 s = read_resync_info(mddev, bm_lockres);
782 if (s) { 781 if (s) {
783 pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n", 782 pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
974 lockres_free(cinfo->bitmap_lockres); 973 lockres_free(cinfo->bitmap_lockres);
975 unlock_all_bitmaps(mddev); 974 unlock_all_bitmaps(mddev);
976 dlm_release_lockspace(cinfo->lockspace, 2); 975 dlm_release_lockspace(cinfo->lockspace, 2);
976 kfree(cinfo);
977 return 0; 977 return 0;
978} 978}
979 979
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 548d1b8014f8..f6ae1d67bcd0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
440} 440}
441EXPORT_SYMBOL(md_flush_request); 441EXPORT_SYMBOL(md_flush_request);
442 442
443void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
444{
445 struct mddev *mddev = cb->data;
446 md_wakeup_thread(mddev->thread);
447 kfree(cb);
448}
449EXPORT_SYMBOL(md_unplug);
450
451static inline struct mddev *mddev_get(struct mddev *mddev) 443static inline struct mddev *mddev_get(struct mddev *mddev)
452{ 444{
453 atomic_inc(&mddev->active); 445 atomic_inc(&mddev->active);
@@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1887 } 1879 }
1888 sb = page_address(rdev->sb_page); 1880 sb = page_address(rdev->sb_page);
1889 sb->data_size = cpu_to_le64(num_sectors); 1881 sb->data_size = cpu_to_le64(num_sectors);
1890 sb->super_offset = rdev->sb_start; 1882 sb->super_offset = cpu_to_le64(rdev->sb_start);
1891 sb->sb_csum = calc_sb_1_csum(sb); 1883 sb->sb_csum = calc_sb_1_csum(sb);
1892 do { 1884 do {
1893 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1885 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
2295 /* Check if any mddev parameters have changed */ 2287 /* Check if any mddev parameters have changed */
2296 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || 2288 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2297 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || 2289 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2298 (mddev->layout != le64_to_cpu(sb->layout)) || 2290 (mddev->layout != le32_to_cpu(sb->layout)) ||
2299 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || 2291 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2300 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) 2292 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2301 return true; 2293 return true;
@@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6458 mddev->layout = info->layout; 6450 mddev->layout = info->layout;
6459 mddev->chunk_sectors = info->chunk_size >> 9; 6451 mddev->chunk_sectors = info->chunk_size >> 9;
6460 6452
6461 mddev->max_disks = MD_SB_DISKS;
6462
6463 if (mddev->persistent) { 6453 if (mddev->persistent) {
6464 mddev->flags = 0; 6454 mddev->max_disks = MD_SB_DISKS;
6465 mddev->sb_flags = 0; 6455 mddev->flags = 0;
6456 mddev->sb_flags = 0;
6466 } 6457 }
6467 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 6458 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6468 6459
@@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
6533 return -ENOSPC; 6524 return -ENOSPC;
6534 } 6525 }
6535 rv = mddev->pers->resize(mddev, num_sectors); 6526 rv = mddev->pers->resize(mddev, num_sectors);
6536 if (!rv) 6527 if (!rv) {
6537 revalidate_disk(mddev->gendisk); 6528 if (mddev->queue) {
6529 set_capacity(mddev->gendisk, mddev->array_sectors);
6530 revalidate_disk(mddev->gendisk);
6531 }
6532 }
6538 return rv; 6533 return rv;
6539} 6534}
6540 6535
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b8859cbf84b6..dde8ecb760c8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
676extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 676extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
677 struct mddev *mddev); 677 struct mddev *mddev);
678 678
679extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
680extern void md_reload_sb(struct mddev *mddev, int raid_disk); 679extern void md_reload_sb(struct mddev *mddev, int raid_disk);
681extern void md_update_sb(struct mddev *mddev, int force); 680extern void md_update_sb(struct mddev *mddev, int force);
682extern void md_kick_rdev_from_array(struct md_rdev * rdev); 681extern void md_kick_rdev_from_array(struct md_rdev * rdev);
683struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); 682struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
684static inline int mddev_check_plugged(struct mddev *mddev)
685{
686 return !!blk_check_plugged(md_unplug, mddev,
687 sizeof(struct blk_plug_cb));
688}
689 683
690static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) 684static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
691{ 685{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fbc2d7851b49..a34f58772022 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
1027static void freeze_array(struct r1conf *conf, int extra) 1027static void freeze_array(struct r1conf *conf, int extra)
1028{ 1028{
1029 /* Stop sync I/O and normal I/O and wait for everything to 1029 /* Stop sync I/O and normal I/O and wait for everything to
1030 * go quite. 1030 * go quiet.
1031 * This is called in two situations: 1031 * This is called in two situations:
1032 * 1) management command handlers (reshape, remove disk, quiesce). 1032 * 1) management command handlers (reshape, remove disk, quiesce).
1033 * 2) one normal I/O request failed. 1033 * 2) one normal I/O request failed.
@@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
1587 split = bio; 1587 split = bio;
1588 } 1588 }
1589 1589
1590 if (bio_data_dir(split) == READ) 1590 if (bio_data_dir(split) == READ) {
1591 raid1_read_request(mddev, split); 1591 raid1_read_request(mddev, split);
1592 else 1592
1593 /*
1594 * If a bio is split, the first part of the bio will
1595 * pass the barrier but the bio is queued in
1596 * current->bio_list (see generic_make_request). If
1597 * there is a raise_barrier() called here, the second
1598 * part of the bio can't pass the barrier. But since
1599 * the first part isn't dispatched to the underlying
1600 * disks yet, the barrier is never released, hence
1601 * raise_barrier will always wait. We have a deadlock.
1602 * Note, this only happens in the read path. For the
1603 * write path, the first part of the bio is dispatched
1604 * in a schedule() call (because of blk plug) or
1605 * offloaded to raid1d.
1606 * Returning from the function immediately changes the
1607 * bio order queued in bio_list and avoids the deadlock.
1608 */
1609 if (split != bio) {
1610 generic_make_request(bio);
1611 break;
1612 }
1613 } else
1593 raid1_write_request(mddev, split); 1614 raid1_write_request(mddev, split);
1594 } while (split != bio); 1615 } while (split != bio);
1595} 1616}
@@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
3246 return ret; 3267 return ret;
3247 } 3268 }
3248 md_set_array_sectors(mddev, newsize); 3269 md_set_array_sectors(mddev, newsize);
3249 set_capacity(mddev->gendisk, mddev->array_sectors);
3250 revalidate_disk(mddev->gendisk);
3251 if (sectors > mddev->dev_sectors && 3270 if (sectors > mddev->dev_sectors &&
3252 mddev->recovery_cp > mddev->dev_sectors) { 3271 mddev->recovery_cp > mddev->dev_sectors) {
3253 mddev->recovery_cp = mddev->dev_sectors; 3272 mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 063c43d83b72..e89a8d78a9ed 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf)
974 !conf->barrier || 974 !conf->barrier ||
975 (atomic_read(&conf->nr_pending) && 975 (atomic_read(&conf->nr_pending) &&
976 current->bio_list && 976 current->bio_list &&
977 !bio_list_empty(current->bio_list)), 977 (!bio_list_empty(&current->bio_list[0]) ||
978 !bio_list_empty(&current->bio_list[1]))),
978 conf->resync_lock); 979 conf->resync_lock);
979 conf->nr_waiting--; 980 conf->nr_waiting--;
980 if (!conf->nr_waiting) 981 if (!conf->nr_waiting)
@@ -1477,11 +1478,24 @@ retry_write:
1477 mbio->bi_bdev = (void*)rdev; 1478 mbio->bi_bdev = (void*)rdev;
1478 1479
1479 atomic_inc(&r10_bio->remaining); 1480 atomic_inc(&r10_bio->remaining);
1481
1482 cb = blk_check_plugged(raid10_unplug, mddev,
1483 sizeof(*plug));
1484 if (cb)
1485 plug = container_of(cb, struct raid10_plug_cb,
1486 cb);
1487 else
1488 plug = NULL;
1480 spin_lock_irqsave(&conf->device_lock, flags); 1489 spin_lock_irqsave(&conf->device_lock, flags);
1481 bio_list_add(&conf->pending_bio_list, mbio); 1490 if (plug) {
1482 conf->pending_count++; 1491 bio_list_add(&plug->pending, mbio);
1492 plug->pending_cnt++;
1493 } else {
1494 bio_list_add(&conf->pending_bio_list, mbio);
1495 conf->pending_count++;
1496 }
1483 spin_unlock_irqrestore(&conf->device_lock, flags); 1497 spin_unlock_irqrestore(&conf->device_lock, flags);
1484 if (!mddev_check_plugged(mddev)) 1498 if (!plug)
1485 md_wakeup_thread(mddev->thread); 1499 md_wakeup_thread(mddev->thread);
1486 } 1500 }
1487 } 1501 }
@@ -1571,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
1571 split = bio; 1585 split = bio;
1572 } 1586 }
1573 1587
1588 /*
1589 * If a bio is split, the first part of the bio will pass
1590 * the barrier but the bio is queued in current->bio_list (see
1591 * generic_make_request). If there is a raise_barrier() called
1592 * here, the second part of the bio can't pass the barrier. But
1593 * since the first part isn't dispatched to the underlying disks
1594 * yet, the barrier is never released, hence raise_barrier will
1595 * always wait. We have a deadlock.
1596 * Note, this only happens in the read path. For the write path,
1597 * the first part of the bio is dispatched in a schedule() call
1598 * (because of blk plug) or offloaded to raid10d.
1599 * Returning from the function immediately changes the bio
1600 * order queued in bio_list and avoids the deadlock.
1601 */
1574 __make_request(mddev, split); 1602 __make_request(mddev, split);
1603 if (split != bio && bio_data_dir(bio) == READ) {
1604 generic_make_request(bio);
1605 break;
1606 }
1575 } while (split != bio); 1607 } while (split != bio);
1576 1608
1577 /* In case raid10d snuck in to freeze_array */ 1609 /* In case raid10d snuck in to freeze_array */
@@ -3943,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
3943 return ret; 3975 return ret;
3944 } 3976 }
3945 md_set_array_sectors(mddev, size); 3977 md_set_array_sectors(mddev, size);
3946 if (mddev->queue) {
3947 set_capacity(mddev->gendisk, mddev->array_sectors);
3948 revalidate_disk(mddev->gendisk);
3949 }
3950 if (sectors > mddev->dev_sectors && 3978 if (sectors > mddev->dev_sectors &&
3951 mddev->recovery_cp > oldsize) { 3979 mddev->recovery_cp > oldsize) {
3952 mddev->recovery_cp = oldsize; 3980 mddev->recovery_cp = oldsize;
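With md_unplug() and mddev_check_plugged() gone from md.h, raid10 now batches writes through the blk plug callback the way raid1 long has: bios collect on plug->pending while the task holds a plug, and the md thread is only woken when no plug is active. The other half of that pattern is the unplug callback; a hedged sketch of its shape (the real raid10_unplug in raid10.c additionally wakes barrier waiters and unplugs the bitmap):

	static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
	{
		struct raid10_plug_cb *plug =
			container_of(cb, struct raid10_plug_cb, cb);
		struct mddev *mddev = plug->cb.data;
		struct r10conf *conf = mddev->private;
		struct bio *bio;

		if (from_schedule) {
			/* Can't issue I/O from here: hand the batch to raid10d. */
			spin_lock_irq(&conf->device_lock);
			bio_list_merge(&conf->pending_bio_list, &plug->pending);
			conf->pending_count += plug->pending_cnt;
			spin_unlock_irq(&conf->device_lock);
			md_wakeup_thread(mddev->thread);
			kfree(plug);
			return;
		}

		/* Otherwise submit the batched bios directly. */
		while ((bio = bio_list_pop(&plug->pending)))
			generic_make_request(bio);
		kfree(plug);
	}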
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4fb09b3fcb41..ed5cd705b985 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
1401 (test_bit(R5_Wantdrain, &dev->flags) || 1401 (test_bit(R5_Wantdrain, &dev->flags) ||
1402 test_bit(R5_InJournal, &dev->flags))) || 1402 test_bit(R5_InJournal, &dev->flags))) ||
1403 (srctype == SYNDROME_SRC_WRITTEN && 1403 (srctype == SYNDROME_SRC_WRITTEN &&
1404 dev->written)) { 1404 (dev->written ||
1405 test_bit(R5_InJournal, &dev->flags)))) {
1405 if (test_bit(R5_InJournal, &dev->flags)) 1406 if (test_bit(R5_InJournal, &dev->flags))
1406 srcs[slot] = sh->dev[i].orig_page; 1407 srcs[slot] = sh->dev[i].orig_page;
1407 else 1408 else
@@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
7605 return ret; 7606 return ret;
7606 } 7607 }
7607 md_set_array_sectors(mddev, newsize); 7608 md_set_array_sectors(mddev, newsize);
7608 set_capacity(mddev->gendisk, mddev->array_sectors);
7609 revalidate_disk(mddev->gendisk);
7610 if (sectors > mddev->dev_sectors && 7609 if (sectors > mddev->dev_sectors &&
7611 mddev->recovery_cp > mddev->dev_sectors) { 7610 mddev->recovery_cp > mddev->dev_sectors) {
7612 mddev->recovery_cp = mddev->dev_sectors; 7611 mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index 7a681d8202c7..4442e478db72 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
256* 256*
257* The actual DAP implementation may be restricted to only one of the modes. 257* The actual DAP implementation may be restricted to only one of the modes.
258* A compiler warning or error will be generated if the DAP implementation 258* A compiler warning or error will be generated if the DAP implementation
259* overides or cannot handle the mode defined below. 259* overrides or cannot handle the mode defined below.
260*
261*/ 260*/
262#ifndef DRXDAP_SINGLE_MASTER 261#ifndef DRXDAP_SINGLE_MASTER
263#define DRXDAP_SINGLE_MASTER 1 262#define DRXDAP_SINGLE_MASTER 1
@@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
272* 271*
273* This maximum size may be restricted by the actual DAP implementation. 272* This maximum size may be restricted by the actual DAP implementation.
274* A compiler warning or error will be generated if the DAP implementation 273* A compiler warning or error will be generated if the DAP implementation
275* overides or cannot handle the chunksize defined below. 274* overrides or cannot handle the chunksize defined below.
276* 275*
277* Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data 276* Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data
278* buffer. Do not undefine or choose too large, unless your system is able to 277* buffer. Do not undefine or choose too large, unless your system is able to
@@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
292* 291*
293* This maximum size may be restricted by the actual DAP implementation. 292* This maximum size may be restricted by the actual DAP implementation.
294* A compiler warning or error will be generated if the DAP implementation 293* A compiler warning or error will be generated if the DAP implementation
295* overides or cannot handle the chunksize defined below. 294* overrides or cannot handle the chunksize defined below.
296*
297*/ 295*/
298#ifndef DRXDAP_MAX_RCHUNKSIZE 296#ifndef DRXDAP_MAX_RCHUNKSIZE
299#define DRXDAP_MAX_RCHUNKSIZE 60 297#define DRXDAP_MAX_RCHUNKSIZE 60
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index b4b583f7137a..b4c0f10fc3b0 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -54,12 +54,11 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
54/** 54/**
55 * vsp1_du_setup_lif - Setup the output part of the VSP pipeline 55 * vsp1_du_setup_lif - Setup the output part of the VSP pipeline
56 * @dev: the VSP device 56 * @dev: the VSP device
57 * @width: output frame width in pixels 57 * @cfg: the LIF configuration
58 * @height: output frame height in pixels
59 * 58 *
60 * Configure the output part of VSP DRM pipeline for the given frame @width and 59 * Configure the output part of VSP DRM pipeline for the given frame @cfg.width
61 * @height. This sets up formats on the BRU source pad, the WPF0 sink and source 60 * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink
62 * pads, and the LIF sink pad. 61 * and source pads, and the LIF sink pad.
63 * 62 *
64 * As the media bus code on the BRU source pad is conditioned by the 63 * As the media bus code on the BRU source pad is conditioned by the
65 * configuration of the BRU sink 0 pad, we also set up the formats on all BRU 64 * configuration of the BRU sink 0 pad, we also set up the formats on all BRU
@@ -69,8 +68,7 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
69 * 68 *
70 * Return 0 on success or a negative error code on failure. 69 * Return 0 on success or a negative error code on failure.
71 */ 70 */
72int vsp1_du_setup_lif(struct device *dev, unsigned int width, 71int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
73 unsigned int height)
74{ 72{
75 struct vsp1_device *vsp1 = dev_get_drvdata(dev); 73 struct vsp1_device *vsp1 = dev_get_drvdata(dev);
76 struct vsp1_pipeline *pipe = &vsp1->drm->pipe; 74 struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
@@ -79,11 +77,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
79 unsigned int i; 77 unsigned int i;
80 int ret; 78 int ret;
81 79
82 dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n", 80 if (!cfg) {
83 __func__, width, height); 81 /* NULL configuration means the CRTC is being disabled, stop
84
85 if (width == 0 || height == 0) {
86 /* Zero width or height means the CRTC is being disabled, stop
87 * the pipeline and turn the light off. 82 * the pipeline and turn the light off.
88 */ 83 */
89 ret = vsp1_pipeline_stop(pipe); 84 ret = vsp1_pipeline_stop(pipe);
@@ -108,6 +103,9 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
108 return 0; 103 return 0;
109 } 104 }
110 105
106 dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
107 __func__, cfg->width, cfg->height);
108
111 /* Configure the format at the BRU sinks and propagate it through the 109 /* Configure the format at the BRU sinks and propagate it through the
112 * pipeline. 110 * pipeline.
113 */ 111 */
@@ -117,8 +115,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
117 for (i = 0; i < bru->entity.source_pad; ++i) { 115 for (i = 0; i < bru->entity.source_pad; ++i) {
118 format.pad = i; 116 format.pad = i;
119 117
120 format.format.width = width; 118 format.format.width = cfg->width;
121 format.format.height = height; 119 format.format.height = cfg->height;
122 format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; 120 format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
123 format.format.field = V4L2_FIELD_NONE; 121 format.format.field = V4L2_FIELD_NONE;
124 122
@@ -133,8 +131,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
133 } 131 }
134 132
135 format.pad = bru->entity.source_pad; 133 format.pad = bru->entity.source_pad;
136 format.format.width = width; 134 format.format.width = cfg->width;
137 format.format.height = height; 135 format.format.height = cfg->height;
138 format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; 136 format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
139 format.format.field = V4L2_FIELD_NONE; 137 format.format.field = V4L2_FIELD_NONE;
140 138
@@ -180,7 +178,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
180 /* Verify that the format at the output of the pipeline matches the 178 /* Verify that the format at the output of the pipeline matches the
181 * requested frame size and media bus code. 179 * requested frame size and media bus code.
182 */ 180 */
183 if (format.format.width != width || format.format.height != height || 181 if (format.format.width != cfg->width ||
182 format.format.height != cfg->height ||
184 format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) { 183 format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) {
185 dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__); 184 dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__);
186 return -EPIPE; 185 return -EPIPE;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 393dccaabdd0..1688893a65bb 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -436,6 +436,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
436 return -ERESTARTSYS; 436 return -ERESTARTSYS;
437 437
438 ir = irctls[iminor(inode)]; 438 ir = irctls[iminor(inode)];
439 mutex_unlock(&lirc_dev_lock);
440
439 if (!ir) { 441 if (!ir) {
440 retval = -ENODEV; 442 retval = -ENODEV;
441 goto error; 443 goto error;
@@ -476,8 +478,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
476 } 478 }
477 479
478error: 480error:
479 mutex_unlock(&lirc_dev_lock);
480
481 nonseekable_open(inode, file); 481 nonseekable_open(inode, file);
482 482
483 return retval; 483 return retval;
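The lirc_dev change shrinks the critical section of the global lirc_dev_lock to the only thing it guards, the irctls[] lookup; everything after that point operates on the per-device object and no longer serializes every open() against every other. The shape of the fix, as a sketch:

	if (mutex_lock_interruptible(&lirc_dev_lock))
		return -ERESTARTSYS;
	ir = irctls[iminor(inode)];	/* the only state the lock protects */
	mutex_unlock(&lirc_dev_lock);

	if (!ir) {
		retval = -ENODEV;
		goto error;		/* error path no longer unlocks */
	}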
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index b109f8246b96..ec4b25bd2ec2 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -176,12 +176,13 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
176{ 176{
177 u8 tolerance, config; 177 u8 tolerance, config;
178 struct nvt_dev *nvt = dev->priv; 178 struct nvt_dev *nvt = dev->priv;
179 unsigned long flags;
179 int i; 180 int i;
180 181
181 /* hardcode the tolerance to 10% */ 182 /* hardcode the tolerance to 10% */
182 tolerance = DIV_ROUND_UP(count, 10); 183 tolerance = DIV_ROUND_UP(count, 10);
183 184
184 spin_lock(&nvt->lock); 185 spin_lock_irqsave(&nvt->lock, flags);
185 186
186 nvt_clear_cir_wake_fifo(nvt); 187 nvt_clear_cir_wake_fifo(nvt);
187 nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP); 188 nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
@@ -203,7 +204,7 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
203 204
204 nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON); 205 nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
205 206
206 spin_unlock(&nvt->lock); 207 spin_unlock_irqrestore(&nvt->lock, flags);
207} 208}
208 209
209static ssize_t wakeup_data_show(struct device *dev, 210static ssize_t wakeup_data_show(struct device *dev,
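nvt->lock is also taken from the driver's interrupt handler, so this sysfs-triggered path must use the irqsave variant; a plain spin_lock() in process context can deadlock the CPU if the CIR interrupt fires while the lock is held. The general rule, sketched with a hypothetical foo driver:

	/* A lock shared with an IRQ handler needs irqsave in process context. */
	static irqreturn_t foo_irq(int irq, void *data)
	{
		struct foo_dev *foo = data;

		spin_lock(&foo->lock);		/* IRQs are already off here */
		foo->events++;
		spin_unlock(&foo->lock);
		return IRQ_HANDLED;
	}

	static ssize_t foo_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
	{
		struct foo_dev *foo = dev_get_drvdata(dev);
		unsigned long flags;

		spin_lock_irqsave(&foo->lock, flags);	/* blocks foo_irq here */
		foo->events = 0;
		spin_unlock_irqrestore(&foo->lock, flags);
		return len;
	}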
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 2424946740e6..d84533699668 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1663,6 +1663,7 @@ static int rc_setup_rx_device(struct rc_dev *dev)
1663{ 1663{
1664 int rc; 1664 int rc;
1665 struct rc_map *rc_map; 1665 struct rc_map *rc_map;
1666 u64 rc_type;
1666 1667
1667 if (!dev->map_name) 1668 if (!dev->map_name)
1668 return -EINVAL; 1669 return -EINVAL;
@@ -1677,15 +1678,18 @@ static int rc_setup_rx_device(struct rc_dev *dev)
1677 if (rc) 1678 if (rc)
1678 return rc; 1679 return rc;
1679 1680
1680 if (dev->change_protocol) { 1681 rc_type = BIT_ULL(rc_map->rc_type);
1681 u64 rc_type = (1ll << rc_map->rc_type);
1682 1682
1683 if (dev->change_protocol) {
1683 rc = dev->change_protocol(dev, &rc_type); 1684 rc = dev->change_protocol(dev, &rc_type);
1684 if (rc < 0) 1685 if (rc < 0)
1685 goto out_table; 1686 goto out_table;
1686 dev->enabled_protocols = rc_type; 1687 dev->enabled_protocols = rc_type;
1687 } 1688 }
1688 1689
1690 if (dev->driver_type == RC_DRIVER_IR_RAW)
1691 ir_raw_load_modules(&rc_type);
1692
1689 set_bit(EV_KEY, dev->input_dev->evbit); 1693 set_bit(EV_KEY, dev->input_dev->evbit);
1690 set_bit(EV_REP, dev->input_dev->evbit); 1694 set_bit(EV_REP, dev->input_dev->evbit);
1691 set_bit(EV_MSC, dev->input_dev->evbit); 1695 set_bit(EV_MSC, dev->input_dev->evbit);
@@ -1777,12 +1781,6 @@ int rc_register_device(struct rc_dev *dev)
1777 dev->input_name ?: "Unspecified device", path ?: "N/A"); 1781 dev->input_name ?: "Unspecified device", path ?: "N/A");
1778 kfree(path); 1782 kfree(path);
1779 1783
1780 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
1781 rc = rc_setup_rx_device(dev);
1782 if (rc)
1783 goto out_dev;
1784 }
1785
1786 if (dev->driver_type == RC_DRIVER_IR_RAW || 1784 if (dev->driver_type == RC_DRIVER_IR_RAW ||
1787 dev->driver_type == RC_DRIVER_IR_RAW_TX) { 1785 dev->driver_type == RC_DRIVER_IR_RAW_TX) {
1788 if (!raw_init) { 1786 if (!raw_init) {
@@ -1791,7 +1789,13 @@ int rc_register_device(struct rc_dev *dev)
1791 } 1789 }
1792 rc = ir_raw_event_register(dev); 1790 rc = ir_raw_event_register(dev);
1793 if (rc < 0) 1791 if (rc < 0)
1794 goto out_rx; 1792 goto out_dev;
1793 }
1794
1795 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
1796 rc = rc_setup_rx_device(dev);
1797 if (rc)
1798 goto out_raw;
1795 } 1799 }
1796 1800
1797 /* Allow the RC sysfs nodes to be accessible */ 1801 /* Allow the RC sysfs nodes to be accessible */
@@ -1803,8 +1807,8 @@ int rc_register_device(struct rc_dev *dev)
1803 1807
1804 return 0; 1808 return 0;
1805 1809
1806out_rx: 1810out_raw:
1807 rc_free_rx_device(dev); 1811 ir_raw_event_unregister(dev);
1808out_dev: 1812out_dev:
1809 device_del(&dev->dev); 1813 device_del(&dev->dev);
1810out_unlock: 1814out_unlock:
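Registering the raw-event side before the RX device lets rc_register_device() unwind strictly in reverse: the new out_raw label undoes ir_raw_event_register(), then out_dev undoes the earlier device_add(). As a sketch of the ladder (locking and the surrounding steps elided):

	rc = device_add(&dev->dev);		/* step 1 */
	if (rc)
		goto out_unlock;

	rc = ir_raw_event_register(dev);	/* step 2 */
	if (rc < 0)
		goto out_dev;

	rc = rc_setup_rx_device(dev);		/* step 3 */
	if (rc)
		goto out_raw;

	return 0;

	out_raw:
		ir_raw_event_unregister(dev);	/* undo step 2 */
	out_dev:
		device_del(&dev->dev);		/* undo step 1 */
	out_unlock:
		return rc;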
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 923fb2299553..41b54e40176c 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -487,10 +487,69 @@ static void serial_ir_timeout(unsigned long arg)
487 ir_raw_event_handle(serial_ir.rcdev); 487 ir_raw_event_handle(serial_ir.rcdev);
488} 488}
489 489
490/* Needed by serial_ir_probe() */
491static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
492 unsigned int count);
493static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle);
494static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier);
495static int serial_ir_open(struct rc_dev *rcdev);
496static void serial_ir_close(struct rc_dev *rcdev);
497
490static int serial_ir_probe(struct platform_device *dev) 498static int serial_ir_probe(struct platform_device *dev)
491{ 499{
500 struct rc_dev *rcdev;
492 int i, nlow, nhigh, result; 501 int i, nlow, nhigh, result;
493 502
503 rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW);
504 if (!rcdev)
505 return -ENOMEM;
506
507 if (hardware[type].send_pulse && hardware[type].send_space)
508 rcdev->tx_ir = serial_ir_tx;
509 if (hardware[type].set_send_carrier)
510 rcdev->s_tx_carrier = serial_ir_tx_carrier;
511 if (hardware[type].set_duty_cycle)
512 rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
513
514 switch (type) {
515 case IR_HOMEBREW:
516 rcdev->input_name = "Serial IR type home-brew";
517 break;
518 case IR_IRDEO:
519 rcdev->input_name = "Serial IR type IRdeo";
520 break;
521 case IR_IRDEO_REMOTE:
522 rcdev->input_name = "Serial IR type IRdeo remote";
523 break;
524 case IR_ANIMAX:
525 rcdev->input_name = "Serial IR type AnimaX";
526 break;
527 case IR_IGOR:
528 rcdev->input_name = "Serial IR type IgorPlug";
529 break;
530 }
531
532 rcdev->input_phys = KBUILD_MODNAME "/input0";
533 rcdev->input_id.bustype = BUS_HOST;
534 rcdev->input_id.vendor = 0x0001;
535 rcdev->input_id.product = 0x0001;
536 rcdev->input_id.version = 0x0100;
537 rcdev->open = serial_ir_open;
538 rcdev->close = serial_ir_close;
539 rcdev->dev.parent = &serial_ir.pdev->dev;
540 rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
541 rcdev->driver_name = KBUILD_MODNAME;
542 rcdev->map_name = RC_MAP_RC6_MCE;
543 rcdev->min_timeout = 1;
544 rcdev->timeout = IR_DEFAULT_TIMEOUT;
545 rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
546 rcdev->rx_resolution = 250000;
547
548 serial_ir.rcdev = rcdev;
549
550 setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
551 (unsigned long)&serial_ir);
552
494 result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler, 553 result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
495 share_irq ? IRQF_SHARED : 0, 554 share_irq ? IRQF_SHARED : 0,
496 KBUILD_MODNAME, &hardware); 555 KBUILD_MODNAME, &hardware);
@@ -516,9 +575,6 @@ static int serial_ir_probe(struct platform_device *dev)
516 return -EBUSY; 575 return -EBUSY;
517 } 576 }
518 577
519 setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
520 (unsigned long)&serial_ir);
521
522 result = hardware_init_port(); 578 result = hardware_init_port();
523 if (result < 0) 579 if (result < 0)
524 return result; 580 return result;
@@ -552,7 +608,8 @@ static int serial_ir_probe(struct platform_device *dev)
552 sense ? "low" : "high"); 608 sense ? "low" : "high");
553 609
554 dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io); 610 dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
555 return 0; 611
612 return devm_rc_register_device(&dev->dev, rcdev);
556} 613}
557 614
558static int serial_ir_open(struct rc_dev *rcdev) 615static int serial_ir_open(struct rc_dev *rcdev)
@@ -723,7 +780,6 @@ static void serial_ir_exit(void)
723 780
724static int __init serial_ir_init_module(void) 781static int __init serial_ir_init_module(void)
725{ 782{
726 struct rc_dev *rcdev;
727 int result; 783 int result;
728 784
729 switch (type) { 785 switch (type) {
@@ -754,63 +810,9 @@ static int __init serial_ir_init_module(void)
754 sense = !!sense; 810 sense = !!sense;
755 811
756 result = serial_ir_init(); 812 result = serial_ir_init();
757 if (result)
758 return result;
759
760 rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW);
761 if (!rcdev) {
762 result = -ENOMEM;
763 goto serial_cleanup;
764 }
765
766 if (hardware[type].send_pulse && hardware[type].send_space)
767 rcdev->tx_ir = serial_ir_tx;
768 if (hardware[type].set_send_carrier)
769 rcdev->s_tx_carrier = serial_ir_tx_carrier;
770 if (hardware[type].set_duty_cycle)
771 rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
772
773 switch (type) {
774 case IR_HOMEBREW:
775 rcdev->input_name = "Serial IR type home-brew";
776 break;
777 case IR_IRDEO:
778 rcdev->input_name = "Serial IR type IRdeo";
779 break;
780 case IR_IRDEO_REMOTE:
781 rcdev->input_name = "Serial IR type IRdeo remote";
782 break;
783 case IR_ANIMAX:
784 rcdev->input_name = "Serial IR type AnimaX";
785 break;
786 case IR_IGOR:
787 rcdev->input_name = "Serial IR type IgorPlug";
788 break;
789 }
790
791 rcdev->input_phys = KBUILD_MODNAME "/input0";
792 rcdev->input_id.bustype = BUS_HOST;
793 rcdev->input_id.vendor = 0x0001;
794 rcdev->input_id.product = 0x0001;
795 rcdev->input_id.version = 0x0100;
796 rcdev->open = serial_ir_open;
797 rcdev->close = serial_ir_close;
798 rcdev->dev.parent = &serial_ir.pdev->dev;
799 rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
800 rcdev->driver_name = KBUILD_MODNAME;
801 rcdev->map_name = RC_MAP_RC6_MCE;
802 rcdev->min_timeout = 1;
803 rcdev->timeout = IR_DEFAULT_TIMEOUT;
804 rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
805 rcdev->rx_resolution = 250000;
806
807 serial_ir.rcdev = rcdev;
808
809 result = rc_register_device(rcdev);
810
811 if (!result) 813 if (!result)
812 return 0; 814 return 0;
813serial_cleanup: 815
814 serial_ir_exit(); 816 serial_ir_exit();
815 return result; 817 return result;
816} 818}
@@ -818,7 +820,6 @@ serial_cleanup:
818static void __exit serial_ir_exit_module(void) 820static void __exit serial_ir_exit_module(void)
819{ 821{
820 del_timer_sync(&serial_ir.timeout_timer); 822 del_timer_sync(&serial_ir.timeout_timer);
821 rc_unregister_device(serial_ir.rcdev);
822 serial_ir_exit(); 823 serial_ir_exit();
823} 824}
824 825
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 6ca502d834b4..4f42d57f81d9 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -68,6 +68,7 @@
68struct dw2102_state { 68struct dw2102_state {
69 u8 initialized; 69 u8 initialized;
70 u8 last_lock; 70 u8 last_lock;
71 u8 data[MAX_XFER_SIZE + 4];
71 struct i2c_client *i2c_client_demod; 72 struct i2c_client *i2c_client_demod;
72 struct i2c_client *i2c_client_tuner; 73 struct i2c_client *i2c_client_tuner;
73 74
@@ -661,62 +662,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
661 int num) 662 int num)
662{ 663{
663 struct dvb_usb_device *d = i2c_get_adapdata(adap); 664 struct dvb_usb_device *d = i2c_get_adapdata(adap);
664 u8 obuf[0x40], ibuf[0x40]; 665 struct dw2102_state *state;
665 666
666 if (!d) 667 if (!d)
667 return -ENODEV; 668 return -ENODEV;
669
670 state = d->priv;
671
668 if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 672 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
669 return -EAGAIN; 673 return -EAGAIN;
674 if (mutex_lock_interruptible(&d->data_mutex) < 0) {
675 mutex_unlock(&d->i2c_mutex);
676 return -EAGAIN;
677 }
670 678
671 switch (num) { 679 switch (num) {
672 case 1: 680 case 1:
673 switch (msg[0].addr) { 681 switch (msg[0].addr) {
674 case SU3000_STREAM_CTRL: 682 case SU3000_STREAM_CTRL:
675 obuf[0] = msg[0].buf[0] + 0x36; 683 state->data[0] = msg[0].buf[0] + 0x36;
676 obuf[1] = 3; 684 state->data[1] = 3;
677 obuf[2] = 0; 685 state->data[2] = 0;
678 if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0) 686 if (dvb_usb_generic_rw(d, state->data, 3,
687 state->data, 0, 0) < 0)
679 err("i2c transfer failed."); 688 err("i2c transfer failed.");
680 break; 689 break;
681 case DW2102_RC_QUERY: 690 case DW2102_RC_QUERY:
682 obuf[0] = 0x10; 691 state->data[0] = 0x10;
683 if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0) 692 if (dvb_usb_generic_rw(d, state->data, 1,
693 state->data, 2, 0) < 0)
684 err("i2c transfer failed."); 694 err("i2c transfer failed.");
685 msg[0].buf[1] = ibuf[0]; 695 msg[0].buf[1] = state->data[0];
686 msg[0].buf[0] = ibuf[1]; 696 msg[0].buf[0] = state->data[1];
687 break; 697 break;
688 default: 698 default:
689 /* always i2c write*/ 699 /* always i2c write*/
690 obuf[0] = 0x08; 700 state->data[0] = 0x08;
691 obuf[1] = msg[0].addr; 701 state->data[1] = msg[0].addr;
692 obuf[2] = msg[0].len; 702 state->data[2] = msg[0].len;
693 703
694 memcpy(&obuf[3], msg[0].buf, msg[0].len); 704 memcpy(&state->data[3], msg[0].buf, msg[0].len);
695 705
696 if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3, 706 if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
697 ibuf, 1, 0) < 0) 707 state->data, 1, 0) < 0)
698 err("i2c transfer failed."); 708 err("i2c transfer failed.");
699 709
700 } 710 }
701 break; 711 break;
702 case 2: 712 case 2:
703 /* always i2c read */ 713 /* always i2c read */
704 obuf[0] = 0x09; 714 state->data[0] = 0x09;
705 obuf[1] = msg[0].len; 715 state->data[1] = msg[0].len;
706 obuf[2] = msg[1].len; 716 state->data[2] = msg[1].len;
707 obuf[3] = msg[0].addr; 717 state->data[3] = msg[0].addr;
708 memcpy(&obuf[4], msg[0].buf, msg[0].len); 718 memcpy(&state->data[4], msg[0].buf, msg[0].len);
709 719
710 if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4, 720 if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
711 ibuf, msg[1].len + 1, 0) < 0) 721 state->data, msg[1].len + 1, 0) < 0)
712 err("i2c transfer failed."); 722 err("i2c transfer failed.");
713 723
714 memcpy(msg[1].buf, &ibuf[1], msg[1].len); 724 memcpy(msg[1].buf, &state->data[1], msg[1].len);
715 break; 725 break;
716 default: 726 default:
717 warn("more than 2 i2c messages at a time is not handled yet."); 727 warn("more than 2 i2c messages at a time is not handled yet.");
718 break; 728 break;
719 } 729 }
730 mutex_unlock(&d->data_mutex);
720 mutex_unlock(&d->i2c_mutex); 731 mutex_unlock(&d->i2c_mutex);
721 return num; 732 return num;
722} 733}
@@ -844,17 +855,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
844static int su3000_power_ctrl(struct dvb_usb_device *d, int i) 855static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
845{ 856{
846 struct dw2102_state *state = (struct dw2102_state *)d->priv; 857 struct dw2102_state *state = (struct dw2102_state *)d->priv;
847 u8 obuf[] = {0xde, 0}; 858 int ret = 0;
848 859
849 info("%s: %d, initialized %d", __func__, i, state->initialized); 860 info("%s: %d, initialized %d", __func__, i, state->initialized);
850 861
851 if (i && !state->initialized) { 862 if (i && !state->initialized) {
863 mutex_lock(&d->data_mutex);
864
865 state->data[0] = 0xde;
866 state->data[1] = 0;
867
852 state->initialized = 1; 868 state->initialized = 1;
853 /* reset board */ 869 /* reset board */
854 return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); 870 ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
871 mutex_unlock(&d->data_mutex);
855 } 872 }
856 873
857 return 0; 874 return ret;
858} 875}
859 876
860static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) 877static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
@@ -1309,49 +1326,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
1309 return 0; 1326 return 0;
1310} 1327}
1311 1328
1312static int su3000_frontend_attach(struct dvb_usb_adapter *d) 1329static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
1313{ 1330{
1314 u8 obuf[3] = { 0xe, 0x80, 0 }; 1331 struct dvb_usb_device *d = adap->dev;
1315 u8 ibuf[] = { 0 }; 1332 struct dw2102_state *state = d->priv;
1333
1334 mutex_lock(&d->data_mutex);
1335
1336 state->data[0] = 0xe;
1337 state->data[1] = 0x80;
1338 state->data[2] = 0;
1316 1339
1317 if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 1340 if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
1318 err("command 0x0e transfer failed."); 1341 err("command 0x0e transfer failed.");
1319 1342
1320 obuf[0] = 0xe; 1343 state->data[0] = 0xe;
1321 obuf[1] = 0x02; 1344 state->data[1] = 0x02;
1322 obuf[2] = 1; 1345 state->data[2] = 1;
1323 1346
1324 if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 1347 if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
1325 err("command 0x0e transfer failed."); 1348 err("command 0x0e transfer failed.");
1326 msleep(300); 1349 msleep(300);
1327 1350
1328 obuf[0] = 0xe; 1351 state->data[0] = 0xe;
1329 obuf[1] = 0x83; 1352 state->data[1] = 0x83;
1330 obuf[2] = 0; 1353 state->data[2] = 0;
1331 1354
1332 if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 1355 if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
1333 err("command 0x0e transfer failed."); 1356 err("command 0x0e transfer failed.");
1334 1357
1335 obuf[0] = 0xe; 1358 state->data[0] = 0xe;
1336 obuf[1] = 0x83; 1359 state->data[1] = 0x83;
1337 obuf[2] = 1; 1360 state->data[2] = 1;
1338 1361
1339 if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 1362 if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
1340 err("command 0x0e transfer failed."); 1363 err("command 0x0e transfer failed.");
1341 1364
1342 obuf[0] = 0x51; 1365 state->data[0] = 0x51;
1343 1366
1344 if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) 1367 if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
1345 err("command 0x51 transfer failed."); 1368 err("command 0x51 transfer failed.");
1346 1369
1347 d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, 1370 mutex_unlock(&d->data_mutex);
1348 &d->dev->i2c_adap); 1371
1349 if (d->fe_adap[0].fe == NULL) 1372 adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
1373 &d->i2c_adap);
1374 if (adap->fe_adap[0].fe == NULL)
1350 return -EIO; 1375 return -EIO;
1351 1376
1352 if (dvb_attach(ts2020_attach, d->fe_adap[0].fe, 1377 if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
1353 &dw2104_ts2020_config, 1378 &dw2104_ts2020_config,
1354 &d->dev->i2c_adap)) { 1379 &d->i2c_adap)) {
1355 info("Attached DS3000/TS2020!"); 1380 info("Attached DS3000/TS2020!");
1356 return 0; 1381 return 0;
1357 } 1382 }
@@ -1360,47 +1385,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
1360 return -EIO; 1385 return -EIO;
1361} 1386}
1362 1387
1363static int t220_frontend_attach(struct dvb_usb_adapter *d) 1388static int t220_frontend_attach(struct dvb_usb_adapter *adap)
1364{ 1389{
1365 u8 obuf[3] = { 0xe, 0x87, 0 }; 1390 struct dvb_usb_device *d = adap->dev;
1366 u8 ibuf[] = { 0 }; 1391 struct dw2102_state *state = d->priv;
1392
1393 mutex_lock(&d->data_mutex);
1367 1394
1368 if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 1395 state->data[0] = 0xe;
1396 state->data[1] = 0x87;
1397 state->data[2] = 0x0;
1398
1399 if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
1369 err("command 0x0e transfer failed."); 1400 err("command 0x0e transfer failed.");
1370 1401
1371 obuf[0] = 0xe; 1402 state->data[0] = 0xe;
1372 obuf[1] = 0x86; 1403 state->data[1] = 0x86;
1373 obuf[2] = 1; 1404 state->data[2] = 1;
1374 1405
1375 if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 1406 if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
1376 err("command 0x0e transfer failed."); 1407 err("command 0x0e transfer failed.");
1377 1408
1378 obuf[0] = 0xe; 1409 state->data[0] = 0xe;
1379 obuf[1] = 0x80; 1410 state->data[1] = 0x80;
1380 obuf[2] = 0; 1411 state->data[2] = 0;
1381 1412
1382 if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 1413 if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
1383 err("command 0x0e transfer failed."); 1414 err("command 0x0e transfer failed.");
1384 1415
1385 msleep(50); 1416 msleep(50);
1386 1417
1387 obuf[0] = 0xe; 1418 state->data[0] = 0xe;
1388 obuf[1] = 0x80; 1419 state->data[1] = 0x80;
1389 obuf[2] = 1; 1420 state->data[2] = 1;
1390 1421
1391 if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) 1422 if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
1392 err("command 0x0e transfer failed."); 1423 err("command 0x0e transfer failed.");
1393 1424
1394 obuf[0] = 0x51; 1425 state->data[0] = 0x51;
1395 1426
1396 if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) 1427 if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
1397 err("command 0x51 transfer failed."); 1428 err("command 0x51 transfer failed.");
1398 1429
1399 d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config, 1430 mutex_unlock(&d->data_mutex);
1400 &d->dev->i2c_adap, NULL); 1431
1401 if (d->fe_adap[0].fe != NULL) { 1432 adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
1402 if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60, 1433 &d->i2c_adap, NULL);
1403 &d->dev->i2c_adap, &tda18271_config)) { 1434 if (adap->fe_adap[0].fe != NULL) {
+	if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
+		       &d->i2c_adap, &tda18271_config)) {
 		info("Attached TDA18271HD/CXD2820R!");
 		return 0;
 	}
@@ -1410,23 +1443,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d)
 	return -EIO;
 }
 
-static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-	u8 obuf[] = { 0x51 };
-	u8 ibuf[] = { 0 };
+	struct dvb_usb_device *d = adap->dev;
+	struct dw2102_state *state = d->priv;
+
+	mutex_lock(&d->data_mutex);
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+	state->data[0] = 0x51;
+
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
-	d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
-				      &d->dev->i2c_adap);
+	mutex_unlock(&d->data_mutex);
 
-	if (d->fe_adap[0].fe == NULL)
+	adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
+					 &s421_m88rs2000_config,
+					 &d->i2c_adap);
+
+	if (adap->fe_adap[0].fe == NULL)
 		return -EIO;
 
-	if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
 		       &dw2104_ts2020_config,
-		       &d->dev->i2c_adap)) {
+		       &d->i2c_adap)) {
 		info("Attached RS2000/TS2020!");
 		return 0;
 	}
@@ -1439,44 +1479,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
 {
 	struct dvb_usb_device *d = adap->dev;
 	struct dw2102_state *state = d->priv;
-	u8 obuf[3] = { 0xe, 0x80, 0 };
-	u8 ibuf[] = { 0 };
 	struct i2c_adapter *i2c_adapter;
 	struct i2c_client *client;
 	struct i2c_board_info board_info;
 	struct m88ds3103_platform_data m88ds3103_pdata = {};
 	struct ts2020_config ts2020_config = {};
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 0x0;
+
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x02;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x02;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 	msleep(300);
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 0;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 0;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0x51;
+	state->data[0] = 0x51;
 
-	if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
+	mutex_unlock(&d->data_mutex);
+
 	/* attach demod */
 	m88ds3103_pdata.clk = 27000000;
 	m88ds3103_pdata.i2c_wr_max = 33;
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 6fb773dbcd0c..93be82fc338a 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -219,15 +219,20 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
 		int write, unsigned long *paddr, int *pageshift)
 {
 	pgd_t *pgdp;
-	pmd_t *pmdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
+	pmd_t *pmdp;
 	pte_t pte;
 
 	pgdp = pgd_offset(vma->vm_mm, vaddr);
 	if (unlikely(pgd_none(*pgdp)))
 		goto err;
 
-	pudp = pud_offset(pgdp, vaddr);
+	p4dp = p4d_offset(pgdp, vaddr);
+	if (unlikely(p4d_none(*p4dp)))
+		goto err;
+
+	pudp = pud_offset(p4dp, vaddr);
 	if (unlikely(pud_none(*pudp)))
 		goto err;
 
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 1ae872bfc3ba..747645c74134 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor)
 }
 
 /*
- * Send write disble instruction to the chip.
+ * Send write disable instruction to the chip.
  */
 static inline int write_disable(struct spi_nor *nor)
 {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 248f60d171a5..ffea9859f5a7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2272,10 +2272,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
 	processed = xgbe_rx_poll(channel, budget);
 
 	/* If we processed everything, we are done */
-	if (processed < budget) {
-		/* Turn off polling */
-		napi_complete_done(napi, processed);
-
+	if ((processed < budget) && napi_complete_done(napi, processed)) {
 		/* Enable Tx and Rx interrupts */
 		if (pdata->channel_irq_mode)
 			xgbe_enable_rx_tx_int(pdata, channel);
@@ -2317,10 +2314,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
 	} while ((processed < budget) && (processed != last_processed));
 
 	/* If we processed everything, we are done */
-	if (processed < budget) {
-		/* Turn off polling */
-		napi_complete_done(napi, processed);
-
+	if ((processed < budget) && napi_complete_done(napi, processed)) {
 		/* Enable Tx and Rx interrupts */
 		xgbe_enable_rx_tx_ints(pdata);
 	}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 581de71a958a..4c6c882c6a1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
 		if (!((1U << i) & self->msix_entry_mask))
 			continue;
 
-		free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
 		if (pdev->msix_enabled)
 			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+		free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
 		self->msix_entry_mask &= ~(1U << i);
 	}
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d8d06fdfc42b..ac76fc251d26 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
-	/* VF with OLD Hypervisor or old PF do not support filtering */
 	if (IS_PF(bp)) {
 		if (chip_is_e1x)
 			bp->accept_any_vlan = true;
 		else
 			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#ifdef CONFIG_BNX2X_SRIOV
-	} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
-		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#endif
 	}
+	/* For VF we'll know whether to enable VLAN filtering after
+	 * getting a response to CHANNEL_TLV_ACQUIRE from PF.
+	 */
 
 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= NETIF_F_HIGHDMA;
@@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 	if (!netif_running(bp->dev)) {
 		DP(BNX2X_MSG_PTP,
 		   "PTP adjfreq called while the interface is down\n");
-		return -EFAULT;
+		return -ENETDOWN;
 	}
 
 	if (ppb < 0) {
@@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP adjtime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
 
 	timecounter_adjtime(&bp->timecounter, delta);
@@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 	u64 ns;
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP gettime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	ns = timecounter_read(&bp->timecounter);
 
 	DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
@@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 	u64 ns;
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP settime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	ns = timespec64_to_ns(ts);
 
 	DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 		if (rc)
 			goto init_one_freemem;
+
+#ifdef CONFIG_BNX2X_SRIOV
+		/* VF with OLD Hypervisor or old PF do not support filtering */
+		if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+			dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		}
+#endif
 	}
 
 	/* Enable SRIOV if capability found in configuration space */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fad22adbbb9..bdfd53b46bc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 
 	/* Add/Remove the filter */
 	rc = bnx2x_config_vlan_mac(bp, &ramrod);
-	if (rc && rc != -EEXIST) {
+	if (rc == -EEXIST)
+		return 0;
+	if (rc) {
 		BNX2X_ERR("Failed to %s %s\n",
 			  filter->add ? "add" : "delete",
 			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 		return rc;
 	}
 
+	filter->applied = true;
+
 	return 0;
 }
 
@@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	/* Rollback if needed */
 	if (i != filters->count) {
 		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
-			  i, filters->count + 1);
+			  i, filters->count);
 		while (--i >= 0) {
+			if (!filters->filters[i].applied)
+				continue;
 			filters->filters[i].add = !filters->filters[i].add;
 			bnx2x_vf_mac_vlan_config(bp, vf, qid,
 						 &filters->filters[i],
@@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 			continue;
 		}
 
-		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+		DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+		       "add addresses for vf %d\n", vf->abs_vfid);
 		for_each_vfq(vf, j) {
 			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
 
@@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 				cpu_to_le32(U64_HI(q_stats_addr));
 			cur_query_entry->address.lo =
 				cpu_to_le32(U64_LO(q_stats_addr));
-			DP(BNX2X_MSG_IOV,
-			   "added address %x %x for vf %d queue %d client %d\n",
-			   cur_query_entry->address.hi,
-			   cur_query_entry->address.lo, cur_query_entry->funcID,
-			   j, cur_query_entry->index);
+			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+			       "added address %x %x for vf %d queue %d client %d\n",
+			       cur_query_entry->address.hi,
+			       cur_query_entry->address.lo,
+			       cur_query_entry->funcID,
+			       j, cur_query_entry->index);
 			cur_query_entry++;
 			cur_data_offset += sizeof(struct per_queue_stats);
 			stats_count++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 7a6d406f4c11..888d0b6632e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
 	(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
 
 	bool add;
+	bool applied;
 	u8 *mac;
 	u16 vid;
 };
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index bfae300cf25f..76a4668c50fe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 	struct bnx2x *bp = netdev_priv(dev);
 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
-	int rc, i = 0;
+	int rc = 0, i = 0;
 	struct netdev_hw_addr *ha;
 
 	if (bp->state != BNX2X_STATE_OPEN) {
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 	/* Get Rx mode requested */
 	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
 
+	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
+	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+		DP(NETIF_MSG_IFUP,
+		   "VF supports not more than %d multicast MAC addresses\n",
+		   PFVF_MAX_MULTICAST_PER_VF);
+		rc = -EINVAL;
+		goto out;
+	}
+
 	netdev_for_each_mc_addr(ha, dev) {
 		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
 		   bnx2x_mc_addr(ha));
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 		i++;
 	}
 
-	/* We support four PFVF_MAX_MULTICAST_PER_VF mcast
-	 * addresses tops
-	 */
-	if (i >= PFVF_MAX_MULTICAST_PER_VF) {
-		DP(NETIF_MSG_IFUP,
-		   "VF supports not more than %d multicast MAC addresses\n",
-		   PFVF_MAX_MULTICAST_PER_VF);
-		return -EINVAL;
-	}
-
 	req->n_multicast = i;
 	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
 	req->vf_qid = 0;
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 out:
 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-	return 0;
+	return rc;
 }
 
 /* request pf to add a vlan for the vf */
@@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 				goto op_err;
 		}
 
+		/* build vlan list */
+		fl = NULL;
+
+		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+					       VFPF_VLAN_FILTER);
+		if (rc)
+			goto op_err;
+
+		if (fl) {
+			/* set vlan list */
+			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+							   msg->vf_qid,
+							   false);
+			if (rc)
+				goto op_err;
+		}
+
 	}
 
 	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 235733e91c79..32de4589d16a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4465,6 +4465,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
 	}
 #endif
+	if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
+			    FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
+		bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+
 	switch (resp->port_partition_type) {
 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
@@ -5507,8 +5511,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
 	}
-	link_info->support_auto_speeds =
-		le16_to_cpu(resp->supported_speeds_auto_mode);
+	if (resp->supported_speeds_auto_mode)
+		link_info->support_auto_speeds =
+			le16_to_cpu(resp->supported_speeds_auto_mode);
 
 hwrm_phy_qcaps_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
@@ -6495,8 +6500,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
 	if (!silent)
 		bnxt_dbg_dump_states(bp);
 	if (netif_running(bp->dev)) {
+		int rc;
+
+		if (!silent)
+			bnxt_ulp_stop(bp);
 		bnxt_close_nic(bp, false, false);
-		bnxt_open_nic(bp, false, false);
+		rc = bnxt_open_nic(bp, false, false);
+		if (!silent && !rc)
+			bnxt_ulp_start(bp);
 	}
 }
 
@@ -7444,6 +7455,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_pci_clean;
 
+	rc = bnxt_hwrm_func_reset(bp);
+	if (rc)
+		goto init_err_pci_clean;
+
 	bnxt_hwrm_fw_set_time(bp);
 
 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
@@ -7554,10 +7569,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_pci_clean;
 
-	rc = bnxt_hwrm_func_reset(bp);
-	if (rc)
-		goto init_err_pci_clean;
-
 	rc = bnxt_init_int_mode(bp);
 	if (rc)
 		goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index faf26a2f726b..c7a5b84a5cb2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -993,6 +993,7 @@ struct bnxt {
 					 BNXT_FLAG_ROCEV2_CAP)
 	#define BNXT_FLAG_NO_AGG_RINGS	0x20000
 	#define BNXT_FLAG_RX_PAGE_MODE	0x40000
+	#define BNXT_FLAG_FW_LLDP_AGENT	0x80000
 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |	\
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fdf2d8caf7bf..03532061d211 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp)
 		return;
 
 	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
-	if (BNXT_PF(bp))
+	if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
 		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
 	else
 		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f92896835d2a..69015fa50f20 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,7 +1,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) controller driver
  *
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
 			genet_dma_ring_regs[r]);
 }
 
+static int bcmgenet_begin(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	/* Turn on the clock */
+	return clk_prepare_enable(priv->clk);
+}
+
+static void bcmgenet_complete(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	/* Turn off the clock */
+	clk_disable_unprepare(priv->clk);
+}
+
 static int bcmgenet_get_link_ksettings(struct net_device *dev,
 				       struct ethtool_link_ksettings *cmd)
 {
@@ -778,8 +794,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
 	/* Misc UniMAC counters */
 	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
-			UMAC_RBUF_OVFL_CNT),
-	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+			UMAC_RBUF_OVFL_CNT_V1),
+	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+			UMAC_RBUF_ERR_CNT_V1),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@@ -821,6 +838,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
 	}
 }
 
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+{
+	u16 new_offset;
+	u32 val;
+
+	switch (offset) {
+	case UMAC_RBUF_OVFL_CNT_V1:
+		if (GENET_IS_V2(priv))
+			new_offset = RBUF_OVFL_CNT_V2;
+		else
+			new_offset = RBUF_OVFL_CNT_V3PLUS;
+
+		val = bcmgenet_rbuf_readl(priv, new_offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_rbuf_writel(priv, 0, new_offset);
+		break;
+	case UMAC_RBUF_ERR_CNT_V1:
+		if (GENET_IS_V2(priv))
+			new_offset = RBUF_ERR_CNT_V2;
+		else
+			new_offset = RBUF_ERR_CNT_V3PLUS;
+
+		val = bcmgenet_rbuf_readl(priv, new_offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_rbuf_writel(priv, 0, new_offset);
+		break;
+	default:
+		val = bcmgenet_umac_readl(priv, offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_umac_writel(priv, 0, offset);
+		break;
+	}
+
+	return val;
+}
+
 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 {
 	int i, j = 0;
@@ -836,19 +892,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 		case BCMGENET_STAT_NETDEV:
 		case BCMGENET_STAT_SOFT:
 			continue;
-		case BCMGENET_STAT_MIB_RX:
-		case BCMGENET_STAT_MIB_TX:
 		case BCMGENET_STAT_RUNT:
-			if (s->type != BCMGENET_STAT_MIB_RX)
-				offset = BCMGENET_STAT_OFFSET;
+			offset += BCMGENET_STAT_OFFSET;
+			/* fall through */
+		case BCMGENET_STAT_MIB_TX:
+			offset += BCMGENET_STAT_OFFSET;
+			/* fall through */
+		case BCMGENET_STAT_MIB_RX:
 			val = bcmgenet_umac_readl(priv,
 						  UMAC_MIB_START + j + offset);
+			offset = 0;	/* Reset Offset */
 			break;
 		case BCMGENET_STAT_MISC:
-			val = bcmgenet_umac_readl(priv, s->reg_offset);
-			/* clear if overflowed */
-			if (val == ~0)
-				bcmgenet_umac_writel(priv, 0, s->reg_offset);
+			if (GENET_IS_V1(priv)) {
+				val = bcmgenet_umac_readl(priv, s->reg_offset);
+				/* clear if overflowed */
+				if (val == ~0)
+					bcmgenet_umac_writel(priv, 0,
+							     s->reg_offset);
+			} else {
+				val = bcmgenet_update_stat_misc(priv,
+								s->reg_offset);
+			}
 			break;
 		}
 
@@ -973,6 +1038,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
 
 /* standard ethtool support functions. */
 static const struct ethtool_ops bcmgenet_ethtool_ops = {
+	.begin = bcmgenet_begin,
+	.complete = bcmgenet_complete,
 	.get_strings = bcmgenet_get_strings,
 	.get_sset_count = bcmgenet_get_sset_count,
 	.get_ethtool_stats = bcmgenet_get_ethtool_stats,
@@ -1167,7 +1234,6 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *tx_cb_ptr;
-	struct netdev_queue *txq;
 	unsigned int pkts_compl = 0;
 	unsigned int bytes_compl = 0;
 	unsigned int c_index;
@@ -1219,13 +1285,8 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 	dev->stats.tx_packets += pkts_compl;
 	dev->stats.tx_bytes += bytes_compl;
 
-	txq = netdev_get_tx_queue(dev, ring->queue);
-	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
-
-	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
-		if (netif_tx_queue_stopped(txq))
-			netif_tx_wake_queue(txq);
-	}
+	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+				  pkts_compl, bytes_compl);
 
 	return pkts_compl;
 }
@@ -1248,8 +1309,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
 	struct bcmgenet_tx_ring *ring =
 		container_of(napi, struct bcmgenet_tx_ring, napi);
 	unsigned int work_done = 0;
+	struct netdev_queue *txq;
+	unsigned long flags;
 
-	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+	spin_lock_irqsave(&ring->lock, flags);
+	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
+	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+		netif_tx_wake_queue(txq);
+	}
+	spin_unlock_irqrestore(&ring->lock, flags);
 
 	if (work_done == 0) {
 		napi_complete(napi);
@@ -2457,24 +2526,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 /* Interrupt bottom half */
 static void bcmgenet_irq_task(struct work_struct *work)
 {
+	unsigned long flags;
+	unsigned int status;
 	struct bcmgenet_priv *priv = container_of(
 			work, struct bcmgenet_priv, bcmgenet_irq_work);
 
 	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
-	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
-		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+	spin_lock_irqsave(&priv->lock, flags);
+	status = priv->irq0_stat;
+	priv->irq0_stat = 0;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (status & UMAC_IRQ_MPD_R) {
 		netif_dbg(priv, wol, priv->dev,
 			  "magic packet detected, waking up\n");
 		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
 	}
 
 	/* Link UP/DOWN event */
-	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+	if (status & UMAC_IRQ_LINK_EVENT)
 		phy_mac_interrupt(priv->phydev,
-				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
-		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
-	}
+				  !!(status & UMAC_IRQ_LINK_UP));
 }
 
 /* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2483,22 +2556,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 	struct bcmgenet_priv *priv = dev_id;
 	struct bcmgenet_rx_ring *rx_ring;
 	struct bcmgenet_tx_ring *tx_ring;
-	unsigned int index;
+	unsigned int index, status;
 
-	/* Save irq status for bottom-half processing. */
-	priv->irq1_stat =
-		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+	/* Read irq status */
+	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
 
 	/* clear interrupts */
-	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+		  "%s: IRQ=0x%x\n", __func__, status);
 
 	/* Check Rx priority queue interrupts */
 	for (index = 0; index < priv->hw_params->rx_queues; index++) {
-		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
 			continue;
 
 		rx_ring = &priv->rx_rings[index];
@@ -2511,7 +2583,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 
 	/* Check Tx priority queue interrupts */
 	for (index = 0; index < priv->hw_params->tx_queues; index++) {
-		if (!(priv->irq1_stat & BIT(index)))
+		if (!(status & BIT(index)))
 			continue;
 
 		tx_ring = &priv->tx_rings[index];
@@ -2531,19 +2603,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 	struct bcmgenet_priv *priv = dev_id;
 	struct bcmgenet_rx_ring *rx_ring;
 	struct bcmgenet_tx_ring *tx_ring;
+	unsigned int status;
+	unsigned long flags;
 
-	/* Save irq status for bottom-half processing. */
-	priv->irq0_stat =
-		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+	/* Read irq status */
+	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
 
 	/* clear interrupts */
-	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		  "IRQ=0x%x\n", priv->irq0_stat);
+		  "IRQ=0x%x\n", status);
 
-	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+	if (status & UMAC_IRQ_RXDMA_DONE) {
 		rx_ring = &priv->rx_rings[DESC_INDEX];
 
 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2552,7 +2625,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 		}
 	}
 
-	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+	if (status & UMAC_IRQ_TXDMA_DONE) {
 		tx_ring = &priv->tx_rings[DESC_INDEX];
 
 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2561,22 +2634,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 		}
 	}
 
-	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
-			       UMAC_IRQ_PHY_DET_F |
-			       UMAC_IRQ_LINK_EVENT |
-			       UMAC_IRQ_HFB_SM |
-			       UMAC_IRQ_HFB_MM |
-			       UMAC_IRQ_MPD_R)) {
-		/* all other interested interrupts handled in bottom half */
-		schedule_work(&priv->bcmgenet_irq_work);
-	}
-
 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
-		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+	    status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
 		wake_up(&priv->wq);
 	}
 
+	/* all other interested interrupts handled in bottom half */
+	status &= (UMAC_IRQ_LINK_EVENT |
+		   UMAC_IRQ_MPD_R);
+	if (status) {
+		/* Save irq status for bottom-half processing. */
+		spin_lock_irqsave(&priv->lock, flags);
+		priv->irq0_stat |= status;
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		schedule_work(&priv->bcmgenet_irq_work);
+	}
+
 	return IRQ_HANDLED;
 }
 
@@ -2801,6 +2875,8 @@ err_irq0:
 err_fini_dma:
 	bcmgenet_fini_dma(priv);
 err_clk_disable:
+	if (priv->internal_phy)
+		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 	clk_disable_unprepare(priv->clk);
 	return ret;
 }
@@ -3177,6 +3253,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	 */
 	gphy_rev = reg & 0xffff;
 
+	/* This is reserved so should require special treatment */
+	if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+		return;
+	}
+
 	/* This is the good old scheme, just GPHY major, no minor nor patch */
 	if ((gphy_rev & 0xf0) != 0)
 		priv->gphy_rev = gphy_rev << 8;
@@ -3185,12 +3267,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	else if ((gphy_rev & 0xff00) != 0)
 		priv->gphy_rev = gphy_rev;
 
-	/* This is reserved so should require special treatment */
-	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
-		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
-		return;
-	}
-
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (!(params->flags & GENET_HAS_40BITS))
 		pr_warn("GENET does not support 40-bits PA\n");
@@ -3233,6 +3309,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	const void *macaddr;
 	struct resource *r;
 	int err = -EIO;
+	const char *phy_mode_str;
 
 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3276,6 +3353,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
 		goto err;
 	}
 
+	spin_lock_init(&priv->lock);
+
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	dev_set_drvdata(&pdev->dev, dev);
 	ether_addr_copy(dev->dev_addr, macaddr);
@@ -3338,6 +3417,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
 		priv->clk_eee = NULL;
 	}
 
+	/* If this is an internal GPHY, power it on now, before UniMAC is
+	 * brought out of reset as absolutely no UniMAC activity is allowed
+	 */
+	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+	    !strcasecmp(phy_mode_str, "internal"))
+		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
 	err = reset_umac(priv);
 	if (err)
 		goto err_clk_disable;
@@ -3502,6 +3588,8 @@ static int bcmgenet_resume(struct device *d)
 	return 0;
 
 out_clk_disable:
+	if (priv->internal_phy)
+		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 	clk_disable_unprepare(priv->clk);
 	return ret;
 }
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34d331a..db7f289d65ae 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
 #define MDIO_REG_SHIFT			16
 #define MDIO_REG_MASK			0x1F
 
-#define UMAC_RBUF_OVFL_CNT		0x61C
+#define UMAC_RBUF_OVFL_CNT_V1		0x61C
+#define RBUF_OVFL_CNT_V2		0x80
+#define RBUF_OVFL_CNT_V3PLUS		0x94
 
 #define UMAC_MPD_CTRL			0x620
 #define  MPD_EN				(1 << 0)
@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
 
 #define UMAC_MPD_PW_MS			0x624
 #define UMAC_MPD_PW_LS			0x628
-#define UMAC_RBUF_ERR_CNT		0x634
+#define UMAC_RBUF_ERR_CNT_V1		0x634
+#define RBUF_ERR_CNT_V2			0x84
+#define RBUF_ERR_CNT_V3PLUS		0x98
#define UMAC_MDF_ERR_CNT		0x638
 #define UMAC_MDF_CTRL			0x650
 #define UMAC_MDF_ADDR			0x654
@@ -619,11 +623,13 @@ struct bcmgenet_priv {
 	struct work_struct bcmgenet_irq_work;
 	int irq0;
 	int irq1;
-	unsigned int irq0_stat;
-	unsigned int irq1_stat;
 	int wol_irq;
 	bool wol_irq_disabled;
 
+	/* shared status */
+	spinlock_t lock;
+	unsigned int irq0_stat;
+
 	/* HW descriptors/checksum variables */
 	bool desc_64b_en;
 	bool desc_rxchk_en;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index be9c0e3f5ade..92f46b1375c3 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -152,7 +152,7 @@ struct octnic_gather {
 	 */
 	struct octeon_sg_entry *sg;
 
-	u64 sg_dma_ptr;
+	dma_addr_t sg_dma_ptr;
 };
 
 struct handshake {
@@ -734,6 +734,9 @@ static void delete_glists(struct lio *lio)
 	struct octnic_gather *g;
 	int i;
 
+	kfree(lio->glist_lock);
+	lio->glist_lock = NULL;
+
 	if (!lio->glist)
 		return;
 
@@ -741,23 +744,26 @@ static void delete_glists(struct lio *lio)
 		do {
 			g = (struct octnic_gather *)
 				list_delete_head(&lio->glist[i]);
-			if (g) {
-				if (g->sg) {
-					dma_unmap_single(&lio->oct_dev->
-							 pci_dev->dev,
-							 g->sg_dma_ptr,
-							 g->sg_size,
-							 DMA_TO_DEVICE);
-					kfree((void *)((unsigned long)g->sg -
-						       g->adjust));
-				}
+			if (g)
 				kfree(g);
-			}
 		} while (g);
+
+		if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+			lio_dma_free(lio->oct_dev,
+				     lio->glist_entry_size * lio->tx_qsize,
+				     lio->glists_virt_base[i],
+				     lio->glists_dma_base[i]);
+		}
 	}
 
-	kfree((void *)lio->glist);
-	kfree((void *)lio->glist_lock);
+	kfree(lio->glists_virt_base);
+	lio->glists_virt_base = NULL;
+
+	kfree(lio->glists_dma_base);
+	lio->glists_dma_base = NULL;
+
+	kfree(lio->glist);
+	lio->glist = NULL;
 }
 
 /**
@@ -772,13 +778,30 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
 	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
 				  GFP_KERNEL);
 	if (!lio->glist_lock)
-		return 1;
+		return -ENOMEM;
 
 	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
 			     GFP_KERNEL);
 	if (!lio->glist) {
-		kfree((void *)lio->glist_lock);
-		return 1;
+		kfree(lio->glist_lock);
+		lio->glist_lock = NULL;
+		return -ENOMEM;
+	}
+
+	lio->glist_entry_size =
+		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+	/* allocate memory to store virtual and dma base address of
+	 * per glist consistent memory
+	 */
+	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+					GFP_KERNEL);
+	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+				       GFP_KERNEL);
+
+	if (!lio->glists_virt_base || !lio->glists_dma_base) {
+		delete_glists(lio);
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < num_iqs; i++) {
@@ -788,6 +811,16 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
 
 		INIT_LIST_HEAD(&lio->glist[i]);
 
+		lio->glists_virt_base[i] =
+			lio_dma_alloc(oct,
+				      lio->glist_entry_size * lio->tx_qsize,
+				      &lio->glists_dma_base[i]);
+
+		if (!lio->glists_virt_base[i]) {
+			delete_glists(lio);
+			return -ENOMEM;
+		}
+
 		for (j = 0; j < lio->tx_qsize; j++) {
 			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
 					 numa_node);
@@ -796,43 +829,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
 			if (!g)
 				break;
 
-			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
-				      OCT_SG_ENTRY_SIZE);
+			g->sg = lio->glists_virt_base[i] +
+				(j * lio->glist_entry_size);
 
-			g->sg = kmalloc_node(g->sg_size + 8,
-					     GFP_KERNEL, numa_node);
-			if (!g->sg)
-				g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
-			if (!g->sg) {
-				kfree(g);
-				break;
-			}
-
-			/* The gather component should be aligned on 64-bit
-			 * boundary
-			 */
-			if (((unsigned long)g->sg) & 7) {
-				g->adjust = 8 - (((unsigned long)g->sg) & 7);
-				g->sg = (struct octeon_sg_entry *)
-					((unsigned long)g->sg + g->adjust);
-			}
-			g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
-						       g->sg, g->sg_size,
-						       DMA_TO_DEVICE);
-			if (dma_mapping_error(&oct->pci_dev->dev,
-					      g->sg_dma_ptr)) {
-				kfree((void *)((unsigned long)g->sg -
-					       g->adjust));
-				kfree(g);
-				break;
-			}
+			g->sg_dma_ptr = lio->glists_dma_base[i] +
+					(j * lio->glist_entry_size);
 
 			list_add_tail(&g->list, &lio->glist[i]);
 		}
 
 		if (j != lio->tx_qsize) {
 			delete_glists(lio);
-			return 1;
+			return -ENOMEM;
 		}
 	}
 
@@ -1885,9 +1893,6 @@ static void free_netsgbuf(void *buf)
 		i++;
 	}
 
-	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
-				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
 	iq = skb_iq(lio, skb);
 	spin_lock(&lio->glist_lock[iq]);
 	list_add_tail(&g->list, &lio->glist[iq]);
@@ -1933,9 +1938,6 @@ static void free_netsgbuf_with_resp(void *buf)
 		i++;
 	}
 
-	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
-				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
 	iq = skb_iq(lio, skb);
 
 	spin_lock(&lio->glist_lock[iq]);
@@ -3273,8 +3275,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 		i++;
 	}
 
-	dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
-				   g->sg_size, DMA_TO_DEVICE);
-
 	dptr = g->sg_dma_ptr;
 
 	if (OCTEON_CN23XX_PF(oct))
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 9d5e03502c76..7b83be4ce1fe 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -108,6 +108,8 @@ struct octnic_gather {
 	 * received from the IP layer.
 	 */
 	struct octeon_sg_entry *sg;
+
+	dma_addr_t sg_dma_ptr;
 };
 
 struct octeon_device_priv {
@@ -490,6 +492,9 @@ static void delete_glists(struct lio *lio)
 	struct octnic_gather *g;
 	int i;
 
+	kfree(lio->glist_lock);
+	lio->glist_lock = NULL;
+
 	if (!lio->glist)
 		return;
 
@@ -497,17 +502,26 @@ static void delete_glists(struct lio *lio)
 		do {
 			g = (struct octnic_gather *)
 				list_delete_head(&lio->glist[i]);
-			if (g) {
-				if (g->sg)
-					kfree((void *)((unsigned long)g->sg -
-						       g->adjust));
+			if (g)
 				kfree(g);
-			}
 		} while (g);
+
+		if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+			lio_dma_free(lio->oct_dev,
+				     lio->glist_entry_size * lio->tx_qsize,
+				     lio->glists_virt_base[i],
+				     lio->glists_dma_base[i]);
+		}
 	}
 
+	kfree(lio->glists_virt_base);
+	lio->glists_virt_base = NULL;
+
+	kfree(lio->glists_dma_base);
+	lio->glists_dma_base = NULL;
+
 	kfree(lio->glist);
-	kfree(lio->glist_lock);
+	lio->glist = NULL;
 }
 
 /**
@@ -522,13 +536,30 @@ static int setup_glists(struct lio *lio, int num_iqs)
 	lio->glist_lock =
 	    kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
 	if (!lio->glist_lock)
-		return 1;
+		return -ENOMEM;
 
 	lio->glist =
 	    kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
 	if (!lio->glist) {
 		kfree(lio->glist_lock);
-		return 1;
+		lio->glist_lock = NULL;
+		return -ENOMEM;
+	}
+
+	lio->glist_entry_size =
+		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+	/* allocate memory to store virtual and dma base address of
+	 * per glist consistent memory
+	 */
+	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+					GFP_KERNEL);
+	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+				       GFP_KERNEL);
+
+	if (!lio->glists_virt_base || !lio->glists_dma_base) {
+		delete_glists(lio);
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < num_iqs; i++) {
@@ -536,34 +567,33 @@ static int setup_glists(struct lio *lio, int num_iqs)
 
 		INIT_LIST_HEAD(&lio->glist[i]);
 
+		lio->glists_virt_base[i] =
+			lio_dma_alloc(lio->oct_dev,
+				      lio->glist_entry_size * lio->tx_qsize,
+				      &lio->glists_dma_base[i]);
+
+		if (!lio->glists_virt_base[i]) {
+			delete_glists(lio);
+			return -ENOMEM;
+		}
+
 		for (j = 0; j < lio->tx_qsize; j++) {
 			g = kzalloc(sizeof(*g), GFP_KERNEL);
 			if (!g)
 				break;
 
-			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
-				      OCT_SG_ENTRY_SIZE);
+			g->sg = lio->glists_virt_base[i] +
+				(j * lio->glist_entry_size);
 
-			g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
-			if (!g->sg) {
-				kfree(g);
-				break;
-			}
+			g->sg_dma_ptr = lio->glists_dma_base[i] +
+					(j * lio->glist_entry_size);
 
-			/* The gather component should be aligned on 64-bit
-			 * boundary
-			 */
-			if (((unsigned long)g->sg) & 7) {
-				g->adjust = 8 - (((unsigned long)g->sg) & 7);
-				g->sg = (struct octeon_sg_entry *)
-					((unsigned long)g->sg + g->adjust);
-			}
 			list_add_tail(&g->list, &lio->glist[i]);
 		}
 
 		if (j != lio->tx_qsize) {
 			delete_glists(lio);
-			return 1;
+			return -ENOMEM;
 		}
 	}
 
@@ -1324,10 +1354,6 @@ static void free_netsgbuf(void *buf)
 		i++;
 	}
 
-	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
-			 finfo->dptr, g->sg_size,
-			 DMA_TO_DEVICE);
-
 	iq = skb_iq(lio, skb);
 
 	spin_lock(&lio->glist_lock[iq]);
@@ -1374,10 +1400,6 @@ static void free_netsgbuf_with_resp(void *buf)
 		i++;
 	}
 
-	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
-			 finfo->dptr, g->sg_size,
-			 DMA_TO_DEVICE);
-
 	iq = skb_iq(lio, skb);
 
 	spin_lock(&lio->glist_lock[iq]);
@@ -2382,23 +2404,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 		i++;
 	}
 
-	dptr = dma_map_single(&oct->pci_dev->dev,
-			      g->sg, g->sg_size,
-			      DMA_TO_DEVICE);
-	if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
-		dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
-			__func__);
-		dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
-				 skb->len - skb->data_len,
-				 DMA_TO_DEVICE);
-		for (j = 1; j <= frags; j++) {
-			frag = &skb_shinfo(skb)->frags[j - 1];
-			dma_unmap_page(&oct->pci_dev->dev,
-				       g->sg[j >> 2].ptr[j & 3],
-				       frag->size, DMA_TO_DEVICE);
-		}
-		return NETDEV_TX_BUSY;
-	}
+	dptr = g->sg_dma_ptr;
 
 	ndata.cmd.cmd3.dptr = dptr;
 	finfo->dptr = dptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index b3dc2e9651a8..d29ebc531151 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -71,17 +71,17 @@
 #define CN23XX_MAX_RINGS_PER_VF		8
 
 #define CN23XX_MAX_INPUT_QUEUES	CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_IQ_DESCRIPTORS	2048
+#define CN23XX_MAX_IQ_DESCRIPTORS	512
 #define CN23XX_DB_MIN			1
 #define CN23XX_DB_MAX			8
 #define CN23XX_DB_TIMEOUT		1
 
 #define CN23XX_MAX_OUTPUT_QUEUES	CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_OQ_DESCRIPTORS	2048
+#define CN23XX_MAX_OQ_DESCRIPTORS	512
 #define CN23XX_OQ_BUF_SIZE		1536
 #define CN23XX_OQ_PKTSPER_INTR		128
 /*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
-#define CN23XX_OQ_REFIL_THRESHOLD	128
+#define CN23XX_OQ_REFIL_THRESHOLD	16
 
 #define CN23XX_OQ_INTR_PKT		64
 #define CN23XX_OQ_INTR_TIME		100
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 0be87d119a97..79f809479af6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
 			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
 					    pg_info);
 
-		if (droq->desc_ring && droq->desc_ring[i].info_ptr)
-			lio_unmap_ring_info(oct->pci_dev,
-					    (u64)droq->
-					    desc_ring[i].info_ptr,
-					    OCT_DROQ_INFO_SIZE);
 		droq->recv_buf_list[i].buffer = NULL;
 	}
 
@@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
 	vfree(droq->recv_buf_list);
 
 	if (droq->info_base_addr)
-		cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
-				       droq->info_alloc_size,
-				       droq->info_base_addr,
-				       droq->info_list_dma);
+		lio_free_info_buffer(oct, droq);
 
 	if (droq->desc_ring)
 		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -294,12 +286,7 @@ int octeon_init_droq(struct octeon_device *oct,
 	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
 		droq->max_count);
 
-	droq->info_list =
-		cnnic_numa_alloc_aligned_dma((droq->max_count *
-					      OCT_DROQ_INFO_SIZE),
-					     &droq->info_alloc_size,
-					     &droq->info_base_addr,
-					     numa_node);
+	droq->info_list = lio_alloc_info_buffer(oct, droq);
 	if (!droq->info_list) {
 		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
 		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index e62074090681..6982c0af5ecc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -325,10 +325,10 @@ struct octeon_droq {
 	size_t desc_ring_dma;
 
 	/** Info ptr list are allocated at this virtual address. */
-	size_t info_base_addr;
+	void *info_base_addr;
 
 	/** DMA mapped address of the info list */
-	size_t info_list_dma;
+	dma_addr_t info_list_dma;
 
 	/** Allocated size of info list. */
 	u32 info_alloc_size;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index aa36e9ae7676..bed9ef17bc26 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -140,48 +140,6 @@ err_release_region:
 	return 1;
 }
 
-static inline void *
-cnnic_numa_alloc_aligned_dma(u32 size,
-			     u32 *alloc_size,
-			     size_t *orig_ptr,
-			     int numa_node)
-{
-	int retries = 0;
-	void *ptr = NULL;
-
-#define OCTEON_MAX_ALLOC_RETRIES     1
-	do {
-		struct page *page = NULL;
-
-		page = alloc_pages_node(numa_node,
-					GFP_KERNEL,
-					get_order(size));
-		if (!page)
-			page = alloc_pages(GFP_KERNEL,
-					   get_order(size));
-		ptr = (void *)page_address(page);
-		if ((unsigned long)ptr & 0x07) {
-			__free_pages(page, get_order(size));
-			ptr = NULL;
-			/* Increment the size required if the first
-			 * attempt failed.
-			 */
-			if (!retries)
-				size += 7;
-		}
-		retries++;
-	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
-
-	*alloc_size = size;
-	*orig_ptr = (unsigned long)ptr;
-	if ((unsigned long)ptr & 0x07)
-		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
-	return ptr;
-}
-
-#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
-	free_pages(orig_ptr, get_order(size))
-
 static inline int
 sleep_cond(wait_queue_head_t *wait_queue, int *condition)
 {
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 6bb89419006e..eef2a1e8a7e3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -62,6 +62,9 @@ struct lio {
 
 	/** Array of gather component linked lists */
 	struct list_head *glist;
+	void **glists_virt_base;
+	dma_addr_t *glists_dma_base;
+	u32 glist_entry_size;
 
 	/** Pointer to the NIC properties for the Octeon device this network
 	 * interface is associated with.
@@ -344,6 +347,29 @@ static inline void tx_buffer_free(void *buffer)
 #define lio_dma_free(oct, size, virt_addr, dma_addr) \
 	dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
 
+static inline void *
+lio_alloc_info_buffer(struct octeon_device *oct,
+		      struct octeon_droq *droq)
+{
+	void *virt_ptr;
+
+	virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
+				 &droq->info_list_dma);
+	if (virt_ptr) {
+		droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
+		droq->info_base_addr = virt_ptr;
+	}
+
+	return virt_ptr;
+}
+
+static inline void lio_free_info_buffer(struct octeon_device *oct,
+					struct octeon_droq *droq)
+{
+	lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
+		     droq->info_list_dma);
+}
+
 static inline
 void *get_rbd(struct sk_buff *skb)
 {
@@ -359,22 +385,7 @@ void *get_rbd(struct sk_buff *skb)
 static inline u64
 lio_map_ring_info(struct octeon_droq *droq, u32 i)
 {
-	dma_addr_t dma_addr;
-	struct octeon_device *oct = droq->oct_dev;
-
-	dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
-				  OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
-
-	WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
-
-	return (u64)dma_addr;
-}
-
-static inline void
-lio_unmap_ring_info(struct pci_dev *pci_dev,
-		    u64 info_ptr, u32 size)
-{
-	dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+	return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
 }
 
 static inline u64
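lio_map_ring_info() collapses to pointer arithmetic because the whole info list now lives in a single coherent allocation. A sketch of the indexing idea (info_entry is an illustrative stand-in for struct octeon_droq_info):

	#include <linux/dma-mapping.h>

	struct info_entry { u64 data[2]; }; /* illustrative layout */

	/* device-visible address of entry i: base handle plus offset */
	static dma_addr_t info_entry_dma(dma_addr_t base, u32 i)
	{
		return base + i * sizeof(struct info_entry);
	}

No per-entry dma_map_single() means no per-entry dma_mapping_error() check, and teardown is a single dma_free_coherent().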
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e739c7153562..2269ff562d95 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -269,6 +269,7 @@ struct nicvf {
 #define	MAX_QUEUES_PER_QSET			8
 	struct queue_set	*qs;
 	struct nicvf_cq_poll	*napi[8];
+	void			*iommu_domain;
 	u8			vf_id;
 	u8			sqs_id;
 	bool			sqs_mode;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 6feaa24bcfd4..24017588f531 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -16,6 +16,7 @@
 #include <linux/log2.h>
 #include <linux/prefetch.h>
 #include <linux/irq.h>
+#include <linux/iommu.h>
 
 #include "nic_reg.h"
 #include "nic.h"
@@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 		/* Get actual TSO descriptors and free them */
 		tso_sqe =
 		 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+		nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+					 tso_sqe->subdesc_cnt);
 		nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+	} else {
+		nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+					 hdr->subdesc_cnt);
 	}
 	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
 	prefetch(skb);
@@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 {
 	struct sk_buff *skb;
 	struct nicvf *nic = netdev_priv(netdev);
+	struct nicvf *snic = nic;
 	int err = 0;
 	int rq_idx;
 
@@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 	if (err && !cqe_rx->rb_cnt)
 		return;
 
-	skb = nicvf_get_rcv_skb(nic, cqe_rx);
+	skb = nicvf_get_rcv_skb(snic, cqe_rx);
 	if (!skb) {
 		netdev_dbg(nic->netdev, "Packet not received\n");
 		return;
@@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!pass1_silicon(nic->pdev))
 		nic->hw_tso = true;
 
+	/* Get iommu domain for iova to physical addr conversion */
+	nic->iommu_domain = iommu_get_domain_for_dev(dev);
+
 	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
 	if (sdevid == 0xA134)
 		nic->t88 = true;
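Caching the IOMMU domain at probe time lets the datapath translate an IOVA back to a physical address without a per-packet domain lookup; when no IOMMU is attached, the DMA address already is physical. A sketch of the same check in isolation, mirroring the nicvf_iova_to_phys() helper added in nicvf_queues.c below:

	#include <linux/iommu.h>

	static u64 iova_to_phys_cached(struct iommu_domain *domain,
				       dma_addr_t iova)
	{
		/* translation is installed only when an IOMMU is present */
		if (domain)
			return iommu_iova_to_phys(domain, iova);
		return (u64)iova;
	}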
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ac0390be3b12..f13289f0d238 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 #include <linux/ip.h>
 #include <linux/etherdevice.h>
+#include <linux/iommu.h>
 #include <net/ip.h>
 #include <net/tso.h>
 
@@ -18,6 +19,16 @@
 #include "q_struct.h"
 #include "nicvf_queues.h"
 
+#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)
+
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+	/* Translation is installed only when IOMMU is present */
+	if (nic->iommu_domain)
+		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+	return dma_addr;
+}
+
 static void nicvf_get_page(struct nicvf *nic)
 {
 	if (!nic->rb_pageref || !nic->rb_page)
@@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 					 u32 buf_len, u64 **rbuf)
 {
-	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
+	int order = NICVF_PAGE_ORDER;
 
 	/* Check if request can be accomodated in previous allocated page */
 	if (nic->rb_page &&
@@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 	}
 
 	nicvf_get_page(nic);
-	nic->rb_page = NULL;
 
 	/* Allocate a new page */
+	nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+				   order);
 	if (!nic->rb_page) {
-		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-					   order);
-		if (!nic->rb_page) {
-			this_cpu_inc(nic->pnicvf->drv_stats->
-				     rcv_buffer_alloc_failures);
-			return -ENOMEM;
-		}
-		nic->rb_page_offset = 0;
+		this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
+		return -ENOMEM;
 	}
-
+	nic->rb_page_offset = 0;
 ret:
-	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+	/* HW will ensure data coherency, CPU sync not required */
+	*rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+						nic->rb_page_offset, buf_len,
+						DMA_FROM_DEVICE,
+						DMA_ATTR_SKIP_CPU_SYNC));
+	if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+		if (!nic->rb_page_offset)
+			__free_pages(nic->rb_page, order);
+		nic->rb_page = NULL;
+		return -ENOMEM;
+	}
 	nic->rb_page_offset += buf_len;
 
 	return 0;
@@ -158,16 +174,21 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
 	rbdr->dma_size = buf_size;
 	rbdr->enable = true;
 	rbdr->thresh = RBDR_THRESH;
+	rbdr->head = 0;
+	rbdr->tail = 0;
 
 	nic->rb_page = NULL;
 	for (idx = 0; idx < ring_len; idx++) {
 		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
 					     &rbuf);
-		if (err)
+		if (err) {
+			/* To free already allocated and mapped ones */
+			rbdr->tail = idx - 1;
 			return err;
+		}
 
 		desc = GET_RBDR_DESC(rbdr, idx);
-		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
 	}
 
 	nicvf_get_page(nic);
@@ -179,7 +200,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
 {
 	int head, tail;
-	u64 buf_addr;
+	u64 buf_addr, phys_addr;
 	struct rbdr_entry_t *desc;
 
 	if (!rbdr)
@@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
 	head = rbdr->head;
 	tail = rbdr->tail;
 
-	/* Free SKBs */
+	/* Release page references */
 	while (head != tail) {
 		desc = GET_RBDR_DESC(rbdr, head);
-		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
-		put_page(virt_to_page(phys_to_virt(buf_addr)));
+		buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+		phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+		dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+		if (phys_addr)
+			put_page(virt_to_page(phys_to_virt(phys_addr)));
 		head++;
 		head &= (rbdr->dmem.q_len - 1);
 	}
-	/* Free SKB of tail desc */
+	/* Release buffer of tail desc */
 	desc = GET_RBDR_DESC(rbdr, tail);
-	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
-	put_page(virt_to_page(phys_to_virt(buf_addr)));
+	buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+	phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+	dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (phys_addr)
+		put_page(virt_to_page(phys_to_virt(phys_addr)));
 
 	/* Free RBDR ring */
 	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -250,7 +279,7 @@ refill:
 			break;
 
 		desc = GET_RBDR_DESC(rbdr, tail);
-		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
 		refill_rb_cnt--;
 		new_rb++;
 	}
@@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
 	return 0;
 }
 
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+			      int hdr_sqe, u8 subdesc_cnt)
+{
+	u8 idx;
+	struct sq_gather_subdesc *gather;
+
+	/* Unmap DMA mapped skb data buffers */
+	for (idx = 0; idx < subdesc_cnt; idx++) {
+		hdr_sqe++;
+		hdr_sqe &= (sq->dmem.q_len - 1);
+		gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
+		/* HW will ensure data coherency, CPU sync not required */
+		dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
+				     gather->size, DMA_TO_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+	}
+}
+
 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 {
 	struct sk_buff *skb;
+	struct sq_hdr_subdesc *hdr;
+	struct sq_hdr_subdesc *tso_sqe;
 
 	if (!sq)
 		return;
@@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 	smp_rmb();
 	while (sq->head != sq->tail) {
 		skb = (struct sk_buff *)sq->skbuff[sq->head];
-		if (skb)
-			dev_kfree_skb_any(skb);
+		if (!skb)
+			goto next;
+		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+		/* Check for dummy descriptor used for HW TSO offload on 88xx */
+		if (hdr->dont_send) {
+			/* Get actual TSO descriptors and unmap them */
+			tso_sqe =
+			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+						 tso_sqe->subdesc_cnt);
+		} else {
+			nicvf_unmap_sndq_buffers(nic, sq, sq->head,
+						 hdr->subdesc_cnt);
+		}
+		dev_kfree_skb_any(skb);
+next:
 		sq->head++;
 		sq->head &= (sq->dmem.q_len - 1);
 	}
@@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	if (!nic->sqs_mode && (qidx == 0)) {
-		/* Enable checking L3/L4 length and TCP/UDP checksums */
+		/* Enable checking L3/L4 length and TCP/UDP checksums
+		 * Also allow IPv6 pkts with zero UDP checksum.
+		 */
 		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
-				      (BIT(24) | BIT(23) | BIT(21)));
+				      (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
 	}
 
@@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
 	return qentry;
 }
 
+/* Rollback to previous tail pointer when descriptors not used */
+static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
+					  int qentry, int desc_cnt)
+{
+	sq->tail = qentry;
+	atomic_add(desc_cnt, &sq->free_cnt);
+}
+
 /* Free descriptor back to SQ for future use */
 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
 {
@@ -1207,8 +1280,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
 			struct sk_buff *skb, u8 sq_num)
 {
 	int i, size;
-	int subdesc_cnt, tso_sqe = 0;
+	int subdesc_cnt, hdr_sqe = 0;
 	int qentry;
+	u64 dma_addr;
 
 	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
 	if (subdesc_cnt > atomic_read(&sq->free_cnt))
@@ -1223,12 +1297,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
 	/* Add SQ header subdesc */
 	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
 				 skb, skb->len);
-	tso_sqe = qentry;
+	hdr_sqe = qentry;
 
 	/* Add SQ gather subdescs */
 	qentry = nicvf_get_nxt_sqentry(sq, qentry);
 	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
-	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+	/* HW will ensure data coherency, CPU sync not required */
+	dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
+				      offset_in_page(skb->data), size,
+				      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+		nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+		return 0;
+	}
+
+	nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
 
 	/* Check for scattered buffer */
 	if (!skb_is_nonlinear(skb))
@@ -1241,15 +1324,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
 
 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
 		size = skb_frag_size(frag);
-		nicvf_sq_add_gather_subdesc(sq, qentry, size,
-					    virt_to_phys(
-					    skb_frag_address(frag)));
+		dma_addr = dma_map_page_attrs(&nic->pdev->dev,
+					      skb_frag_page(frag),
+					      frag->page_offset, size,
+					      DMA_TO_DEVICE,
+					      DMA_ATTR_SKIP_CPU_SYNC);
+		if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+			/* Free entire chain of mapped buffers
+			 * here 'i' = frags mapped + above mapped skb->data
+			 */
+			nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
+			nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+			return 0;
+		}
+		nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
 	}
 
 doorbell:
 	if (nic->t88 && skb_shinfo(skb)->gso_size) {
 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
-		nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+		nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
 	}
 
 	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@@ -1282,6 +1376,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 	int offset;
 	u16 *rb_lens = NULL;
 	u64 *rb_ptrs = NULL;
+	u64 phys_addr;
 
 	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
 	/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
@@ -1296,15 +1391,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 	else
 		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
 
-	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
-		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
-
 	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
 		payload_len = rb_lens[frag_num(frag)];
+		phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
+		if (!phys_addr) {
+			if (skb)
+				dev_kfree_skb_any(skb);
+			return NULL;
+		}
+
 		if (!frag) {
 			/* First fragment */
+			dma_unmap_page_attrs(&nic->pdev->dev,
+					     *rb_ptrs - cqe_rx->align_pad,
+					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
+					     DMA_ATTR_SKIP_CPU_SYNC);
 			skb = nicvf_rb_ptr_to_skb(nic,
-						  *rb_ptrs - cqe_rx->align_pad,
+						  phys_addr - cqe_rx->align_pad,
 						  payload_len);
 			if (!skb)
 				return NULL;
@@ -1312,8 +1415,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 			skb_put(skb, payload_len);
 		} else {
 			/* Add fragments */
-			page = virt_to_page(phys_to_virt(*rb_ptrs));
-			offset = phys_to_virt(*rb_ptrs) - page_address(page);
+			dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
+					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
+					     DMA_ATTR_SKIP_CPU_SYNC);
+			page = virt_to_page(phys_to_virt(phys_addr));
+			offset = phys_to_virt(phys_addr) - page_address(page);
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 					offset, payload_len, RCV_FRAG_LEN);
 		}
index 5cb84da99a2d..10cb4b84625b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -87,7 +87,7 @@
 #define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
 #define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
 #define RBDR_THRESH		(RCV_BUF_COUNT / 2)
-#define DMA_BUFFER_LEN		2048 /* In multiples of 128bytes */
+#define DMA_BUFFER_LEN		1536 /* In multiples of 128bytes */
 #define RCV_FRAG_LEN	(SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
@@ -301,6 +301,8 @@ struct queue_set {
 
 #define CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
 
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+			      int hdr_sqe, u8 subdesc_cnt);
 void nicvf_config_vlan_stripping(struct nicvf *nic,
 				 netdev_features_t features);
 int nicvf_set_qset_resources(struct nicvf *nic);
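The DMA_BUFFER_LEN change interacts with RCV_FRAG_LEN: each receive fragment is mapped with NET_SKB_PAD headroom and a struct skb_shared_info tail included. As a rough budget check, assuming 64-byte cache lines and a skb_shared_info of about 320 bytes, 1536 yields a RCV_FRAG_LEN near 1920 so two fragments fit in a 4 KB page, whereas 2048 gave roughly 2432 and only one did:

	#include <linux/skbuff.h>

	/* same formula as the RCV_FRAG_LEN macro above */
	static inline unsigned int rcv_frag_len(unsigned int dma_buffer_len)
	{
		return SKB_DATA_ALIGN(dma_buffer_len + NET_SKB_PAD) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}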
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 4c8e8cf730bb..64a1095e4d14 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
 	return 1;
 }
 
+static int max_bgx_per_node;
+static void set_max_bgx_per_node(struct pci_dev *pdev)
+{
+	u16 sdevid;
+
+	if (max_bgx_per_node)
+		return;
+
+	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+	switch (sdevid) {
+	case PCI_SUBSYS_DEVID_81XX_BGX:
+		max_bgx_per_node = MAX_BGX_PER_CN81XX;
+		break;
+	case PCI_SUBSYS_DEVID_83XX_BGX:
+		max_bgx_per_node = MAX_BGX_PER_CN83XX;
+		break;
+	case PCI_SUBSYS_DEVID_88XX_BGX:
+	default:
+		max_bgx_per_node = MAX_BGX_PER_CN88XX;
+		break;
+	}
+}
+
+static struct bgx *get_bgx(int node, int bgx_idx)
+{
+	int idx = (node * max_bgx_per_node) + bgx_idx;
+
+	return bgx_vnic[idx];
+}
+
 /* Return number of BGX present in HW */
 unsigned bgx_get_map(int node)
 {
 	int i;
 	unsigned map = 0;
 
-	for (i = 0; i < MAX_BGX_PER_NODE; i++) {
-		if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
+	for (i = 0; i < max_bgx_per_node; i++) {
+		if (bgx_vnic[(node * max_bgx_per_node) + i])
 			map |= (1 << i);
 	}
 
@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
 {
 	struct bgx *bgx;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (bgx)
 		return bgx->lmac_count;
 
@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
 	struct bgx *bgx;
 	struct lmac *lmac;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (!bgx)
 		return;
 
@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
 
 const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
 {
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 
 	if (bgx)
 		return bgx->lmac[lmacid].mac;
@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
 
 void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
 {
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 
 	if (!bgx)
 		return;
@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
 
 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 {
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 	struct lmac *lmac;
 	u64 cfg;
 
@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
 	struct pfc *pfc = (struct pfc *)pause;
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 	struct lmac *lmac;
 	u64 cfg;
 
@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
 void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
 	struct pfc *pfc = (struct pfc *)pause;
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 	struct lmac *lmac;
 	u64 cfg;
 
@@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
 {
 	struct bgx *bgx;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (!bgx)
 		return 0;
 
@@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
 {
 	struct bgx *bgx;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (!bgx)
 		return 0;
 
@@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
 	struct lmac *lmac;
 	u64 cfg;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (!bgx)
 		return;
 
@@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
 		dev_info(dev, "%s: 40G_KR4\n", (char *)str);
 		break;
 	case BGX_MODE_QSGMII:
-		if ((lmacid == 0) &&
-		    (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
-			return;
-		if ((lmacid == 2) &&
-		    (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
-			return;
 		dev_info(dev, "%s: QSGMII\n", (char *)str);
 		break;
 	case BGX_MODE_RGMII:
@@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_release_regions;
 	}
 
+	set_max_bgx_per_node(pdev);
+
 	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
 	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
 		bgx->bgx_id = (pci_resource_start(pdev,
 			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
-		bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
 		bgx->max_lmac = MAX_LMAC_PER_BGX;
 		bgx_vnic[bgx->bgx_id] = bgx;
 	} else {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index a60f189429bb..c5080f2cead5 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -22,7 +22,6 @@
22#define MAX_BGX_PER_CN88XX 2 22#define MAX_BGX_PER_CN88XX 2
23#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */ 23#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
24#define MAX_BGX_PER_CN83XX 4 24#define MAX_BGX_PER_CN83XX 4
25#define MAX_BGX_PER_NODE 4
26#define MAX_LMAC_PER_BGX 4 25#define MAX_LMAC_PER_BGX 4
27#define MAX_BGX_CHANS_PER_LMAC 16 26#define MAX_BGX_CHANS_PER_LMAC 16
28#define MAX_DMAC_PER_LMAC 8 27#define MAX_DMAC_PER_LMAC 8
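Dropping MAX_BGX_PER_NODE works because every bgx_vnic[] access now goes through one helper, so the stride of the flat array matches the SoC actually probed (via its PCI subsystem ID) rather than a compile-time worst case. The invariant this sketch captures is that the index must use the same per-node maximum everywhere, or entries alias across nodes:

	/* dense index into a flat per-node table; max_per_node must be the
	 * value probed for this SoC, never a fixed upper bound */
	static int bgx_index(int node, int bgx_idx, int max_per_node)
	{
		return node * max_per_node + bgx_idx;
	}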
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 275c2e2349ad..c44036d5761a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
 static int emac_dt_phy_connect(struct emac_instance *dev,
 			       struct device_node *phy_handle)
 {
-	int res;
-
 	dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
 				    GFP_KERNEL);
 	if (!dev->phy.def)
@@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
 {
 	struct device_node *np = dev->ofdev->dev.of_node;
 	struct device_node *phy_handle;
-	int res = 0;
+	int res = 1;
 
 	phy_handle = of_parse_phandle(np, "phy-handle", 0);
 
@@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev)
 	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
 		int res = emac_dt_phy_probe(dev);
 
-		mutex_unlock(&emac_phy_map_lock);
-		if (!res)
+		switch (res) {
+		case 1:
+			/* No phy-handle property configured.
+			 * Continue with the existing phy probe
+			 * and setup code.
+			 */
+			break;
+
+		case 0:
+			mutex_unlock(&emac_phy_map_lock);
 			goto init_phy;
 
-		dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
-			res);
-		return res;
+		default:
+			mutex_unlock(&emac_phy_map_lock);
+			dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
+				res);
+			return res;
+		}
 	}
 
 	if (dev->phy_address != 0xffffffff)
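emac_dt_phy_probe() now has a three-way contract: 1 means no "phy-handle" property was found (fall back to the legacy probe path), 0 means a DT phy was attached, and a negative value is a hard error. A sketch of a caller honoring that contract (legacy_phy_probe is a hypothetical name standing in for the existing non-DT path):

	static int attach_phy(struct emac_instance *dev)
	{
		int res = emac_dt_phy_probe(dev);

		if (res == 1)
			return legacy_phy_probe(dev); /* no DT phy described */
		if (res == 0)
			return 0;                     /* DT phy attached */
		return res;                           /* propagate the error */
	}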
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9198e6bd5160..5f11b4dc95d2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
 	send_map_query(adapter);
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		init_rx_pool(adapter, &adapter->rx_pool[i],
-			     IBMVNIC_BUFFS_PER_POOL, i,
+			     adapter->req_rx_add_entries_per_subcrq, i,
 			     be64_to_cpu(size_array[i]), 1);
 		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
 			dev_err(dev, "Couldn't alloc rx pool\n");
@@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
 	for (i = 0; i < tx_subcrqs; i++) {
 		tx_pool = &adapter->tx_pool[i];
 		tx_pool->tx_buff =
-		    kcalloc(adapter->max_tx_entries_per_subcrq,
+		    kcalloc(adapter->req_tx_entries_per_subcrq,
 			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
 		if (!tx_pool->tx_buff)
 			goto tx_pool_alloc_failed;
 
 		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
-					 adapter->max_tx_entries_per_subcrq *
+					 adapter->req_tx_entries_per_subcrq *
 					 adapter->req_mtu))
 			goto tx_ltb_alloc_failed;
 
 		tx_pool->free_map =
-		    kcalloc(adapter->max_tx_entries_per_subcrq,
+		    kcalloc(adapter->req_tx_entries_per_subcrq,
 			    sizeof(int), GFP_KERNEL);
 		if (!tx_pool->free_map)
 			goto tx_fm_alloc_failed;
 
-		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
+		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
 			tx_pool->free_map[j] = j;
 
 		tx_pool->consumer_index = 0;
@@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
 	struct device *dev = &adapter->vdev->dev;
 	struct ibmvnic_tx_buff *tx_buff = NULL;
+	struct ibmvnic_sub_crq_queue *tx_scrq;
 	struct ibmvnic_tx_pool *tx_pool;
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
@@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	int ret = 0;
 
 	tx_pool = &adapter->tx_pool[queue_num];
+	tx_scrq = adapter->tx_scrq[queue_num];
 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 				   be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	tx_pool->consumer_index =
 	    (tx_pool->consumer_index + 1) %
-	    adapter->max_tx_entries_per_subcrq;
+	    adapter->req_tx_entries_per_subcrq;
 
 	tx_buff = &tx_pool->tx_buff[index];
 	tx_buff->skb = skb;
@@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 		if (tx_pool->consumer_index == 0)
 			tx_pool->consumer_index =
-				adapter->max_tx_entries_per_subcrq - 1;
+				adapter->req_tx_entries_per_subcrq - 1;
 		else
 			tx_pool->consumer_index--;
 
@@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		ret = NETDEV_TX_BUSY;
 		goto out;
 	}
+
+	atomic_inc(&tx_scrq->used);
+
+	if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+		netdev_info(netdev, "Stopping queue %d\n", queue_num);
+		netif_stop_subqueue(netdev, queue_num);
+	}
+
 	tx_packets++;
 	tx_bytes += skb->len;
 	txq->trans_start = jiffies;
@@ -1213,6 +1223,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 	scrq->adapter = adapter;
 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
 	scrq->cur = 0;
+	atomic_set(&scrq->used, 0);
 	scrq->rx_skb_top = NULL;
 	spin_lock_init(&scrq->lock);
 
@@ -1355,14 +1366,28 @@ restart_loop:
 					       DMA_TO_DEVICE);
 		}
 
-		if (txbuff->last_frag)
+		if (txbuff->last_frag) {
+			atomic_dec(&scrq->used);
+
+			if (atomic_read(&scrq->used) <=
+			    (adapter->req_tx_entries_per_subcrq / 2) &&
+			    netif_subqueue_stopped(adapter->netdev,
+						   txbuff->skb)) {
+				netif_wake_subqueue(adapter->netdev,
+						    scrq->pool_index);
+				netdev_dbg(adapter->netdev,
+					   "Started queue %d\n",
+					   scrq->pool_index);
+			}
+
 			dev_kfree_skb_any(txbuff->skb);
+		}
 
 		adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
 						producer_index] = index;
 		adapter->tx_pool[pool].producer_index =
 		    (adapter->tx_pool[pool].producer_index + 1) %
-		    adapter->max_tx_entries_per_subcrq;
+		    adapter->req_tx_entries_per_subcrq;
 	}
 	/* remove tx_comp scrq*/
 	next->tx_comp.first = 0;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 422824f1f42a..1993b42666f7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
863 spinlock_t lock; 863 spinlock_t lock;
864 struct sk_buff *rx_skb_top; 864 struct sk_buff *rx_skb_top;
865 struct ibmvnic_adapter *adapter; 865 struct ibmvnic_adapter *adapter;
866 atomic_t used;
866}; 867};
867 868
868struct ibmvnic_long_term_buff { 869struct ibmvnic_long_term_buff {
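The new atomic_t counter implements the usual stop/wake discipline for a TX ring: stop the subqueue when in-flight descriptors reach capacity, wake it once completions drain the ring to half. The asymmetric thresholds give hysteresis so the queue state does not flap on every packet. A minimal sketch of the completion side of that pattern:

	#include <linux/netdevice.h>
	#include <linux/atomic.h>

	static void tx_complete_one(struct net_device *ndev, u16 qnum,
				    atomic_t *used, int ring_size)
	{
		/* wake at half occupancy, and only if we actually stopped */
		if (atomic_dec_return(used) <= ring_size / 2 &&
		    __netif_subqueue_stopped(ndev, qnum))
			netif_wake_subqueue(ndev, qnum);
	}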
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ddb4ca4ff930..117170014e88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE
14config MLX5_CORE_EN 14config MLX5_CORE_EN
15 bool "Mellanox Technologies ConnectX-4 Ethernet support" 15 bool "Mellanox Technologies ConnectX-4 Ethernet support"
16 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 16 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
17 depends on IPV6=y || IPV6=n || MLX5_CORE=m
17 imply PTP_1588_CLOCK 18 imply PTP_1588_CLOCK
18 default n 19 default n
19 ---help--- 20 ---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0523ed47f597..8fa23f6a1f67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
 
+	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
+		return 1;
+
 	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
 		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
 			return 0;
@@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 		return 1;
 	}
 
-	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
+	if (!(mode & DCB_CAP_DCBX_HOST))
 		return 1;
 
-	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
-	    !(mode & DCB_CAP_DCBX_VER_CEE) ||
-	    !(mode & DCB_CAP_DCBX_VER_IEEE) ||
-	    !(mode & DCB_CAP_DCBX_HOST))
+	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
 		return 1;
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 31e3cb7ee5fe..5621dcfda4f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
 	struct iphdr *iph;
 
 	/* We are only going to peek, no need to clone the SKB */
-	if (skb->protocol != htons(ETH_P_IP))
-		goto out;
-
 	if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
 		goto out;
 
@@ -249,7 +246,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
 	lbtp->loopback_ok = false;
 	init_completion(&lbtp->comp);
 
-	lbtp->pt.type = htons(ETH_P_ALL);
+	lbtp->pt.type = htons(ETH_P_IP);
 	lbtp->pt.func = mlx5e_test_loopback_validate;
 	lbtp->pt.dev = priv->netdev;
 	lbtp->pt.af_packet_priv = lbtp;
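Registering the selftest handler for ETH_P_IP instead of ETH_P_ALL moves protocol filtering into the stack, which is why the skb->protocol check inside the validate callback becomes redundant. A sketch of the registration shape (the stub callback only illustrates the packet_type signature, not the real validator):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/if_ether.h>

	static int lb_validate(struct sk_buff *skb, struct net_device *dev,
			       struct packet_type *pt,
			       struct net_device *orig_dev)
	{
		/* only IPv4 frames are delivered here now */
		kfree_skb(skb);
		return 0;
	}

	static struct packet_type lb_pt __read_mostly = {
		.type = cpu_to_be16(ETH_P_IP),
		.func = lb_validate,
	};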
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 44406a5ec15d..79481f4cf264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -48,9 +48,14 @@
 #include "eswitch.h"
 #include "vxlan.h"
 
+enum {
+	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
+};
+
 struct mlx5e_tc_flow {
 	struct rhash_head	node;
 	u64			cookie;
+	u8			flags;
 	struct mlx5_flow_handle *rule;
 	struct list_head	encap; /* flows sharing the same encap */
 	struct mlx5_esw_flow_attr *attr;
@@ -177,7 +182,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 		mlx5_fc_destroy(priv->mdev, counter);
 	}
 
-	if (esw && esw->mode == SRIOV_OFFLOADS) {
+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
 		mlx5_eswitch_del_vlan_action(esw, flow->attr);
 		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
 			mlx5e_detach_encap(priv, flow);
@@ -598,6 +603,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 }
 
 static int parse_cls_flower(struct mlx5e_priv *priv,
+			    struct mlx5e_tc_flow *flow,
 			    struct mlx5_flow_spec *spec,
 			    struct tc_cls_flower_offload *f)
 {
@@ -609,7 +615,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 
 	err = __parse_cls_flower(priv, spec, f, &min_inline);
 
-	if (!err && esw->mode == SRIOV_OFFLOADS &&
+	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
 	    rep->vport != FDB_UPLINK_VPORT) {
 		if (min_inline > esw->offloads.inline_mode) {
 			netdev_warn(priv->netdev,
@@ -1132,23 +1138,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 			   struct tc_cls_flower_offload *f)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
-	int err = 0;
-	bool fdb_flow = false;
+	int err, attr_size = 0;
 	u32 flow_tag, action;
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_flow_spec *spec;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	u8 flow_flags = 0;
 
-	if (esw && esw->mode == SRIOV_OFFLOADS)
-		fdb_flow = true;
-
-	if (fdb_flow)
-		flow = kzalloc(sizeof(*flow) +
-			       sizeof(struct mlx5_esw_flow_attr),
-			       GFP_KERNEL);
-	else
-		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+	if (esw && esw->mode == SRIOV_OFFLOADS) {
+		flow_flags = MLX5E_TC_FLOW_ESWITCH;
+		attr_size = sizeof(struct mlx5_esw_flow_attr);
+	}
 
+	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
 	spec = mlx5_vzalloc(sizeof(*spec));
 	if (!spec || !flow) {
 		err = -ENOMEM;
@@ -1156,12 +1158,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	}
 
 	flow->cookie = f->cookie;
+	flow->flags = flow_flags;
 
-	err = parse_cls_flower(priv, spec, f);
+	err = parse_cls_flower(priv, flow, spec, f);
 	if (err < 0)
 		goto err_free;
 
-	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
 		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
 		err = parse_tc_fdb_actions(priv, f->exts, flow);
 		if (err < 0)
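The two kzalloc() branches collapse into one because the eswitch attribute is laid out as a flexible tail behind the flow struct: attr_size is zero for plain NIC flows, and flow->attr points just past the header otherwise. A generic sketch of that single-allocation layout trick (flow_hdr and flow_alloc are illustrative names):

	#include <linux/slab.h>

	struct flow_hdr { unsigned long cookie; u8 flags; }; /* illustrative */

	static struct flow_hdr *flow_alloc(size_t priv_size, void **priv)
	{
		/* header and optional private area in one allocation */
		struct flow_hdr *h = kzalloc(sizeof(*h) + priv_size,
					     GFP_KERNEL);

		if (h)
			*priv = priv_size ? (void *)(h + 1) : NULL;
		return h;
	}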
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2478516a61e2..ded27bb9a3b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1136,7 +1136,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
1136 u32 *match_criteria) 1136 u32 *match_criteria)
1137{ 1137{
1138 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 1138 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1139 struct list_head *prev = ft->node.children.prev; 1139 struct list_head *prev = &ft->node.children;
1140 unsigned int candidate_index = 0; 1140 unsigned int candidate_index = 0;
1141 struct mlx5_flow_group *fg; 1141 struct mlx5_flow_group *fg;
1142 void *match_criteria_addr; 1142 void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c4242a4e8130..e2bd600d19de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev,
 	if (err)
 		goto clean_load;
 
+	pci_save_state(pdev);
 	return 0;
 
 clean_load:
@@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state and drain the health wq */
+	/* In case of kernel call drain the health wq */
 	if (state) {
-		pci_save_state(pdev);
 		mlx5_drain_health_wq(dev);
 		mlx5_pci_disable_device(dev);
 	}
@@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
+	pci_save_state(pdev);
 
 	if (wait_vital(pdev)) {
 		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
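The pci_save_state() shuffle follows from pci_restore_state() consuming the saved snapshot (it clears the device's state_saved flag): save once at the end of probe, then re-save immediately after every restore so the next error-recovery cycle still has valid config space to restore. A sketch of the slot-reset step (slot_reset_prepare is an illustrative name):

	#include <linux/pci.h>

	static int slot_reset_prepare(struct pci_dev *pdev)
	{
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev); /* keep a snapshot for the next cycle */
		return 0;
	}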
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0899e2d310e2..d9616daf8a70 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
 #define MLXSW_REG_SPVM_ID 0x200F
 #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN +	\
 		    MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
 
@@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
 #define MLXSW_REG_SPVMLR_ID 0x2020
 #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
 			      MLXSW_REG_SPVMLR_REC_LEN * \
 			      MLXSW_REG_SPVMLR_REC_MAX_COUNT)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 22ab42925377..ae6cccc666e4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -303,11 +303,11 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
303 ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, 303 ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
304 ingress, 304 ingress,
305 MLXSW_SP_ACL_PROFILE_FLOWER); 305 MLXSW_SP_ACL_PROFILE_FLOWER);
306 if (WARN_ON(IS_ERR(ruleset))) 306 if (IS_ERR(ruleset))
307 return; 307 return;
308 308
309 rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); 309 rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
310 if (!WARN_ON(!rule)) { 310 if (rule) {
311 mlxsw_sp_acl_rule_del(mlxsw_sp, rule); 311 mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
312 mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); 312 mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
313 } 313 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index d42d03df751a..7e3a6fed3da6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
422 u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val; 422 u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
423 u32 cxt_size = CONN_CXT_SIZE(p_hwfn); 423 u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
424 u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; 424 u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
425 u32 align = elems_per_page * DQ_RANGE_ALIGN;
425 426
426 p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page); 427 p_conn->cid_count = roundup(p_conn->cid_count, align);
427 } 428 }
428} 429}
429 430
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e2a081ceaf52..e518f914eab1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
 	 * size/capacity fields are of a u32 type.
 	 */
 	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-	     chain_size > 0x10000) ||
-	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
-	     chain_size > 0x100000000ULL)) {
+	     chain_size > ((u32)U16_MAX + 1)) ||
+	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
 		DP_NOTICE(cdev,
 			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
 			  chain_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 3a44d6b395fa..098766f7fe88 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
 	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
 	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+	p_init->ooo_enable = p_params->ooo_enable;
+	p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
+				  p_params->ll2_ooo_queue_id;
 	p_init->func_params.log_page_size = p_params->log_page_size;
 	val = p_params->num_tasks;
 	p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
 	spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
 }
 
+void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+			       struct qed_iscsi_conn *p_conn)
+{
+	qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(struct tcp_upload_params),
+			  p_conn->tcp_upload_params_virt_addr,
+			  p_conn->tcp_upload_params_phys_addr);
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(struct scsi_terminate_extra_params),
+			  p_conn->queue_cnts_virt_addr,
+			  p_conn->queue_cnts_phys_addr);
+	kfree(p_conn);
+}
+
 struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_iscsi_info *p_iscsi_info;
@@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
 void qed_iscsi_free(struct qed_hwfn *p_hwfn,
 		    struct qed_iscsi_info *p_iscsi_info)
 {
+	struct qed_iscsi_conn *p_conn = NULL;
+
+	while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
+		p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+					  struct qed_iscsi_conn, list_entry);
+		if (p_conn) {
+			list_del(&p_conn->list_entry);
+			qed_iscsi_free_connection(p_hwfn, p_conn);
+		}
+	}
+
 	kfree(p_iscsi_info);
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 9a0b9af10a57..0d3cef409c96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
 	/* If need to reuse or there's no replacement buffer, repost this */
 	if (rc)
 		goto out_post;
+	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
 
 	skb = build_skb(buffer->data, 0);
 	if (!skb) {
@@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 				      struct qed_ll2_info *p_ll2_conn,
 				      union core_rx_cqe_union *p_cqe,
-				      unsigned long lock_flags,
+				      unsigned long *p_lock_flags,
 				      bool b_last_cqe)
 {
 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 		     "Mismatch between active_descq and the LL2 Rx chain\n");
 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
 	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
 				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
-	spin_lock_irqsave(&p_rx->lock, lock_flags);
+	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
 
 	return 0;
 }
@@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 			break;
 		case CORE_RX_CQE_TYPE_REGULAR:
 			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
-							cqe, flags, b_last_cqe);
+							cqe, &flags,
+							b_last_cqe);
 			break;
 		default:
 			rc = -EIO;
@@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
 {
 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
 	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-	struct qed_ll2_conn ll2_info;
+	struct qed_ll2_conn ll2_info = { 0 };
 	int rc;
 
 	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
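
The p_lock_flags conversion above follows from how spin_lock_irqsave() works: it writes the saved IRQ state into the caller's flags variable, so a helper that drops and re-takes the lock must update the caller's copy, not a by-value duplicate, or the outer spin_unlock_irqrestore() later restores stale state. A minimal sketch of the pattern (hypothetical helper name):

    #include <linux/spinlock.h>

    /* The callee unlocks and relocks, so it takes the flags by pointer;
     * spin_lock_irqsave() rewrites *flags with the freshly saved state
     * that the caller will eventually restore.
     */
    static void demo_complete_unlocked(spinlock_t *lock, unsigned long *flags)
    {
            spin_unlock_irqrestore(lock, *flags);
            /* ... deliver the completion without the lock held ... */
            spin_lock_irqsave(lock, *flags);
    }
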
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 7d731c6cb892..378afce58b3f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	if (!p_ooo_info->ooo_history.p_cqes)
 		goto no_history_mem;
 
+	p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
 	return p_ooo_info;
 
 no_history_mem:
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 6d31f92ef2b6..84ac50f92c9c 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp {
 struct ib_mac_iocb_rsp {
 	u8 opcode;	/* 0x20 */
 	u8 flags1;
-#define IB_MAC_IOCB_RSP_OI	0x01	/* Overide intr delay */
-#define IB_MAC_IOCB_RSP_I	0x02	/* Disble Intr Generation */
+#define IB_MAC_IOCB_RSP_OI	0x01	/* Override intr delay */
+#define IB_MAC_IOCB_RSP_I	0x02	/* Disable Intr Generation */
 #define IB_MAC_CSUM_ERR_MASK	0x1c	/* A mask to use for csum errs */
 #define IB_MAC_IOCB_RSP_TE	0x04	/* Checksum error */
 #define IB_MAC_IOCB_RSP_NU	0x08	/* No checksum rcvd */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 65077c77082a..91e9bd7159ab 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev)
  * Ethtool support
  */
 static int
-smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_get_link_ksettings(struct net_device *dev,
+			       struct ethtool_link_ksettings *cmd)
 {
 	struct smc_local *lp = netdev_priv(dev);
 	int ret;
 
-	cmd->maxtxpkt = 1;
-	cmd->maxrxpkt = 1;
-
 	if (lp->phy_type != 0) {
 		spin_lock_irq(&lp->lock);
-		ret = mii_ethtool_gset(&lp->mii, cmd);
+		ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
 		spin_unlock_irq(&lp->lock);
 	} else {
-		cmd->supported = SUPPORTED_10baseT_Half |
+		u32 supported = SUPPORTED_10baseT_Half |
 				 SUPPORTED_10baseT_Full |
 				 SUPPORTED_TP | SUPPORTED_AUI;
 
 		if (lp->ctl_rspeed == 10)
-			ethtool_cmd_speed_set(cmd, SPEED_10);
+			cmd->base.speed = SPEED_10;
 		else if (lp->ctl_rspeed == 100)
-			ethtool_cmd_speed_set(cmd, SPEED_100);
+			cmd->base.speed = SPEED_100;
+
+		cmd->base.autoneg = AUTONEG_DISABLE;
+		cmd->base.port = 0;
+		cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
+			DUPLEX_FULL : DUPLEX_HALF;
 
-		cmd->autoneg = AUTONEG_DISABLE;
-		cmd->transceiver = XCVR_INTERNAL;
-		cmd->port = 0;
-		cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
+		ethtool_convert_legacy_u32_to_link_mode(
+			cmd->link_modes.supported, supported);
 
 		ret = 0;
 	}
@@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
 }
 
 static int
-smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_set_link_ksettings(struct net_device *dev,
+			       const struct ethtool_link_ksettings *cmd)
 {
 	struct smc_local *lp = netdev_priv(dev);
 	int ret;
 
 	if (lp->phy_type != 0) {
 		spin_lock_irq(&lp->lock);
-		ret = mii_ethtool_sset(&lp->mii, cmd);
+		ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
 		spin_unlock_irq(&lp->lock);
 	} else {
-		if (cmd->autoneg != AUTONEG_DISABLE ||
-		    cmd->speed != SPEED_10 ||
-		    (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
-		    (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+		if (cmd->base.autoneg != AUTONEG_DISABLE ||
+		    cmd->base.speed != SPEED_10 ||
+		    (cmd->base.duplex != DUPLEX_HALF &&
+		     cmd->base.duplex != DUPLEX_FULL) ||
+		    (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
 			return -EINVAL;
 
-//		lp->port = cmd->port;
-		lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+//		lp->port = cmd->base.port;
+		lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
 
 //		if (netif_running(dev))
 //			smc_set_port(dev);
@@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
 
 
 static const struct ethtool_ops smc_ethtool_ops = {
-	.get_settings	= smc_ethtool_getsettings,
-	.set_settings	= smc_ethtool_setsettings,
 	.get_drvinfo	= smc_ethtool_getdrvinfo,
 
 	.get_msglevel	= smc_ethtool_getmsglevel,
@@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = {
 	.get_eeprom_len	= smc_ethtool_geteeprom_len,
 	.get_eeprom	= smc_ethtool_geteeprom,
 	.set_eeprom	= smc_ethtool_seteeprom,
+	.get_link_ksettings	= smc_ethtool_get_link_ksettings,
+	.set_link_ksettings	= smc_ethtool_set_link_ksettings,
 };
 
 static const struct net_device_ops smc_netdev_ops = {
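
The smc91x conversion above is the standard migration from the legacy get_settings/set_settings ethtool_cmd callbacks to the link_ksettings API: speed, duplex, autoneg and port move under cmd->base, supported modes become link-mode bitmaps, and ethtool_convert_legacy_u32_to_link_mode() bridges old u32 masks. A condensed sketch of the get path for a PHY-less MAC (the demo_ name is illustrative):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int demo_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
    {
            u32 supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;

            cmd->base.speed = SPEED_10;     /* replaces ethtool_cmd_speed_set() */
            cmd->base.duplex = DUPLEX_HALF;
            cmd->base.autoneg = AUTONEG_DISABLE;

            /* legacy u32 capability mask -> link-mode bitmap */
            ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                    supported);
            return 0;
    }
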
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d3e73ac158ae..f9f3dba7a588 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -700,6 +700,8 @@ struct net_device_context {
 
 	u32 tx_checksum_mask;
 
+	u32 tx_send_table[VRSS_SEND_TAB_SIZE];
+
 	/* Ethtool settings */
 	u8 duplex;
 	u32 speed;
@@ -757,7 +759,6 @@ struct netvsc_device {
 
 	struct nvsp_message revoke_packet;
 
-	u32 send_table[VRSS_SEND_TAB_SIZE];
 	u32 max_chn;
 	u32 num_chn;
 	spinlock_t sc_lock; /* Protects num_sc_offered variable */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d35ebd993b38..4c1d8cca247b 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1136,15 +1136,11 @@ static void netvsc_receive(struct net_device *ndev,
 static void netvsc_send_table(struct hv_device *hdev,
 			      struct nvsp_message *nvmsg)
 {
-	struct netvsc_device *nvscdev;
 	struct net_device *ndev = hv_get_drvdata(hdev);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 	int i;
 	u32 count, *tab;
 
-	nvscdev = get_outbound_net_device(hdev);
-	if (!nvscdev)
-		return;
-
 	count = nvmsg->msg.v5_msg.send_table.count;
 	if (count != VRSS_SEND_TAB_SIZE) {
 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1155,7 +1151,7 @@ static void netvsc_send_table(struct hv_device *hdev,
 		      nvmsg->msg.v5_msg.send_table.offset);
 
 	for (i = 0; i < count; i++)
-		nvscdev->send_table[i] = tab[i];
+		net_device_ctx->tx_send_table[i] = tab[i];
 }
 
 static void netvsc_send_vf(struct net_device_context *net_device_ctx,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index bc05c895d958..5ede87f30463 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 			void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
-	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
+	unsigned int num_tx_queues = ndev->real_num_tx_queues;
 	struct sock *sk = skb->sk;
 	int q_idx = sk_tx_queue_get(sk);
 
-	if (q_idx < 0 || skb->ooo_okay ||
-	    q_idx >= ndev->real_num_tx_queues) {
+	if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
 		u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
 		int new_idx;
 
-		new_idx = nvsc_dev->send_table[hash]
-			  % nvsc_dev->num_chn;
+		new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;
 
 		if (q_idx != new_idx && sk &&
 		    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
@@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 		q_idx = new_idx;
 	}
 
-	if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
-		q_idx = 0;
-
 	return q_idx;
 }
 
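
Moving the table into net_device_context means the transmit-queue pick above no longer dereferences the netvsc_device, which is destroyed and re-created on MTU or channel changes; the hot path is just a hash lookup into the host-supplied indirection table, reduced modulo the current queue count. A sketch of that lookup (the demo_ name is illustrative):

    /* Indirection-table lookup: the table lives in long-lived per-netdev
     * state, so no short-lived device object is touched on this path.
     */
    static u16 demo_pick_queue(const u32 *tx_send_table, u16 hash,
                               unsigned int num_tx_queues)
    {
            return tx_send_table[hash] % num_tx_queues;
    }
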
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f9d0fa315a47..272b051a0199 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev)
 	return m88e1510_hwmon_probe(phydev);
 }
 
-static void marvell_remove(struct phy_device *phydev)
-{
-#ifdef CONFIG_HWMON
-
-	struct marvell_priv *priv = phydev->priv;
-
-	if (priv && priv->hwmon_dev)
-		hwmon_device_unregister(priv->hwmon_dev);
-#endif
-}
-
 static struct phy_driver marvell_drivers[] = {
 	{
 		.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = {
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = &m88e1121_probe,
-		.remove = &marvell_remove,
 		.config_init = &m88e1121_config_init,
 		.config_aneg = &m88e1121_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = {
 		.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = &m88e1510_probe,
-		.remove = &marvell_remove,
 		.config_init = &m88e1510_config_init,
 		.config_aneg = &m88e1510_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = {
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = m88e1510_probe,
-		.remove = &marvell_remove,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1510_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2127,7 +2113,6 @@ static struct phy_driver marvell_drivers[] = {
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1545",
 		.probe = m88e1510_probe,
-		.remove = &marvell_remove,
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.config_init = &marvell_config_init,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index daec6555f3b1..5198ccfa347f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1864,7 +1864,7 @@ static struct phy_driver genphy_driver[] = {
 		.phy_id		= 0xffffffff,
 		.phy_id_mask	= 0xffffffff,
 		.name		= "Generic PHY",
-		.soft_reset	= genphy_soft_reset,
+		.soft_reset	= genphy_no_soft_reset,
 		.config_init	= genphy_config_init,
 		.features	= PHY_GBIT_FEATURES | SUPPORTED_MII |
 				  SUPPORTED_AUI | SUPPORTED_FIBRE |
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 93ffedfa2994..1e2d4f1179da 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
 	if (err)
 		return err;
 
-	ks->regs_attr.size = ks->chip->regs_size;
 	memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+	ks->regs_attr.size = ks->chip->regs_size;
 
 	err = ks8995_reset(ks);
 	if (err)
 		return err;
 
+	sysfs_attr_init(&ks->regs_attr.attr);
 	err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
 	if (err) {
 		dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
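
The probe fix above is pure ordering: the size override used to be written first and then clobbered by the memcpy() of the attribute template, and sysfs_attr_init() is now called before registration. A small userspace sketch of the clobbering bug:

    #include <stdio.h>
    #include <string.h>

    struct demo_attr { unsigned long size; };       /* stand-in structure */

    int main(void)
    {
            const struct demo_attr template = { .size = 0 };
            struct demo_attr a;

            a.size = 512;                           /* written first ... */
            memcpy(&a, &template, sizeof(a));       /* ... wiped here */
            printf("wrong order: %lu\n", a.size);   /* prints 0 */

            memcpy(&a, &template, sizeof(a));       /* copy template first */
            a.size = 512;                           /* then patch fields */
            printf("fixed order: %lu\n", a.size);   /* prints 512 */
            return 0;
    }
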
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 4a24b5d15f5a..1b52520715ae 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev,
 static void team_setup(struct net_device *dev)
 {
 	ether_setup(dev);
+	dev->max_mtu = ETH_MAX_MTU;
 
 	dev->netdev_ops = &team_netdev_ops;
 	dev->ethtool_ops = &team_ethtool_ops;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dc1b1dd9157c..34cc3c590aa5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -822,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev)
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
+	struct tun_struct *tun = netdev_priv(dev);
+	int i;
+
 	netif_tx_start_all_queues(dev);
+
+	for (i = 0; i < tun->numqueues; i++) {
+		struct tun_file *tfile;
+
+		tfile = rtnl_dereference(tun->tfiles[i]);
+		tfile->socket.sk->sk_write_space(tfile->socket.sk);
+	}
+
 	return 0;
 }
 
@@ -1103,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 	if (!skb_array_empty(&tfile->tx_array))
 		mask |= POLLIN | POLLRDNORM;
 
-	if (sock_writeable(sk) ||
-	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-	     sock_writeable(sk)))
+	if (tun->dev->flags & IFF_UP &&
+	    (sock_writeable(sk) ||
+	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+	      sock_writeable(sk))))
 		mask |= POLLOUT | POLLWRNORM;
 
 	if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -2570,7 +2582,6 @@ static int __init tun_init(void)
 	int ret = 0;
 
 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
-	pr_info("%s\n", DRV_COPYRIGHT);
 
 	ret = rtnl_link_register(&tun_link_ops);
 	if (ret) {
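
The poll change above stops reporting writability while the interface is down, and the new loop in tun_net_open() calls sk_write_space() on every queue so pollers parked during the down period are woken when the device comes up. A sketch of the gating logic (the demo_ name is illustrative; it omits the SOCKWQ_ASYNC_NOSPACE retest):

    #include <linux/types.h>
    #include <linux/poll.h>

    /* POLLOUT is only advertised when the netdev is up; otherwise the
     * writer waits for the sk_write_space() kick issued from open.
     */
    static unsigned int demo_poll_mask(bool dev_up, bool sock_writeable_now)
    {
            unsigned int mask = 0;

            if (dev_up && sock_writeable_now)
                    mask |= POLLOUT | POLLWRNORM;
            return mask;
    }
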
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 22379da63400..fea687f35b5a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -340,6 +340,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
 
 static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	int len = skb->len;
 	netdev_tx_t ret = is_ip_tx_frame(skb, dev);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -347,7 +348,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		u64_stats_update_begin(&dstats->syncp);
 		dstats->tx_pkts++;
-		dstats->tx_bytes += skb->len;
+		dstats->tx_bytes += len;
 		u64_stats_update_end(&dstats->syncp);
 	} else {
 		this_cpu_inc(dev->dstats->tx_drps);
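
Caching skb->len above closes a use-after-free: once is_ip_tx_frame() hands the skb to the lower layers it may already be freed, so the byte count for stats has to be captured beforehand. A sketch of the rule (the demo_ name is illustrative):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int demo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            int len = skb->len;             /* save before ownership passes */
            int ret = dev_queue_xmit(skb);  /* skb may be freed from here on */

            if (ret == NET_XMIT_SUCCESS)
                    dev->stats.tx_bytes += len;     /* not skb->len */
            return ret;
    }
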
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e375560cc74e..bdb6ae16d4a8 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2976,6 +2976,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	return 0;
 }
 
+static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+			      struct vxlan_config *conf)
+{
+	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	int err;
+
+	err = vxlan_dev_configure(net, dev, conf, false);
+	if (err)
+		return err;
+
+	dev->ethtool_ops = &vxlan_ethtool_ops;
+
+	/* create an fdb entry for a valid default destination */
+	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+		err = vxlan_fdb_create(vxlan, all_zeros_mac,
+				       &vxlan->default_dst.remote_ip,
+				       NUD_REACHABLE | NUD_PERMANENT,
+				       NLM_F_EXCL | NLM_F_CREATE,
+				       vxlan->cfg.dst_port,
+				       vxlan->default_dst.remote_vni,
+				       vxlan->default_dst.remote_vni,
+				       vxlan->default_dst.remote_ifindex,
+				       NTF_SELF);
+		if (err)
+			return err;
+	}
+
+	err = register_netdevice(dev);
+	if (err) {
+		vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+		return err;
+	}
+
+	list_add(&vxlan->next, &vn->vxlan_list);
+	return 0;
+}
+
 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
 			 struct net_device *dev, struct vxlan_config *conf,
 			 bool changelink)
@@ -3172,8 +3210,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
 			 struct nlattr *tb[], struct nlattr *data[])
 {
-	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
-	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_config conf;
 	int err;
 
@@ -3181,36 +3217,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
 	if (err)
 		return err;
 
-	err = vxlan_dev_configure(src_net, dev, &conf, false);
-	if (err)
-		return err;
-
-	dev->ethtool_ops = &vxlan_ethtool_ops;
-
-	/* create an fdb entry for a valid default destination */
-	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
-		err = vxlan_fdb_create(vxlan, all_zeros_mac,
-				       &vxlan->default_dst.remote_ip,
-				       NUD_REACHABLE | NUD_PERMANENT,
-				       NLM_F_EXCL | NLM_F_CREATE,
-				       vxlan->cfg.dst_port,
-				       vxlan->default_dst.remote_vni,
-				       vxlan->default_dst.remote_vni,
-				       vxlan->default_dst.remote_ifindex,
-				       NTF_SELF);
-		if (err)
-			return err;
-	}
-
-	err = register_netdevice(dev);
-	if (err) {
-		vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
-		return err;
-	}
-
-	list_add(&vxlan->next, &vn->vxlan_list);
-
-	return 0;
+	return __vxlan_dev_create(src_net, dev, &conf);
 }
 
 static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3440,7 +3447,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
 	if (IS_ERR(dev))
 		return dev;
 
-	err = vxlan_dev_configure(net, dev, conf, false);
+	err = __vxlan_dev_create(net, dev, conf);
 	if (err < 0) {
 		free_netdev(dev);
 		return ERR_PTR(err);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index a5045b5279d7..6742ae605660 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
 	/* set bd status and length */
 	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
 
-	iowrite16be(bd_status, &bd->status);
 	iowrite16be(skb->len, &bd->length);
+	iowrite16be(bd_status, &bd->status);
 
 	/* Move to next BD in the ring */
 	if (!(bd_status & T_W_S))
@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
 	struct sk_buff *skb;
 	hdlc_device *hdlc = dev_to_hdlc(dev);
 	struct qe_bd *bd;
-	u32 bd_status;
+	u16 bd_status;
 	u16 length, howmany = 0;
 	u8 *bdbuffer;
 	int i;
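
The reordering above matters because the BD status word carries the bit that hands the descriptor to the controller: every other field (here the length) must land first, or the hardware can race against a half-written descriptor. A sketch of the rule with an illustrative descriptor layout:

    #include <linux/types.h>
    #include <linux/io.h>

    struct demo_bd {                /* illustrative, not the QE layout */
            __be16 status;          /* holds the hardware-ready bit */
            __be16 length;
    };

    static void demo_bd_submit(struct demo_bd __iomem *bd, u16 len, u16 status)
    {
            iowrite16be(len, &bd->length);          /* payload fields first */
            iowrite16be(status, &bd->status);       /* ownership flip last */
    }
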
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index e7f5910a6519..f8eb66ef2944 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
 	struct i2400mu *i2400mu;
 	struct usb_device *usb_dev = interface_to_usbdev(iface);
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+		return -ENODEV;
+
 	if (usb_dev->speed != USB_SPEED_HIGH)
 		dev_err(dev, "device not connected as high speed\n");
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 829b26cd4549..8397f6c92451 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->num_queues;
+	unsigned int num_queues;
 	u16 index;
 	struct xenvif_rx_cb *cb;
 
 	BUG_ON(skb->dev != dev);
 
-	/* Drop the packet if queues are not set up */
+	/* Drop the packet if queues are not set up.
+	 * This handler should be called inside an RCU read section
+	 * so we don't need to enter it here explicitly.
+	 */
+	num_queues = READ_ONCE(vif->num_queues);
 	if (num_queues < 1)
 		goto drop;
 
@@ -222,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
+	unsigned int num_queues;
 	u64 rx_bytes = 0;
 	u64 rx_packets = 0;
 	u64 tx_bytes = 0;
 	u64 tx_packets = 0;
 	unsigned int index;
 
-	spin_lock(&vif->lock);
-	if (vif->queues == NULL)
-		goto out;
+	rcu_read_lock();
+	num_queues = READ_ONCE(vif->num_queues);
 
 	/* Aggregate tx and rx stats from each queue */
-	for (index = 0; index < vif->num_queues; ++index) {
+	for (index = 0; index < num_queues; ++index) {
 		queue = &vif->queues[index];
 		rx_bytes += queue->stats.rx_bytes;
 		rx_packets += queue->stats.rx_packets;
@@ -241,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 		tx_packets += queue->stats.tx_packets;
 	}
 
-out:
-	spin_unlock(&vif->lock);
+	rcu_read_unlock();
 
 	vif->dev->stats.rx_bytes = rx_bytes;
 	vif->dev->stats.rx_packets = rx_packets;
@@ -378,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 * data)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	unsigned int num_queues = vif->num_queues;
+	unsigned int num_queues;
 	int i;
 	unsigned int queue_index;
 
+	rcu_read_lock();
+	num_queues = READ_ONCE(vif->num_queues);
+
 	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
 		unsigned long accum = 0;
 		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -390,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
 		}
 		data[i] = accum;
 	}
+
+	rcu_read_unlock();
 }
 
 static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f9bcf4a665bc..602d408fa25e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
 	netdev_err(vif->dev, "fatal error; disabling device\n");
 	vif->disabled = true;
 	/* Disable the vif from queue 0's kthread */
-	if (vif->queues)
+	if (vif->num_queues)
 		xenvif_kick_thread(&vif->queues[0]);
 }
 
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d2d7cd9145b1..a56d3eab35dd 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -495,26 +495,26 @@ static void backend_disconnect(struct backend_info *be)
 	struct xenvif *vif = be->vif;
 
 	if (vif) {
+		unsigned int num_queues = vif->num_queues;
 		unsigned int queue_index;
-		struct xenvif_queue *queues;
 
 		xen_unregister_watchers(vif);
 #ifdef CONFIG_DEBUG_FS
 		xenvif_debugfs_delif(vif);
 #endif /* CONFIG_DEBUG_FS */
 		xenvif_disconnect_data(vif);
-		for (queue_index = 0;
-		     queue_index < vif->num_queues;
-		     ++queue_index)
-			xenvif_deinit_queue(&vif->queues[queue_index]);
 
-		spin_lock(&vif->lock);
-		queues = vif->queues;
+		/* At this point some of the handlers may still be active
+		 * so we need to have additional synchronization here.
+		 */
 		vif->num_queues = 0;
-		vif->queues = NULL;
-		spin_unlock(&vif->lock);
+		synchronize_net();
 
-		vfree(queues);
+		for (queue_index = 0; queue_index < num_queues; ++queue_index)
+			xenvif_deinit_queue(&vif->queues[queue_index]);
+
+		vfree(vif->queues);
+		vif->queues = NULL;
 
 		xenvif_disconnect_ctrl(vif);
 	}
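
Across the three xen-netback files above, the vif->lock protocol is replaced by RCU: data-path readers sample num_queues once inside an RCU read-side section, and teardown unpublishes the queues by zeroing the count and waiting in synchronize_net() before deinitializing and freeing the array. A condensed sketch of the pairing (the demo_ structures are illustrative):

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>
    #include <linux/vmalloc.h>

    struct demo_vif {
            unsigned int num_queues;
            struct demo_queue *queues;
    };

    static void demo_reader(struct demo_vif *vif)
    {
            unsigned int n, i;

            rcu_read_lock();
            n = READ_ONCE(vif->num_queues);
            for (i = 0; i < n; i++)
                    ;       /* vif->queues[i] is safe to touch here */
            rcu_read_unlock();
    }

    static void demo_teardown(struct demo_vif *vif)
    {
            vif->num_queues = 0;    /* unpublish */
            synchronize_net();      /* drain in-flight readers */
            vfree(vif->queues);
            vif->queues = NULL;
    }
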
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 993b650ef275..44f774c12fb2 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -132,10 +132,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
 	struct device *dev = pci->dev;
 	struct resource *res;
 
-	/* If using the PHY framework, doesn't need to get other resource */
-	if (ep->using_phy)
-		return 0;
-
 	ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
 	if (!ep->mem_res)
 		return -ENOMEM;
@@ -145,6 +141,10 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
 	if (IS_ERR(ep->mem_res->elbi_base))
 		return PTR_ERR(ep->mem_res->elbi_base);
 
+	/* If using the PHY framework, doesn't need to get other resource */
+	if (ep->using_phy)
+		return 0;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(ep->mem_res->phy_base))
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 973472c23d89..1dfa10cc566b 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -478,7 +478,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
 
 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 {
-	struct pci_dev *child, *parent = link->pdev;
+	struct pci_dev *child = link->downstream, *parent = link->pdev;
 	struct pci_bus *linkbus = parent->subordinate;
 	struct aspm_register_info upreg, dwreg;
 
@@ -491,9 +491,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 
 	/* Get upstream/downstream components' register state */
 	pcie_get_aspm_reg(parent, &upreg);
-	child = pci_function_0(linkbus);
 	pcie_get_aspm_reg(child, &dwreg);
-	link->downstream = child;
 
 	/*
 	 * If ASPM not supported, don't mess with the clocks and link,
@@ -800,6 +798,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
 	INIT_LIST_HEAD(&link->children);
 	INIT_LIST_HEAD(&link->link);
 	link->pdev = pdev;
+	link->downstream = pci_function_0(pdev->subordinate);
 
 	/*
 	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f754453fe754..673683660b5c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2174,6 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
 			quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
 
 /*
  * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f8e9e1c2b2f6..c978be5eb9eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -422,6 +422,20 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
 	return 0;
 }
 
+static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+	struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
+	const struct msm_pingroup *g;
+	u32 val;
+
+	g = &pctrl->soc->groups[offset];
+
+	val = readl(pctrl->regs + g->ctl_reg);
+
+	/* 0 = output, 1 = input */
+	return val & BIT(g->oe_bit) ? 0 : 1;
+}
+
 static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	const struct msm_pingroup *g;
@@ -510,6 +524,7 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 static struct gpio_chip msm_gpio_template = {
 	.direction_input  = msm_gpio_direction_input,
 	.direction_output = msm_gpio_direction_output,
+	.get_direction    = msm_gpio_get_direction,
 	.get              = msm_gpio_get,
 	.set              = msm_gpio_set,
 	.request          = gpiochip_generic_request,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 77a0236ee781..83f8864fa76a 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -390,22 +390,22 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = {
 	UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140,
 			     140, UNIPHIER_PIN_DRV_1BIT,
 			     140, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(141, "TCON0", 141,
+	UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141,
 			     141, UNIPHIER_PIN_DRV_1BIT,
 			     141, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(142, "TCON1", 142,
+	UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142,
 			     142, UNIPHIER_PIN_DRV_1BIT,
 			     142, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(143, "TCON2", 143,
+	UNIPHIER_PINCTRL_PIN(143, "XIRQ9", 143,
 			     143, UNIPHIER_PIN_DRV_1BIT,
 			     143, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(144, "TCON3", 144,
+	UNIPHIER_PINCTRL_PIN(144, "XIRQ10", 144,
 			     144, UNIPHIER_PIN_DRV_1BIT,
 			     144, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(145, "TCON4", 145,
+	UNIPHIER_PINCTRL_PIN(145, "XIRQ11", 145,
 			     145, UNIPHIER_PIN_DRV_1BIT,
 			     145, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(146, "TCON5", 146,
+	UNIPHIER_PINCTRL_PIN(146, "XIRQ13", 146,
 			     146, UNIPHIER_PIN_DRV_1BIT,
 			     146, UNIPHIER_PIN_PULL_DOWN),
 	UNIPHIER_PINCTRL_PIN(147, "PWMA", 147,
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 5be4783e40d4..dea98ffb6f60 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -103,15 +103,6 @@ static struct quirk_entry quirk_asus_x200ca = {
 	.wapf = 2,
 };
 
-static struct quirk_entry quirk_no_rfkill = {
-	.no_rfkill = true,
-};
-
-static struct quirk_entry quirk_no_rfkill_wapf4 = {
-	.wapf = 4,
-	.no_rfkill = true,
-};
-
 static struct quirk_entry quirk_asus_ux303ub = {
 	.wmi_backlight_native = true,
 };
@@ -194,7 +185,7 @@ static const struct dmi_system_id asus_quirks[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
 		},
-		.driver_data = &quirk_no_rfkill_wapf4,
+		.driver_data = &quirk_asus_wapf4,
 	},
 	{
 		.callback = dmi_matched,
@@ -203,7 +194,7 @@ static const struct dmi_system_id asus_quirks[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
 		},
-		.driver_data = &quirk_no_rfkill_wapf4,
+		.driver_data = &quirk_asus_wapf4,
 	},
 	{
 		.callback = dmi_matched,
@@ -369,42 +360,6 @@ static const struct dmi_system_id asus_quirks[] = {
 	},
 	{
 		.callback = dmi_matched,
-		.ident = "ASUSTeK COMPUTER INC. X555UB",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
-		},
-		.driver_data = &quirk_no_rfkill,
-	},
-	{
-		.callback = dmi_matched,
-		.ident = "ASUSTeK COMPUTER INC. N552VW",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
-		},
-		.driver_data = &quirk_no_rfkill,
-	},
-	{
-		.callback = dmi_matched,
-		.ident = "ASUSTeK COMPUTER INC. U303LB",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
-		},
-		.driver_data = &quirk_no_rfkill,
-	},
-	{
-		.callback = dmi_matched,
-		.ident = "ASUSTeK COMPUTER INC. Z550MA",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
-		},
-		.driver_data = &quirk_no_rfkill,
-	},
-	{
-		.callback = dmi_matched,
 		.ident = "ASUSTeK COMPUTER INC. UX303UB",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 43cb680adbb4..8fe5890bf539 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -159,6 +159,8 @@ MODULE_LICENSE("GPL");
 #define USB_INTEL_XUSB2PR		0xD0
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
 
+static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+
 struct bios_args {
 	u32 arg0;
 	u32 arg1;
@@ -2051,6 +2053,16 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
 	return 0;
 }
 
+static bool ashs_present(void)
+{
+	int i = 0;
+	while (ashs_ids[i]) {
+		if (acpi_dev_found(ashs_ids[i++]))
+			return true;
+	}
+	return false;
+}
+
 /*
  * WMI Driver
  */
@@ -2095,7 +2107,11 @@ static int asus_wmi_add(struct platform_device *pdev)
 	if (err)
 		goto fail_leds;
 
-	if (!asus->driver->quirks->no_rfkill) {
+	asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+	if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+		asus->driver->wlan_ctrl_by_user = 1;
+
+	if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
 		err = asus_wmi_rfkill_init(asus);
 		if (err)
 			goto fail_rfkill;
@@ -2134,10 +2150,6 @@ static int asus_wmi_add(struct platform_device *pdev)
 	if (err)
 		goto fail_debugfs;
 
-	asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
-	if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
-		asus->driver->wlan_ctrl_by_user = 1;
-
 	return 0;
 
 fail_debugfs:
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index fdff626c3b51..c9589d9342bb 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -39,7 +39,6 @@ struct key_entry;
 struct asus_wmi;
 
 struct quirk_entry {
-	bool no_rfkill;
 	bool hotplug_wireless;
 	bool scalar_panel_brightness;
 	bool store_backlight_power;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 2b218b1d13e5..e12cc3504d48 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -78,18 +78,18 @@
78 78
79#define FUJITSU_LCD_N_LEVELS 8 79#define FUJITSU_LCD_N_LEVELS 8
80 80
81#define ACPI_FUJITSU_CLASS "fujitsu" 81#define ACPI_FUJITSU_CLASS "fujitsu"
82#define ACPI_FUJITSU_HID "FUJ02B1" 82#define ACPI_FUJITSU_BL_HID "FUJ02B1"
83#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver" 83#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
84#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1" 84#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1"
85#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3" 85#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3"
86#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver" 86#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
87#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3" 87#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3"
88 88
89#define ACPI_FUJITSU_NOTIFY_CODE1 0x80 89#define ACPI_FUJITSU_NOTIFY_CODE1 0x80
90 90
91/* FUNC interface - command values */ 91/* FUNC interface - command values */
92#define FUNC_RFKILL 0x1000 92#define FUNC_FLAGS 0x1000
93#define FUNC_LEDS 0x1001 93#define FUNC_LEDS 0x1001
94#define FUNC_BUTTONS 0x1002 94#define FUNC_BUTTONS 0x1002
95#define FUNC_BACKLIGHT 0x1004 95#define FUNC_BACKLIGHT 0x1004
@@ -97,6 +97,11 @@
97/* FUNC interface - responses */ 97/* FUNC interface - responses */
98#define UNSUPPORTED_CMD 0x80000000 98#define UNSUPPORTED_CMD 0x80000000
99 99
100/* FUNC interface - status flags */
101#define FLAG_RFKILL 0x020
102#define FLAG_LID 0x100
103#define FLAG_DOCK 0x200
104
100#if IS_ENABLED(CONFIG_LEDS_CLASS) 105#if IS_ENABLED(CONFIG_LEDS_CLASS)
101/* FUNC interface - LED control */ 106/* FUNC interface - LED control */
102#define FUNC_LED_OFF 0x1 107#define FUNC_LED_OFF 0x1
@@ -136,7 +141,7 @@
136#endif 141#endif
137 142
138/* Device controlling the backlight and associated keys */ 143/* Device controlling the backlight and associated keys */
139struct fujitsu_t { 144struct fujitsu_bl {
140 acpi_handle acpi_handle; 145 acpi_handle acpi_handle;
141 struct acpi_device *dev; 146 struct acpi_device *dev;
142 struct input_dev *input; 147 struct input_dev *input;
@@ -150,12 +155,12 @@ struct fujitsu_t {
150 unsigned int brightness_level; 155 unsigned int brightness_level;
151}; 156};
152 157
153static struct fujitsu_t *fujitsu; 158static struct fujitsu_bl *fujitsu_bl;
154static int use_alt_lcd_levels = -1; 159static int use_alt_lcd_levels = -1;
155static int disable_brightness_adjust = -1; 160static int disable_brightness_adjust = -1;
156 161
157/* Device used to access other hotkeys on the laptop */ 162/* Device used to access hotkeys and other features on the laptop */
158struct fujitsu_hotkey_t { 163struct fujitsu_laptop {
159 acpi_handle acpi_handle; 164 acpi_handle acpi_handle;
160 struct acpi_device *dev; 165 struct acpi_device *dev;
161 struct input_dev *input; 166 struct input_dev *input;
@@ -163,17 +168,15 @@ struct fujitsu_hotkey_t {
163 struct platform_device *pf_device; 168 struct platform_device *pf_device;
164 struct kfifo fifo; 169 struct kfifo fifo;
165 spinlock_t fifo_lock; 170 spinlock_t fifo_lock;
166 int rfkill_supported; 171 int flags_supported;
167 int rfkill_state; 172 int flags_state;
168 int logolamp_registered; 173 int logolamp_registered;
169 int kblamps_registered; 174 int kblamps_registered;
170 int radio_led_registered; 175 int radio_led_registered;
171 int eco_led_registered; 176 int eco_led_registered;
172}; 177};
173 178
174static struct fujitsu_hotkey_t *fujitsu_hotkey; 179static struct fujitsu_laptop *fujitsu_laptop;
175
176static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
177 180
178#if IS_ENABLED(CONFIG_LEDS_CLASS) 181#if IS_ENABLED(CONFIG_LEDS_CLASS)
179static enum led_brightness logolamp_get(struct led_classdev *cdev); 182static enum led_brightness logolamp_get(struct led_classdev *cdev);
@@ -222,8 +225,6 @@ static struct led_classdev eco_led = {
222static u32 dbg_level = 0x03; 225static u32 dbg_level = 0x03;
223#endif 226#endif
224 227
225static void acpi_fujitsu_notify(struct acpi_device *device, u32 event);
226
227/* Fujitsu ACPI interface function */ 228/* Fujitsu ACPI interface function */
228 229
229static int call_fext_func(int cmd, int arg0, int arg1, int arg2) 230static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
@@ -239,7 +240,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
239 unsigned long long value; 240 unsigned long long value;
240 acpi_handle handle = NULL; 241 acpi_handle handle = NULL;
241 242
242 status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle); 243 status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle);
243 if (ACPI_FAILURE(status)) { 244 if (ACPI_FAILURE(status)) {
244 vdbg_printk(FUJLAPTOP_DBG_ERROR, 245 vdbg_printk(FUJLAPTOP_DBG_ERROR,
245 "FUNC interface is not present\n"); 246 "FUNC interface is not present\n");
@@ -300,9 +301,9 @@ static int radio_led_set(struct led_classdev *cdev,
300 enum led_brightness brightness) 301 enum led_brightness brightness)
301{ 302{
302 if (brightness >= LED_FULL) 303 if (brightness >= LED_FULL)
303 return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON); 304 return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON);
304 else 305 else
305 return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0); 306 return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
306} 307}
307 308
308static int eco_led_set(struct led_classdev *cdev, 309static int eco_led_set(struct led_classdev *cdev,
@@ -346,7 +347,7 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev)
346{ 347{
347 enum led_brightness brightness = LED_OFF; 348 enum led_brightness brightness = LED_OFF;
348 349
349 if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON) 350 if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
350 brightness = LED_FULL; 351 brightness = LED_FULL;
351 352
352 return brightness; 353 return brightness;
@@ -373,10 +374,10 @@ static int set_lcd_level(int level)
373 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n", 374 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
374 level); 375 level);
375 376
376 if (level < 0 || level >= fujitsu->max_brightness) 377 if (level < 0 || level >= fujitsu_bl->max_brightness)
377 return -EINVAL; 378 return -EINVAL;
378 379
379 status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle); 380 status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle);
380 if (ACPI_FAILURE(status)) { 381 if (ACPI_FAILURE(status)) {
381 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n"); 382 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
382 return -ENODEV; 383 return -ENODEV;
@@ -398,10 +399,10 @@ static int set_lcd_level_alt(int level)
398 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n", 399 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
399 level); 400 level);
400 401
401 if (level < 0 || level >= fujitsu->max_brightness) 402 if (level < 0 || level >= fujitsu_bl->max_brightness)
402 return -EINVAL; 403 return -EINVAL;
403 404
404 status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle); 405 status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle);
405 if (ACPI_FAILURE(status)) { 406 if (ACPI_FAILURE(status)) {
406 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n"); 407 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
407 return -ENODEV; 408 return -ENODEV;
@@ -421,19 +422,19 @@ static int get_lcd_level(void)
421 422
422 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n"); 423 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
423 424
424 status = 425 status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL,
425 acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state); 426 &state);
426 if (ACPI_FAILURE(status)) 427 if (ACPI_FAILURE(status))
427 return 0; 428 return 0;
428 429
429 fujitsu->brightness_level = state & 0x0fffffff; 430 fujitsu_bl->brightness_level = state & 0x0fffffff;
430 431
431 if (state & 0x80000000) 432 if (state & 0x80000000)
432 fujitsu->brightness_changed = 1; 433 fujitsu_bl->brightness_changed = 1;
433 else 434 else
434 fujitsu->brightness_changed = 0; 435 fujitsu_bl->brightness_changed = 0;
435 436
436 return fujitsu->brightness_level; 437 return fujitsu_bl->brightness_level;
437} 438}
438 439
439static int get_max_brightness(void) 440static int get_max_brightness(void)
@@ -443,14 +444,14 @@ static int get_max_brightness(void)
443 444
444 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n"); 445 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
445 446
446 status = 447 status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL,
447 acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state); 448 &state);
448 if (ACPI_FAILURE(status)) 449 if (ACPI_FAILURE(status))
449 return -1; 450 return -1;
450 451
451 fujitsu->max_brightness = state; 452 fujitsu_bl->max_brightness = state;
452 453
453 return fujitsu->max_brightness; 454 return fujitsu_bl->max_brightness;
454} 455}
455 456
456/* Backlight device stuff */ 457/* Backlight device stuff */
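A worked example of the GBLL decoding above (raw value illustrative): the low 28 bits carry the current level, while bit 31 reports that firmware changed the brightness behind the driver's back.

unsigned long long state = 0x80000003ULL;	/* example GBLL result */

unsigned int level = state & 0x0fffffff;	/* == 3 */
bool changed	   = !!(state & 0x80000000);	/* == true */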
@@ -483,7 +484,7 @@ static int bl_update_status(struct backlight_device *b)
483 return ret; 484 return ret;
484} 485}
485 486
486static const struct backlight_ops fujitsubl_ops = { 487static const struct backlight_ops fujitsu_bl_ops = {
487 .get_brightness = bl_get_brightness, 488 .get_brightness = bl_get_brightness,
488 .update_status = bl_update_status, 489 .update_status = bl_update_status,
489}; 490};
@@ -511,7 +512,7 @@ show_brightness_changed(struct device *dev,
511 512
512 int ret; 513 int ret;
513 514
514 ret = fujitsu->brightness_changed; 515 ret = fujitsu_bl->brightness_changed;
515 if (ret < 0) 516 if (ret < 0)
516 return ret; 517 return ret;
517 518
@@ -539,7 +540,7 @@ static ssize_t store_lcd_level(struct device *dev,
539 int level, ret; 540 int level, ret;
540 541
541 if (sscanf(buf, "%i", &level) != 1 542 if (sscanf(buf, "%i", &level) != 1
542 || (level < 0 || level >= fujitsu->max_brightness)) 543 || (level < 0 || level >= fujitsu_bl->max_brightness))
543 return -EINVAL; 544 return -EINVAL;
544 545
545 if (use_alt_lcd_levels) 546 if (use_alt_lcd_levels)
@@ -567,9 +568,9 @@ static ssize_t
567show_lid_state(struct device *dev, 568show_lid_state(struct device *dev,
568 struct device_attribute *attr, char *buf) 569 struct device_attribute *attr, char *buf)
569{ 570{
570 if (!(fujitsu_hotkey->rfkill_supported & 0x100)) 571 if (!(fujitsu_laptop->flags_supported & FLAG_LID))
571 return sprintf(buf, "unknown\n"); 572 return sprintf(buf, "unknown\n");
572 if (fujitsu_hotkey->rfkill_state & 0x100) 573 if (fujitsu_laptop->flags_state & FLAG_LID)
573 return sprintf(buf, "open\n"); 574 return sprintf(buf, "open\n");
574 else 575 else
575 return sprintf(buf, "closed\n"); 576 return sprintf(buf, "closed\n");
@@ -579,9 +580,9 @@ static ssize_t
579show_dock_state(struct device *dev, 580show_dock_state(struct device *dev,
580 struct device_attribute *attr, char *buf) 581 struct device_attribute *attr, char *buf)
581{ 582{
582 if (!(fujitsu_hotkey->rfkill_supported & 0x200)) 583 if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))
583 return sprintf(buf, "unknown\n"); 584 return sprintf(buf, "unknown\n");
584 if (fujitsu_hotkey->rfkill_state & 0x200) 585 if (fujitsu_laptop->flags_state & FLAG_DOCK)
585 return sprintf(buf, "docked\n"); 586 return sprintf(buf, "docked\n");
586 else 587 else
587 return sprintf(buf, "undocked\n"); 588 return sprintf(buf, "undocked\n");
@@ -591,9 +592,9 @@ static ssize_t
591show_radios_state(struct device *dev, 592show_radios_state(struct device *dev,
592 struct device_attribute *attr, char *buf) 593 struct device_attribute *attr, char *buf)
593{ 594{
594 if (!(fujitsu_hotkey->rfkill_supported & 0x20)) 595 if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))
595 return sprintf(buf, "unknown\n"); 596 return sprintf(buf, "unknown\n");
596 if (fujitsu_hotkey->rfkill_state & 0x20) 597 if (fujitsu_laptop->flags_state & FLAG_RFKILL)
597 return sprintf(buf, "on\n"); 598 return sprintf(buf, "on\n");
598 else 599 else
599 return sprintf(buf, "killed\n"); 600 return sprintf(buf, "killed\n");
@@ -607,7 +608,7 @@ static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
607static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store); 608static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
608static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store); 609static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);
609 610
610static struct attribute *fujitsupf_attributes[] = { 611static struct attribute *fujitsu_pf_attributes[] = {
611 &dev_attr_brightness_changed.attr, 612 &dev_attr_brightness_changed.attr,
612 &dev_attr_max_brightness.attr, 613 &dev_attr_max_brightness.attr,
613 &dev_attr_lcd_level.attr, 614 &dev_attr_lcd_level.attr,
@@ -617,11 +618,11 @@ static struct attribute *fujitsupf_attributes[] = {
617 NULL 618 NULL
618}; 619};
619 620
620static struct attribute_group fujitsupf_attribute_group = { 621static struct attribute_group fujitsu_pf_attribute_group = {
621 .attrs = fujitsupf_attributes 622 .attrs = fujitsu_pf_attributes
622}; 623};
623 624
624static struct platform_driver fujitsupf_driver = { 625static struct platform_driver fujitsu_pf_driver = {
625 .driver = { 626 .driver = {
626 .name = "fujitsu-laptop", 627 .name = "fujitsu-laptop",
627 } 628 }
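A sketch of one read-only attribute in the style of the lid/dock/radios files above; "example" is an illustrative name and the body mirrors show_lid_state().

#include <linux/device.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	if (!(fujitsu_laptop->flags_supported & FLAG_LID))
		return sprintf(buf, "unknown\n");

	return sprintf(buf, "%s\n",
		       (fujitsu_laptop->flags_state & FLAG_LID) ?
		       "open" : "closed");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);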
@@ -630,39 +631,30 @@ static struct platform_driver fujitsupf_driver = {
630static void __init dmi_check_cb_common(const struct dmi_system_id *id) 631static void __init dmi_check_cb_common(const struct dmi_system_id *id)
631{ 632{
632 pr_info("Identified laptop model '%s'\n", id->ident); 633 pr_info("Identified laptop model '%s'\n", id->ident);
633 if (use_alt_lcd_levels == -1) {
634 if (acpi_has_method(NULL,
635 "\\_SB.PCI0.LPCB.FJEX.SBL2"))
636 use_alt_lcd_levels = 1;
637 else
638 use_alt_lcd_levels = 0;
639 vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "
640 "%i\n", use_alt_lcd_levels);
641 }
642} 634}
643 635
644static int __init dmi_check_cb_s6410(const struct dmi_system_id *id) 636static int __init dmi_check_cb_s6410(const struct dmi_system_id *id)
645{ 637{
646 dmi_check_cb_common(id); 638 dmi_check_cb_common(id);
647 fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ 639 fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
648 fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */ 640 fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
649 return 1; 641 return 1;
650} 642}
651 643
652static int __init dmi_check_cb_s6420(const struct dmi_system_id *id) 644static int __init dmi_check_cb_s6420(const struct dmi_system_id *id)
653{ 645{
654 dmi_check_cb_common(id); 646 dmi_check_cb_common(id);
655 fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ 647 fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
656 fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */ 648 fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
657 return 1; 649 return 1;
658} 650}
659 651
660static int __init dmi_check_cb_p8010(const struct dmi_system_id *id) 652static int __init dmi_check_cb_p8010(const struct dmi_system_id *id)
661{ 653{
662 dmi_check_cb_common(id); 654 dmi_check_cb_common(id);
663 fujitsu->keycode1 = KEY_HELP; /* "Support" */ 655 fujitsu_bl->keycode1 = KEY_HELP; /* "Support" */
664 fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */ 656 fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
665 fujitsu->keycode4 = KEY_WWW; /* "Internet" */ 657 fujitsu_bl->keycode4 = KEY_WWW; /* "Internet" */
666 return 1; 658 return 1;
667} 659}
668 660
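A sketch of how one fujitsu_dmi_table entry feeds the callbacks above; the match strings are illustrative assumptions, not copied from the table.

#include <linux/dmi.h>

static const struct dmi_system_id example_dmi_table[] __initconst = {
	{
		.callback = dmi_check_cb_s6410,
		.ident	  = "Fujitsu Siemens S6410",
		.matches  = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
		},
	},
	{ }
};
/* consumed once at init time: dmi_check_system(example_dmi_table); */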
@@ -693,7 +685,7 @@ static const struct dmi_system_id fujitsu_dmi_table[] __initconst = {
693 685
694/* ACPI device for LCD brightness control */ 686/* ACPI device for LCD brightness control */
695 687
696static int acpi_fujitsu_add(struct acpi_device *device) 688static int acpi_fujitsu_bl_add(struct acpi_device *device)
697{ 689{
698 int state = 0; 690 int state = 0;
699 struct input_dev *input; 691 struct input_dev *input;
@@ -702,22 +694,22 @@ static int acpi_fujitsu_add(struct acpi_device *device)
702 if (!device) 694 if (!device)
703 return -EINVAL; 695 return -EINVAL;
704 696
705 fujitsu->acpi_handle = device->handle; 697 fujitsu_bl->acpi_handle = device->handle;
706 sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME); 698 sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME);
707 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 699 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
708 device->driver_data = fujitsu; 700 device->driver_data = fujitsu_bl;
709 701
710 fujitsu->input = input = input_allocate_device(); 702 fujitsu_bl->input = input = input_allocate_device();
711 if (!input) { 703 if (!input) {
712 error = -ENOMEM; 704 error = -ENOMEM;
713 goto err_stop; 705 goto err_stop;
714 } 706 }
715 707
716 snprintf(fujitsu->phys, sizeof(fujitsu->phys), 708 snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
717 "%s/video/input0", acpi_device_hid(device)); 709 "%s/video/input0", acpi_device_hid(device));
718 710
719 input->name = acpi_device_name(device); 711 input->name = acpi_device_name(device);
720 input->phys = fujitsu->phys; 712 input->phys = fujitsu_bl->phys;
721 input->id.bustype = BUS_HOST; 713 input->id.bustype = BUS_HOST;
722 input->id.product = 0x06; 714 input->id.product = 0x06;
723 input->dev.parent = &device->dev; 715 input->dev.parent = &device->dev;
@@ -730,7 +722,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
730 if (error) 722 if (error)
731 goto err_free_input_dev; 723 goto err_free_input_dev;
732 724
733 error = acpi_bus_update_power(fujitsu->acpi_handle, &state); 725 error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);
734 if (error) { 726 if (error) {
735 pr_err("Error reading power state\n"); 727 pr_err("Error reading power state\n");
736 goto err_unregister_input_dev; 728 goto err_unregister_input_dev;
@@ -740,7 +732,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
740 acpi_device_name(device), acpi_device_bid(device), 732 acpi_device_name(device), acpi_device_bid(device),
741 !device->power.state ? "on" : "off"); 733 !device->power.state ? "on" : "off");
742 734
743 fujitsu->dev = device; 735 fujitsu_bl->dev = device;
744 736
745 if (acpi_has_method(device->handle, METHOD_NAME__INI)) { 737 if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
746 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); 738 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -750,6 +742,15 @@ static int acpi_fujitsu_add(struct acpi_device *device)
750 pr_err("_INI Method failed\n"); 742 pr_err("_INI Method failed\n");
751 } 743 }
752 744
745 if (use_alt_lcd_levels == -1) {
746 if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2"))
747 use_alt_lcd_levels = 1;
748 else
749 use_alt_lcd_levels = 0;
750 vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n",
751 use_alt_lcd_levels);
752 }
753
753 /* do config (detect defaults) */ 754 /* do config (detect defaults) */
754 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0; 755 use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
755 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0; 756 disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
@@ -758,7 +759,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
758 use_alt_lcd_levels, disable_brightness_adjust); 759 use_alt_lcd_levels, disable_brightness_adjust);
759 760
760 if (get_max_brightness() <= 0) 761 if (get_max_brightness() <= 0)
761 fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS; 762 fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;
762 get_lcd_level(); 763 get_lcd_level();
763 764
764 return 0; 765 return 0;
@@ -772,38 +773,38 @@ err_stop:
772 return error; 773 return error;
773} 774}
774 775
775static int acpi_fujitsu_remove(struct acpi_device *device) 776static int acpi_fujitsu_bl_remove(struct acpi_device *device)
776{ 777{
777 struct fujitsu_t *fujitsu = acpi_driver_data(device); 778 struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
778 struct input_dev *input = fujitsu->input; 779 struct input_dev *input = fujitsu_bl->input;
779 780
780 input_unregister_device(input); 781 input_unregister_device(input);
781 782
782 fujitsu->acpi_handle = NULL; 783 fujitsu_bl->acpi_handle = NULL;
783 784
784 return 0; 785 return 0;
785} 786}
786 787
787/* Brightness notify */ 788/* Brightness notify */
788 789
789static void acpi_fujitsu_notify(struct acpi_device *device, u32 event) 790static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
790{ 791{
791 struct input_dev *input; 792 struct input_dev *input;
792 int keycode; 793 int keycode;
793 int oldb, newb; 794 int oldb, newb;
794 795
795 input = fujitsu->input; 796 input = fujitsu_bl->input;
796 797
797 switch (event) { 798 switch (event) {
798 case ACPI_FUJITSU_NOTIFY_CODE1: 799 case ACPI_FUJITSU_NOTIFY_CODE1:
799 keycode = 0; 800 keycode = 0;
800 oldb = fujitsu->brightness_level; 801 oldb = fujitsu_bl->brightness_level;
801 get_lcd_level(); 802 get_lcd_level();
802 newb = fujitsu->brightness_level; 803 newb = fujitsu_bl->brightness_level;
803 804
804 vdbg_printk(FUJLAPTOP_DBG_TRACE, 805 vdbg_printk(FUJLAPTOP_DBG_TRACE,
805 "brightness button event [%i -> %i (%i)]\n", 806 "brightness button event [%i -> %i (%i)]\n",
806 oldb, newb, fujitsu->brightness_changed); 807 oldb, newb, fujitsu_bl->brightness_changed);
807 808
808 if (oldb < newb) { 809 if (oldb < newb) {
809 if (disable_brightness_adjust != 1) { 810 if (disable_brightness_adjust != 1) {
@@ -840,7 +841,7 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
840 841
841/* ACPI device for hotkey handling */ 842/* ACPI device for hotkey handling */
842 843
843static int acpi_fujitsu_hotkey_add(struct acpi_device *device) 844static int acpi_fujitsu_laptop_add(struct acpi_device *device)
844{ 845{
845 int result = 0; 846 int result = 0;
846 int state = 0; 847 int state = 0;
@@ -851,42 +852,42 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
851 if (!device) 852 if (!device)
852 return -EINVAL; 853 return -EINVAL;
853 854
854 fujitsu_hotkey->acpi_handle = device->handle; 855 fujitsu_laptop->acpi_handle = device->handle;
855 sprintf(acpi_device_name(device), "%s", 856 sprintf(acpi_device_name(device), "%s",
856 ACPI_FUJITSU_HOTKEY_DEVICE_NAME); 857 ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
857 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); 858 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
858 device->driver_data = fujitsu_hotkey; 859 device->driver_data = fujitsu_laptop;
859 860
860 /* kfifo */ 861 /* kfifo */
861 spin_lock_init(&fujitsu_hotkey->fifo_lock); 862 spin_lock_init(&fujitsu_laptop->fifo_lock);
862 error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int), 863 error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int),
863 GFP_KERNEL); 864 GFP_KERNEL);
864 if (error) { 865 if (error) {
865 pr_err("kfifo_alloc failed\n"); 866 pr_err("kfifo_alloc failed\n");
866 goto err_stop; 867 goto err_stop;
867 } 868 }
868 869
869 fujitsu_hotkey->input = input = input_allocate_device(); 870 fujitsu_laptop->input = input = input_allocate_device();
870 if (!input) { 871 if (!input) {
871 error = -ENOMEM; 872 error = -ENOMEM;
872 goto err_free_fifo; 873 goto err_free_fifo;
873 } 874 }
874 875
875 snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), 876 snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
876 "%s/video/input0", acpi_device_hid(device)); 877 "%s/video/input0", acpi_device_hid(device));
877 878
878 input->name = acpi_device_name(device); 879 input->name = acpi_device_name(device);
879 input->phys = fujitsu_hotkey->phys; 880 input->phys = fujitsu_laptop->phys;
880 input->id.bustype = BUS_HOST; 881 input->id.bustype = BUS_HOST;
881 input->id.product = 0x06; 882 input->id.product = 0x06;
882 input->dev.parent = &device->dev; 883 input->dev.parent = &device->dev;
883 884
884 set_bit(EV_KEY, input->evbit); 885 set_bit(EV_KEY, input->evbit);
885 set_bit(fujitsu->keycode1, input->keybit); 886 set_bit(fujitsu_bl->keycode1, input->keybit);
886 set_bit(fujitsu->keycode2, input->keybit); 887 set_bit(fujitsu_bl->keycode2, input->keybit);
887 set_bit(fujitsu->keycode3, input->keybit); 888 set_bit(fujitsu_bl->keycode3, input->keybit);
888 set_bit(fujitsu->keycode4, input->keybit); 889 set_bit(fujitsu_bl->keycode4, input->keybit);
889 set_bit(fujitsu->keycode5, input->keybit); 890 set_bit(fujitsu_bl->keycode5, input->keybit);
890 set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit); 891 set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
891 set_bit(KEY_UNKNOWN, input->keybit); 892 set_bit(KEY_UNKNOWN, input->keybit);
892 893
@@ -894,7 +895,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
894 if (error) 895 if (error)
895 goto err_free_input_dev; 896 goto err_free_input_dev;
896 897
897 error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state); 898 error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);
898 if (error) { 899 if (error) {
899 pr_err("Error reading power state\n"); 900 pr_err("Error reading power state\n");
900 goto err_unregister_input_dev; 901 goto err_unregister_input_dev;
@@ -904,7 +905,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
904 acpi_device_name(device), acpi_device_bid(device), 905 acpi_device_name(device), acpi_device_bid(device),
905 !device->power.state ? "on" : "off"); 906 !device->power.state ? "on" : "off");
906 907
907 fujitsu_hotkey->dev = device; 908 fujitsu_laptop->dev = device;
908 909
909 if (acpi_has_method(device->handle, METHOD_NAME__INI)) { 910 if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
910 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n"); 911 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -920,27 +921,27 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
920 ; /* No action, result is discarded */ 921 ; /* No action, result is discarded */
921 vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i); 922 vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
922 923
923 fujitsu_hotkey->rfkill_supported = 924 fujitsu_laptop->flags_supported =
924 call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0); 925 call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0);
925 926
926 /* Make sure our bitmask of supported functions is cleared if the 927 /* Make sure our bitmask of supported functions is cleared if the
927 RFKILL function block is not implemented, like on the S7020. */ 928 RFKILL function block is not implemented, like on the S7020. */
928 if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD) 929 if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD)
929 fujitsu_hotkey->rfkill_supported = 0; 930 fujitsu_laptop->flags_supported = 0;
930 931
931 if (fujitsu_hotkey->rfkill_supported) 932 if (fujitsu_laptop->flags_supported)
932 fujitsu_hotkey->rfkill_state = 933 fujitsu_laptop->flags_state =
933 call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); 934 call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
934 935
935 /* Suspect this is a keymap of the application panel, print it */ 936 /* Suspect this is a keymap of the application panel, print it */
936 pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0)); 937 pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
937 938
938#if IS_ENABLED(CONFIG_LEDS_CLASS) 939#if IS_ENABLED(CONFIG_LEDS_CLASS)
939 if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { 940 if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
940 result = led_classdev_register(&fujitsu->pf_device->dev, 941 result = led_classdev_register(&fujitsu_bl->pf_device->dev,
941 &logolamp_led); 942 &logolamp_led);
942 if (result == 0) { 943 if (result == 0) {
943 fujitsu_hotkey->logolamp_registered = 1; 944 fujitsu_laptop->logolamp_registered = 1;
944 } else { 945 } else {
945 pr_err("Could not register LED handler for logo lamp, error %i\n", 946 pr_err("Could not register LED handler for logo lamp, error %i\n",
946 result); 947 result);
@@ -949,10 +950,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
949 950
950 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) && 951 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
951 (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) { 952 (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
952 result = led_classdev_register(&fujitsu->pf_device->dev, 953 result = led_classdev_register(&fujitsu_bl->pf_device->dev,
953 &kblamps_led); 954 &kblamps_led);
954 if (result == 0) { 955 if (result == 0) {
955 fujitsu_hotkey->kblamps_registered = 1; 956 fujitsu_laptop->kblamps_registered = 1;
956 } else { 957 } else {
957 pr_err("Could not register LED handler for keyboard lamps, error %i\n", 958 pr_err("Could not register LED handler for keyboard lamps, error %i\n",
958 result); 959 result);
@@ -966,10 +967,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
966 * that an RF LED is present. 967 * that an RF LED is present.
967 */ 968 */
968 if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) { 969 if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
969 result = led_classdev_register(&fujitsu->pf_device->dev, 970 result = led_classdev_register(&fujitsu_bl->pf_device->dev,
970 &radio_led); 971 &radio_led);
971 if (result == 0) { 972 if (result == 0) {
972 fujitsu_hotkey->radio_led_registered = 1; 973 fujitsu_laptop->radio_led_registered = 1;
973 } else { 974 } else {
974 pr_err("Could not register LED handler for radio LED, error %i\n", 975 pr_err("Could not register LED handler for radio LED, error %i\n",
975 result); 976 result);
@@ -983,10 +984,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
983 */ 984 */
984 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) && 985 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
985 (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) { 986 (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
986 result = led_classdev_register(&fujitsu->pf_device->dev, 987 result = led_classdev_register(&fujitsu_bl->pf_device->dev,
987 &eco_led); 988 &eco_led);
988 if (result == 0) { 989 if (result == 0) {
989 fujitsu_hotkey->eco_led_registered = 1; 990 fujitsu_laptop->eco_led_registered = 1;
990 } else { 991 } else {
991 pr_err("Could not register LED handler for eco LED, error %i\n", 992 pr_err("Could not register LED handler for eco LED, error %i\n",
992 result); 993 result);
@@ -1002,47 +1003,47 @@ err_unregister_input_dev:
1002err_free_input_dev: 1003err_free_input_dev:
1003 input_free_device(input); 1004 input_free_device(input);
1004err_free_fifo: 1005err_free_fifo:
1005 kfifo_free(&fujitsu_hotkey->fifo); 1006 kfifo_free(&fujitsu_laptop->fifo);
1006err_stop: 1007err_stop:
1007 return error; 1008 return error;
1008} 1009}
1009 1010
1010static int acpi_fujitsu_hotkey_remove(struct acpi_device *device) 1011static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
1011{ 1012{
1012 struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); 1013 struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
1013 struct input_dev *input = fujitsu_hotkey->input; 1014 struct input_dev *input = fujitsu_laptop->input;
1014 1015
1015#if IS_ENABLED(CONFIG_LEDS_CLASS) 1016#if IS_ENABLED(CONFIG_LEDS_CLASS)
1016 if (fujitsu_hotkey->logolamp_registered) 1017 if (fujitsu_laptop->logolamp_registered)
1017 led_classdev_unregister(&logolamp_led); 1018 led_classdev_unregister(&logolamp_led);
1018 1019
1019 if (fujitsu_hotkey->kblamps_registered) 1020 if (fujitsu_laptop->kblamps_registered)
1020 led_classdev_unregister(&kblamps_led); 1021 led_classdev_unregister(&kblamps_led);
1021 1022
1022 if (fujitsu_hotkey->radio_led_registered) 1023 if (fujitsu_laptop->radio_led_registered)
1023 led_classdev_unregister(&radio_led); 1024 led_classdev_unregister(&radio_led);
1024 1025
1025 if (fujitsu_hotkey->eco_led_registered) 1026 if (fujitsu_laptop->eco_led_registered)
1026 led_classdev_unregister(&eco_led); 1027 led_classdev_unregister(&eco_led);
1027#endif 1028#endif
1028 1029
1029 input_unregister_device(input); 1030 input_unregister_device(input);
1030 1031
1031 kfifo_free(&fujitsu_hotkey->fifo); 1032 kfifo_free(&fujitsu_laptop->fifo);
1032 1033
1033 fujitsu_hotkey->acpi_handle = NULL; 1034 fujitsu_laptop->acpi_handle = NULL;
1034 1035
1035 return 0; 1036 return 0;
1036} 1037}
1037 1038
1038static void acpi_fujitsu_hotkey_press(int keycode) 1039static void acpi_fujitsu_laptop_press(int keycode)
1039{ 1040{
1040 struct input_dev *input = fujitsu_hotkey->input; 1041 struct input_dev *input = fujitsu_laptop->input;
1041 int status; 1042 int status;
1042 1043
1043 status = kfifo_in_locked(&fujitsu_hotkey->fifo, 1044 status = kfifo_in_locked(&fujitsu_laptop->fifo,
1044 (unsigned char *)&keycode, sizeof(keycode), 1045 (unsigned char *)&keycode, sizeof(keycode),
1045 &fujitsu_hotkey->fifo_lock); 1046 &fujitsu_laptop->fifo_lock);
1046 if (status != sizeof(keycode)) { 1047 if (status != sizeof(keycode)) {
1047 vdbg_printk(FUJLAPTOP_DBG_WARN, 1048 vdbg_printk(FUJLAPTOP_DBG_WARN,
1048 "Could not push keycode [0x%x]\n", keycode); 1049 "Could not push keycode [0x%x]\n", keycode);
@@ -1054,16 +1055,16 @@ static void acpi_fujitsu_hotkey_press(int keycode)
1054 "Push keycode into ringbuffer [%d]\n", keycode); 1055 "Push keycode into ringbuffer [%d]\n", keycode);
1055} 1056}
1056 1057
1057static void acpi_fujitsu_hotkey_release(void) 1058static void acpi_fujitsu_laptop_release(void)
1058{ 1059{
1059 struct input_dev *input = fujitsu_hotkey->input; 1060 struct input_dev *input = fujitsu_laptop->input;
1060 int keycode, status; 1061 int keycode, status;
1061 1062
1062 while (true) { 1063 while (true) {
1063 status = kfifo_out_locked(&fujitsu_hotkey->fifo, 1064 status = kfifo_out_locked(&fujitsu_laptop->fifo,
1064 (unsigned char *)&keycode, 1065 (unsigned char *)&keycode,
1065 sizeof(keycode), 1066 sizeof(keycode),
1066 &fujitsu_hotkey->fifo_lock); 1067 &fujitsu_laptop->fifo_lock);
1067 if (status != sizeof(keycode)) 1068 if (status != sizeof(keycode))
1068 return; 1069 return;
1069 input_report_key(input, keycode, 0); 1070 input_report_key(input, keycode, 0);
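The press/release pairing above, distilled (keycode value illustrative): a keycode is pushed on the press event and popped and released when the release event arrives, both sides serialized by fifo_lock.

int keycode = KEY_PROG1;

/* press path */
kfifo_in_locked(&fujitsu_laptop->fifo, (unsigned char *)&keycode,
		sizeof(keycode), &fujitsu_laptop->fifo_lock);
input_report_key(input, keycode, 1);
input_sync(input);

/* release path: drain every keycode that is still "down" */
while (kfifo_out_locked(&fujitsu_laptop->fifo, (unsigned char *)&keycode,
			sizeof(keycode), &fujitsu_laptop->fifo_lock)
       == sizeof(keycode)) {
	input_report_key(input, keycode, 0);
	input_sync(input);
}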
@@ -1073,14 +1074,14 @@ static void acpi_fujitsu_hotkey_release(void)
1073 } 1074 }
1074} 1075}
1075 1076
1076static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) 1077static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
1077{ 1078{
1078 struct input_dev *input; 1079 struct input_dev *input;
1079 int keycode; 1080 int keycode;
1080 unsigned int irb = 1; 1081 unsigned int irb = 1;
1081 int i; 1082 int i;
1082 1083
1083 input = fujitsu_hotkey->input; 1084 input = fujitsu_laptop->input;
1084 1085
1085 if (event != ACPI_FUJITSU_NOTIFY_CODE1) { 1086 if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
1086 keycode = KEY_UNKNOWN; 1087 keycode = KEY_UNKNOWN;
@@ -1093,9 +1094,9 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1093 return; 1094 return;
1094 } 1095 }
1095 1096
1096 if (fujitsu_hotkey->rfkill_supported) 1097 if (fujitsu_laptop->flags_supported)
1097 fujitsu_hotkey->rfkill_state = 1098 fujitsu_laptop->flags_state =
1098 call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0); 1099 call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
1099 1100
1100 i = 0; 1101 i = 0;
1101 while ((irb = 1102 while ((irb =
@@ -1103,19 +1104,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1103 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) { 1104 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
1104 switch (irb & 0x4ff) { 1105 switch (irb & 0x4ff) {
1105 case KEY1_CODE: 1106 case KEY1_CODE:
1106 keycode = fujitsu->keycode1; 1107 keycode = fujitsu_bl->keycode1;
1107 break; 1108 break;
1108 case KEY2_CODE: 1109 case KEY2_CODE:
1109 keycode = fujitsu->keycode2; 1110 keycode = fujitsu_bl->keycode2;
1110 break; 1111 break;
1111 case KEY3_CODE: 1112 case KEY3_CODE:
1112 keycode = fujitsu->keycode3; 1113 keycode = fujitsu_bl->keycode3;
1113 break; 1114 break;
1114 case KEY4_CODE: 1115 case KEY4_CODE:
1115 keycode = fujitsu->keycode4; 1116 keycode = fujitsu_bl->keycode4;
1116 break; 1117 break;
1117 case KEY5_CODE: 1118 case KEY5_CODE:
1118 keycode = fujitsu->keycode5; 1119 keycode = fujitsu_bl->keycode5;
1119 break; 1120 break;
1120 case 0: 1121 case 0:
1121 keycode = 0; 1122 keycode = 0;
@@ -1128,17 +1129,17 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1128 } 1129 }
1129 1130
1130 if (keycode > 0) 1131 if (keycode > 0)
1131 acpi_fujitsu_hotkey_press(keycode); 1132 acpi_fujitsu_laptop_press(keycode);
1132 else if (keycode == 0) 1133 else if (keycode == 0)
1133 acpi_fujitsu_hotkey_release(); 1134 acpi_fujitsu_laptop_release();
1134 } 1135 }
1135 1136
1136 /* On some models (first seen on the Skylake-based Lifebook 1137 /* On some models (first seen on the Skylake-based Lifebook
1137 * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is 1138 * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
1138 * handled in software; its state is queried using FUNC_RFKILL 1139 * handled in software; its state is queried using FUNC_FLAGS
1139 */ 1140 */
1140 if ((fujitsu_hotkey->rfkill_supported & BIT(26)) && 1141 if ((fujitsu_laptop->flags_supported & BIT(26)) &&
1141 (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) { 1142 (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) {
1142 keycode = KEY_TOUCHPAD_TOGGLE; 1143 keycode = KEY_TOUCHPAD_TOGGLE;
1143 input_report_key(input, keycode, 1); 1144 input_report_key(input, keycode, 1);
1144 input_sync(input); 1145 input_sync(input);
@@ -1150,83 +1151,81 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1150 1151
1151/* Initialization */ 1152/* Initialization */
1152 1153
1153static const struct acpi_device_id fujitsu_device_ids[] = { 1154static const struct acpi_device_id fujitsu_bl_device_ids[] = {
1154 {ACPI_FUJITSU_HID, 0}, 1155 {ACPI_FUJITSU_BL_HID, 0},
1155 {"", 0}, 1156 {"", 0},
1156}; 1157};
1157 1158
1158static struct acpi_driver acpi_fujitsu_driver = { 1159static struct acpi_driver acpi_fujitsu_bl_driver = {
1159 .name = ACPI_FUJITSU_DRIVER_NAME, 1160 .name = ACPI_FUJITSU_BL_DRIVER_NAME,
1160 .class = ACPI_FUJITSU_CLASS, 1161 .class = ACPI_FUJITSU_CLASS,
1161 .ids = fujitsu_device_ids, 1162 .ids = fujitsu_bl_device_ids,
1162 .ops = { 1163 .ops = {
1163 .add = acpi_fujitsu_add, 1164 .add = acpi_fujitsu_bl_add,
1164 .remove = acpi_fujitsu_remove, 1165 .remove = acpi_fujitsu_bl_remove,
1165 .notify = acpi_fujitsu_notify, 1166 .notify = acpi_fujitsu_bl_notify,
1166 }, 1167 },
1167}; 1168};
1168 1169
1169static const struct acpi_device_id fujitsu_hotkey_device_ids[] = { 1170static const struct acpi_device_id fujitsu_laptop_device_ids[] = {
1170 {ACPI_FUJITSU_HOTKEY_HID, 0}, 1171 {ACPI_FUJITSU_LAPTOP_HID, 0},
1171 {"", 0}, 1172 {"", 0},
1172}; 1173};
1173 1174
1174static struct acpi_driver acpi_fujitsu_hotkey_driver = { 1175static struct acpi_driver acpi_fujitsu_laptop_driver = {
1175 .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME, 1176 .name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,
1176 .class = ACPI_FUJITSU_CLASS, 1177 .class = ACPI_FUJITSU_CLASS,
1177 .ids = fujitsu_hotkey_device_ids, 1178 .ids = fujitsu_laptop_device_ids,
1178 .ops = { 1179 .ops = {
1179 .add = acpi_fujitsu_hotkey_add, 1180 .add = acpi_fujitsu_laptop_add,
1180 .remove = acpi_fujitsu_hotkey_remove, 1181 .remove = acpi_fujitsu_laptop_remove,
1181 .notify = acpi_fujitsu_hotkey_notify, 1182 .notify = acpi_fujitsu_laptop_notify,
1182 }, 1183 },
1183}; 1184};
1184 1185
1185static const struct acpi_device_id fujitsu_ids[] __used = { 1186static const struct acpi_device_id fujitsu_ids[] __used = {
1186 {ACPI_FUJITSU_HID, 0}, 1187 {ACPI_FUJITSU_BL_HID, 0},
1187 {ACPI_FUJITSU_HOTKEY_HID, 0}, 1188 {ACPI_FUJITSU_LAPTOP_HID, 0},
1188 {"", 0} 1189 {"", 0}
1189}; 1190};
1190MODULE_DEVICE_TABLE(acpi, fujitsu_ids); 1191MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
1191 1192
1192static int __init fujitsu_init(void) 1193static int __init fujitsu_init(void)
1193{ 1194{
1194 int ret, result, max_brightness; 1195 int ret, max_brightness;
1195 1196
1196 if (acpi_disabled) 1197 if (acpi_disabled)
1197 return -ENODEV; 1198 return -ENODEV;
1198 1199
1199 fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL); 1200 fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);
1200 if (!fujitsu) 1201 if (!fujitsu_bl)
1201 return -ENOMEM; 1202 return -ENOMEM;
1202 fujitsu->keycode1 = KEY_PROG1; 1203 fujitsu_bl->keycode1 = KEY_PROG1;
1203 fujitsu->keycode2 = KEY_PROG2; 1204 fujitsu_bl->keycode2 = KEY_PROG2;
1204 fujitsu->keycode3 = KEY_PROG3; 1205 fujitsu_bl->keycode3 = KEY_PROG3;
1205 fujitsu->keycode4 = KEY_PROG4; 1206 fujitsu_bl->keycode4 = KEY_PROG4;
1206 fujitsu->keycode5 = KEY_RFKILL; 1207 fujitsu_bl->keycode5 = KEY_RFKILL;
1207 dmi_check_system(fujitsu_dmi_table); 1208 dmi_check_system(fujitsu_dmi_table);
1208 1209
1209 result = acpi_bus_register_driver(&acpi_fujitsu_driver); 1210 ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
1210 if (result < 0) { 1211 if (ret)
1211 ret = -ENODEV;
1212 goto fail_acpi; 1212 goto fail_acpi;
1213 }
1214 1213
1215 /* Register platform stuff */ 1214 /* Register platform stuff */
1216 1215
1217 fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1); 1216 fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1);
1218 if (!fujitsu->pf_device) { 1217 if (!fujitsu_bl->pf_device) {
1219 ret = -ENOMEM; 1218 ret = -ENOMEM;
1220 goto fail_platform_driver; 1219 goto fail_platform_driver;
1221 } 1220 }
1222 1221
1223 ret = platform_device_add(fujitsu->pf_device); 1222 ret = platform_device_add(fujitsu_bl->pf_device);
1224 if (ret) 1223 if (ret)
1225 goto fail_platform_device1; 1224 goto fail_platform_device1;
1226 1225
1227 ret = 1226 ret =
1228 sysfs_create_group(&fujitsu->pf_device->dev.kobj, 1227 sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj,
1229 &fujitsupf_attribute_group); 1228 &fujitsu_pf_attribute_group);
1230 if (ret) 1229 if (ret)
1231 goto fail_platform_device2; 1230 goto fail_platform_device2;
1232 1231
@@ -1236,90 +1235,88 @@ static int __init fujitsu_init(void)
1236 struct backlight_properties props; 1235 struct backlight_properties props;
1237 1236
1238 memset(&props, 0, sizeof(struct backlight_properties)); 1237 memset(&props, 0, sizeof(struct backlight_properties));
1239 max_brightness = fujitsu->max_brightness; 1238 max_brightness = fujitsu_bl->max_brightness;
1240 props.type = BACKLIGHT_PLATFORM; 1239 props.type = BACKLIGHT_PLATFORM;
1241 props.max_brightness = max_brightness - 1; 1240 props.max_brightness = max_brightness - 1;
1242 fujitsu->bl_device = backlight_device_register("fujitsu-laptop", 1241 fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop",
1243 NULL, NULL, 1242 NULL, NULL,
1244 &fujitsubl_ops, 1243 &fujitsu_bl_ops,
1245 &props); 1244 &props);
1246 if (IS_ERR(fujitsu->bl_device)) { 1245 if (IS_ERR(fujitsu_bl->bl_device)) {
1247 ret = PTR_ERR(fujitsu->bl_device); 1246 ret = PTR_ERR(fujitsu_bl->bl_device);
1248 fujitsu->bl_device = NULL; 1247 fujitsu_bl->bl_device = NULL;
1249 goto fail_sysfs_group; 1248 goto fail_sysfs_group;
1250 } 1249 }
1251 fujitsu->bl_device->props.brightness = fujitsu->brightness_level; 1250 fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level;
1252 } 1251 }
1253 1252
1254 ret = platform_driver_register(&fujitsupf_driver); 1253 ret = platform_driver_register(&fujitsu_pf_driver);
1255 if (ret) 1254 if (ret)
1256 goto fail_backlight; 1255 goto fail_backlight;
1257 1256
1258 /* Register hotkey driver */ 1257 /* Register laptop driver */
1259 1258
1260 fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL); 1259 fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);
1261 if (!fujitsu_hotkey) { 1260 if (!fujitsu_laptop) {
1262 ret = -ENOMEM; 1261 ret = -ENOMEM;
1263 goto fail_hotkey; 1262 goto fail_laptop;
1264 } 1263 }
1265 1264
1266 result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver); 1265 ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
1267 if (result < 0) { 1266 if (ret)
1268 ret = -ENODEV; 1267 goto fail_laptop1;
1269 goto fail_hotkey1;
1270 }
1271 1268
1272 /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */ 1269 /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
1273 if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { 1270 if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
1274 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3) 1271 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
1275 fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN; 1272 fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
1276 else 1273 else
1277 fujitsu->bl_device->props.power = FB_BLANK_UNBLANK; 1274 fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
1278 } 1275 }
1279 1276
1280 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n"); 1277 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
1281 1278
1282 return 0; 1279 return 0;
1283 1280
1284fail_hotkey1: 1281fail_laptop1:
1285 kfree(fujitsu_hotkey); 1282 kfree(fujitsu_laptop);
1286fail_hotkey: 1283fail_laptop:
1287 platform_driver_unregister(&fujitsupf_driver); 1284 platform_driver_unregister(&fujitsu_pf_driver);
1288fail_backlight: 1285fail_backlight:
1289 backlight_device_unregister(fujitsu->bl_device); 1286 backlight_device_unregister(fujitsu_bl->bl_device);
1290fail_sysfs_group: 1287fail_sysfs_group:
1291 sysfs_remove_group(&fujitsu->pf_device->dev.kobj, 1288 sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
1292 &fujitsupf_attribute_group); 1289 &fujitsu_pf_attribute_group);
1293fail_platform_device2: 1290fail_platform_device2:
1294 platform_device_del(fujitsu->pf_device); 1291 platform_device_del(fujitsu_bl->pf_device);
1295fail_platform_device1: 1292fail_platform_device1:
1296 platform_device_put(fujitsu->pf_device); 1293 platform_device_put(fujitsu_bl->pf_device);
1297fail_platform_driver: 1294fail_platform_driver:
1298 acpi_bus_unregister_driver(&acpi_fujitsu_driver); 1295 acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
1299fail_acpi: 1296fail_acpi:
1300 kfree(fujitsu); 1297 kfree(fujitsu_bl);
1301 1298
1302 return ret; 1299 return ret;
1303} 1300}
1304 1301
1305static void __exit fujitsu_cleanup(void) 1302static void __exit fujitsu_cleanup(void)
1306{ 1303{
1307 acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver); 1304 acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);
1308 1305
1309 kfree(fujitsu_hotkey); 1306 kfree(fujitsu_laptop);
1310 1307
1311 platform_driver_unregister(&fujitsupf_driver); 1308 platform_driver_unregister(&fujitsu_pf_driver);
1312 1309
1313 backlight_device_unregister(fujitsu->bl_device); 1310 backlight_device_unregister(fujitsu_bl->bl_device);
1314 1311
1315 sysfs_remove_group(&fujitsu->pf_device->dev.kobj, 1312 sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
1316 &fujitsupf_attribute_group); 1313 &fujitsu_pf_attribute_group);
1317 1314
1318 platform_device_unregister(fujitsu->pf_device); 1315 platform_device_unregister(fujitsu_bl->pf_device);
1319 1316
1320 acpi_bus_unregister_driver(&acpi_fujitsu_driver); 1317 acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
1321 1318
1322 kfree(fujitsu); 1319 kfree(fujitsu_bl);
1323 1320
1324 pr_info("driver unloaded\n"); 1321 pr_info("driver unloaded\n");
1325} 1322}
@@ -1341,7 +1338,3 @@ MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
1341MODULE_DESCRIPTION("Fujitsu laptop extras support"); 1338MODULE_DESCRIPTION("Fujitsu laptop extras support");
1342MODULE_VERSION(FUJITSU_DRIVER_VERSION); 1339MODULE_VERSION(FUJITSU_DRIVER_VERSION);
1343MODULE_LICENSE("GPL"); 1340MODULE_LICENSE("GPL");
1344
1345MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
1346MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
1347MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 230043c1c90f..4bf55b5d78be 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1241,19 +1241,32 @@ config SCSI_LPFC
1241 tristate "Emulex LightPulse Fibre Channel Support" 1241 tristate "Emulex LightPulse Fibre Channel Support"
1242 depends on PCI && SCSI 1242 depends on PCI && SCSI
1243 depends on SCSI_FC_ATTRS 1243 depends on SCSI_FC_ATTRS
1244 depends on NVME_FC && NVME_TARGET_FC
1245 select CRC_T10DIF 1244 select CRC_T10DIF
1246 help 1245 ---help---
1247 This lpfc driver supports the Emulex LightPulse 1246 This lpfc driver supports the Emulex LightPulse
1248 Family of Fibre Channel PCI host adapters. 1247 Family of Fibre Channel PCI host adapters.
1249 1248
1250config SCSI_LPFC_DEBUG_FS 1249config SCSI_LPFC_DEBUG_FS
1251 bool "Emulex LightPulse Fibre Channel debugfs Support" 1250 bool "Emulex LightPulse Fibre Channel debugfs Support"
1252 depends on SCSI_LPFC && DEBUG_FS 1251 depends on SCSI_LPFC && DEBUG_FS
1253 help 1252 ---help---
1254 This makes debugging information from the lpfc driver 1253 This makes debugging information from the lpfc driver
1255 available via the debugfs filesystem. 1254 available via the debugfs filesystem.
1256 1255
1256config LPFC_NVME_INITIATOR
1257 bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
1258 depends on SCSI_LPFC && NVME_FC
1259 ---help---
1260 This enables NVME Initiator support in the Emulex lpfc driver.
1261
1262config LPFC_NVME_TARGET
1263 bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
1264 depends on SCSI_LPFC && NVME_TARGET_FC
1265 ---help---
1266 This enables NVME Target support in the Emulex lpfc driver.
1267 Target enablement must still be enabled on a per adapter
1268 basis by module parameters.
1269
1257config SCSI_SIM710 1270config SCSI_SIM710
1258 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" 1271 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
1259 depends on (EISA || MCA) && SCSI 1272 depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2e5338dec621..7b0410e0f569 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -468,7 +468,7 @@ err_out:
468 return -1; 468 return -1;
469 469
470err_blink: 470err_blink:
471 return (status > 16) & 0xFF; 471 return (status >> 16) & 0xFF;
472} 472}
473 473
474static inline u32 aac_get_vector(struct aac_dev *dev) 474static inline u32 aac_get_vector(struct aac_dev *dev)
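Why the one-character fix above matters: '>' yields a boolean compare, '>>' extracts the byte. With an illustrative status of 0x00AB0000:

u32 status = 0x00AB0000;

int wrong = (status >  16) & 0xFF;	/* (status > 16) is 1, so result 1 */
int right = (status >> 16) & 0xFF;	/* 0xAB, the actual blink code */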
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 109e2c99e6c1..95d8f25cbcca 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
6278 * does not disable its parity logic prior to 6278 * does not disable its parity logic prior to
6279 * the start of the reset. This may cause a 6279 * the start of the reset. This may cause a
6280 * parity error to be detected and thus a 6280 * parity error to be detected and thus a
6281 * spurious SERR or PERR assertion. Disble 6281 * spurious SERR or PERR assertion. Disable
6282 * PERR and SERR responses during the CHIPRST. 6282 * PERR and SERR responses during the CHIPRST.
6283 */ 6283 */
6284 mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN); 6284 mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 07c08ce68d70..894b1e3ebd56 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -561,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
561 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); 561 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
562 task->state = state; 562 task->state = state;
563 563
564 if (!list_empty(&task->running)) 564 spin_lock_bh(&conn->taskqueuelock);
565 if (!list_empty(&task->running)) {
566 pr_debug_once("%s while task on list", __func__);
565 list_del_init(&task->running); 567 list_del_init(&task->running);
568 }
569 spin_unlock_bh(&conn->taskqueuelock);
566 570
567 if (conn->task == task) 571 if (conn->task == task)
568 conn->task = NULL; 572 conn->task = NULL;
@@ -784,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
784 if (session->tt->xmit_task(task)) 788 if (session->tt->xmit_task(task))
785 goto free_task; 789 goto free_task;
786 } else { 790 } else {
791 spin_lock_bh(&conn->taskqueuelock);
787 list_add_tail(&task->running, &conn->mgmtqueue); 792 list_add_tail(&task->running, &conn->mgmtqueue);
793 spin_unlock_bh(&conn->taskqueuelock);
788 iscsi_conn_queue_work(conn); 794 iscsi_conn_queue_work(conn);
789 } 795 }
790 796
@@ -1475,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
1475 * this may be on the requeue list already if the xmit_task callout 1481 * this may be on the requeue list already if the xmit_task callout
1476 * is handling the r2ts while we are adding new ones 1482 * is handling the r2ts while we are adding new ones
1477 */ 1483 */
1484 spin_lock_bh(&conn->taskqueuelock);
1478 if (list_empty(&task->running)) 1485 if (list_empty(&task->running))
1479 list_add_tail(&task->running, &conn->requeue); 1486 list_add_tail(&task->running, &conn->requeue);
1487 spin_unlock_bh(&conn->taskqueuelock);
1480 iscsi_conn_queue_work(conn); 1488 iscsi_conn_queue_work(conn);
1481} 1489}
1482EXPORT_SYMBOL_GPL(iscsi_requeue_task); 1490EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1513,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
1513 * only have one nop-out as a ping from us and targets should not 1521 * only have one nop-out as a ping from us and targets should not
1514 * overflow us with nop-ins 1522 * overflow us with nop-ins
1515 */ 1523 */
1524 spin_lock_bh(&conn->taskqueuelock);
1516check_mgmt: 1525check_mgmt:
1517 while (!list_empty(&conn->mgmtqueue)) { 1526 while (!list_empty(&conn->mgmtqueue)) {
1518 conn->task = list_entry(conn->mgmtqueue.next, 1527 conn->task = list_entry(conn->mgmtqueue.next,
1519 struct iscsi_task, running); 1528 struct iscsi_task, running);
1520 list_del_init(&conn->task->running); 1529 list_del_init(&conn->task->running);
1530 spin_unlock_bh(&conn->taskqueuelock);
1521 if (iscsi_prep_mgmt_task(conn, conn->task)) { 1531 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1522 /* regular RX path uses back_lock */ 1532 /* regular RX path uses back_lock */
1523 spin_lock_bh(&conn->session->back_lock); 1533 spin_lock_bh(&conn->session->back_lock);
1524 __iscsi_put_task(conn->task); 1534 __iscsi_put_task(conn->task);
1525 spin_unlock_bh(&conn->session->back_lock); 1535 spin_unlock_bh(&conn->session->back_lock);
1526 conn->task = NULL; 1536 conn->task = NULL;
1537 spin_lock_bh(&conn->taskqueuelock);
1527 continue; 1538 continue;
1528 } 1539 }
1529 rc = iscsi_xmit_task(conn); 1540 rc = iscsi_xmit_task(conn);
1530 if (rc) 1541 if (rc)
1531 goto done; 1542 goto done;
1543 spin_lock_bh(&conn->taskqueuelock);
1532 } 1544 }
1533 1545
1534 /* process pending command queue */ 1546 /* process pending command queue */
@@ -1536,19 +1548,24 @@ check_mgmt:
1536 conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, 1548 conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
1537 running); 1549 running);
1538 list_del_init(&conn->task->running); 1550 list_del_init(&conn->task->running);
1551 spin_unlock_bh(&conn->taskqueuelock);
1539 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1552 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1540 fail_scsi_task(conn->task, DID_IMM_RETRY); 1553 fail_scsi_task(conn->task, DID_IMM_RETRY);
1554 spin_lock_bh(&conn->taskqueuelock);
1541 continue; 1555 continue;
1542 } 1556 }
1543 rc = iscsi_prep_scsi_cmd_pdu(conn->task); 1557 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1544 if (rc) { 1558 if (rc) {
1545 if (rc == -ENOMEM || rc == -EACCES) { 1559 if (rc == -ENOMEM || rc == -EACCES) {
1560 spin_lock_bh(&conn->taskqueuelock);
1546 list_add_tail(&conn->task->running, 1561 list_add_tail(&conn->task->running,
1547 &conn->cmdqueue); 1562 &conn->cmdqueue);
1548 conn->task = NULL; 1563 conn->task = NULL;
1564 spin_unlock_bh(&conn->taskqueuelock);
1549 goto done; 1565 goto done;
1550 } else 1566 } else
1551 fail_scsi_task(conn->task, DID_ABORT); 1567 fail_scsi_task(conn->task, DID_ABORT);
1568 spin_lock_bh(&conn->taskqueuelock);
1552 continue; 1569 continue;
1553 } 1570 }
1554 rc = iscsi_xmit_task(conn); 1571 rc = iscsi_xmit_task(conn);
@@ -1559,6 +1576,7 @@ check_mgmt:
1559 * we need to check the mgmt queue for nops that need to 1576 * we need to check the mgmt queue for nops that need to
1560 * be sent to avoid starvation 1577 * be sent to avoid starvation
1561 */ 1578 */
1579 spin_lock_bh(&conn->taskqueuelock);
1562 if (!list_empty(&conn->mgmtqueue)) 1580 if (!list_empty(&conn->mgmtqueue))
1563 goto check_mgmt; 1581 goto check_mgmt;
1564 } 1582 }
@@ -1578,12 +1596,15 @@ check_mgmt:
1578 conn->task = task; 1596 conn->task = task;
1579 list_del_init(&conn->task->running); 1597 list_del_init(&conn->task->running);
1580 conn->task->state = ISCSI_TASK_RUNNING; 1598 conn->task->state = ISCSI_TASK_RUNNING;
1599 spin_unlock_bh(&conn->taskqueuelock);
1581 rc = iscsi_xmit_task(conn); 1600 rc = iscsi_xmit_task(conn);
1582 if (rc) 1601 if (rc)
1583 goto done; 1602 goto done;
1603 spin_lock_bh(&conn->taskqueuelock);
1584 if (!list_empty(&conn->mgmtqueue)) 1604 if (!list_empty(&conn->mgmtqueue))
1585 goto check_mgmt; 1605 goto check_mgmt;
1586 } 1606 }
1607 spin_unlock_bh(&conn->taskqueuelock);
1587 spin_unlock_bh(&conn->session->frwd_lock); 1608 spin_unlock_bh(&conn->session->frwd_lock);
1588 return -ENODATA; 1609 return -ENODATA;
1589 1610
@@ -1739,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1739 goto prepd_reject; 1760 goto prepd_reject;
1740 } 1761 }
1741 } else { 1762 } else {
1763 spin_lock_bh(&conn->taskqueuelock);
1742 list_add_tail(&task->running, &conn->cmdqueue); 1764 list_add_tail(&task->running, &conn->cmdqueue);
1765 spin_unlock_bh(&conn->taskqueuelock);
1743 iscsi_conn_queue_work(conn); 1766 iscsi_conn_queue_work(conn);
1744 } 1767 }
1745 1768
@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2897 INIT_LIST_HEAD(&conn->mgmtqueue); 2920 INIT_LIST_HEAD(&conn->mgmtqueue);
2898 INIT_LIST_HEAD(&conn->cmdqueue); 2921 INIT_LIST_HEAD(&conn->cmdqueue);
2899 INIT_LIST_HEAD(&conn->requeue); 2922 INIT_LIST_HEAD(&conn->requeue);
2923 spin_lock_init(&conn->taskqueuelock);
2900 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2924 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2901 2925
2902 /* allocate login_task used for the login/text sequences */ 2926 /* allocate login_task used for the login/text sequences */
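The locking rule these hunks introduce, distilled: every move of task->running between mgmtqueue, cmdqueue and requeue now happens under taskqueuelock, so the completion path cannot race list_del_init() against a concurrent list_add_tail(). A minimal sketch of the pattern:

/* once, in iscsi_conn_setup() */
spin_lock_init(&conn->taskqueuelock);

/* every queue/dequeue of task->running */
spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
	list_add_tail(&task->running, &conn->cmdqueue);
spin_unlock_bh(&conn->taskqueuelock);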
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 0bba2e30b4f0..257bbdd0f0b8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -99,12 +99,13 @@ struct lpfc_sli2_slim;
99#define FC_MAX_ADPTMSG 64 99#define FC_MAX_ADPTMSG 64
100 100
101#define MAX_HBAEVT 32 101#define MAX_HBAEVT 32
102#define MAX_HBAS_NO_RESET 16
102 103
103/* Number of MSI-X vectors the driver uses */ 104/* Number of MSI-X vectors the driver uses */
104#define LPFC_MSIX_VECTORS 2 105#define LPFC_MSIX_VECTORS 2
105 106
106/* lpfc wait event data ready flag */ 107/* lpfc wait event data ready flag */
107#define LPFC_DATA_READY (1<<0) 108#define LPFC_DATA_READY 0 /* bit 0 */
108 109
109/* queue dump line buffer size */ 110/* queue dump line buffer size */
110#define LPFC_LBUF_SZ 128 111#define LPFC_LBUF_SZ 128
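Why LPFC_DATA_READY changes from a mask, (1<<0), to a bit number, 0: the flag is used with the atomic bitop helpers, which take bit indices rather than masks. A short sketch:

#include <linux/bitops.h>

unsigned long work_state = 0;

set_bit(LPFC_DATA_READY, &work_state);		/* mark data ready */
if (test_and_clear_bit(LPFC_DATA_READY, &work_state))
	; /* wake the worker thread */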
@@ -692,6 +693,7 @@ struct lpfc_hba {
692 * capability 693 * capability
693 */ 694 */
694#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */ 695#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
696#define NVME_XRI_ABORT_EVENT 0x100000
695 697
696 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 698 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
697 struct lpfc_dmabuf slim2p; 699 struct lpfc_dmabuf slim2p;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c783ef7f260..5c3be3e6f5e2 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3010,6 +3010,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3010static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, 3010static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
3011 lpfc_poll_show, lpfc_poll_store); 3011 lpfc_poll_show, lpfc_poll_store);
3012 3012
3013int lpfc_no_hba_reset_cnt;
3014unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3015 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3016module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3017MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
3018
3013LPFC_ATTR(sli_mode, 0, 0, 3, 3019LPFC_ATTR(sli_mode, 0, 0, 3,
3014 "SLI mode selector:" 3020 "SLI mode selector:"
3015 " 0 - auto (SLI-3 if supported)," 3021 " 0 - auto (SLI-3 if supported),"
@@ -4451,7 +4457,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
4451 return -EINVAL; 4457 return -EINVAL;
4452 4458
4453 phba->cfg_fcp_imax = (uint32_t)val; 4459 phba->cfg_fcp_imax = (uint32_t)val;
4454 for (i = 0; i < phba->io_channel_irqs; i++) 4460
4461 for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
4455 lpfc_modify_hba_eq_delay(phba, i); 4462 lpfc_modify_hba_eq_delay(phba, i);
4456 4463
4457 return strlen(buf); 4464 return strlen(buf);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 843dd73004da..54e6ac42fbcd 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -384,7 +384,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
384extern struct device_attribute *lpfc_hba_attrs[]; 384extern struct device_attribute *lpfc_hba_attrs[];
385extern struct device_attribute *lpfc_vport_attrs[]; 385extern struct device_attribute *lpfc_vport_attrs[];
386extern struct scsi_host_template lpfc_template; 386extern struct scsi_host_template lpfc_template;
387extern struct scsi_host_template lpfc_template_s3; 387extern struct scsi_host_template lpfc_template_no_hr;
388extern struct scsi_host_template lpfc_template_nvme; 388extern struct scsi_host_template lpfc_template_nvme;
389extern struct scsi_host_template lpfc_vport_template; 389extern struct scsi_host_template lpfc_vport_template;
390extern struct fc_function_template lpfc_transport_functions; 390extern struct fc_function_template lpfc_transport_functions;
@@ -554,3 +554,5 @@ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
554 struct lpfc_wcqe_complete *abts_cmpl); 554 struct lpfc_wcqe_complete *abts_cmpl);
555extern int lpfc_enable_nvmet_cnt; 555extern int lpfc_enable_nvmet_cnt;
556extern unsigned long long lpfc_enable_nvmet[]; 556extern unsigned long long lpfc_enable_nvmet[];
557extern int lpfc_no_hba_reset_cnt;
558extern unsigned long lpfc_no_hba_reset[];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c22bb3f887e1..d3e9af983015 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -939,8 +939,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
939 "FC4 x%08x, Data: x%08x x%08x\n", 939 "FC4 x%08x, Data: x%08x x%08x\n",
940 ndlp, did, ndlp->nlp_fc4_type, 940 ndlp, did, ndlp->nlp_fc4_type,
941 FC_TYPE_FCP, FC_TYPE_NVME); 941 FC_TYPE_FCP, FC_TYPE_NVME);
942 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
942 } 943 }
943 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
944 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 944 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
945 lpfc_issue_els_prli(vport, ndlp, 0); 945 lpfc_issue_els_prli(vport, ndlp, 0);
946 } else 946 } else
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9f4798e9d938..913eed822cb8 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -3653,17 +3653,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
3653 idiag.ptr_private = phba->sli4_hba.nvmels_cq; 3653 idiag.ptr_private = phba->sli4_hba.nvmels_cq;
3654 goto pass_check; 3654 goto pass_check;
3655 } 3655 }
3656 /* NVME LS complete queue */
3657 if (phba->sli4_hba.nvmels_cq &&
3658 phba->sli4_hba.nvmels_cq->queue_id == queid) {
3659 /* Sanity check */
3660 rc = lpfc_idiag_que_param_check(
3661 phba->sli4_hba.nvmels_cq, index, count);
3662 if (rc)
3663 goto error_out;
3664 idiag.ptr_private = phba->sli4_hba.nvmels_cq;
3665 goto pass_check;
3666 }
3667 /* FCP complete queue */ 3656 /* FCP complete queue */
3668 if (phba->sli4_hba.fcp_cq) { 3657 if (phba->sli4_hba.fcp_cq) {
3669 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; 3658 for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -3738,17 +3727,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
3738 idiag.ptr_private = phba->sli4_hba.nvmels_wq; 3727 idiag.ptr_private = phba->sli4_hba.nvmels_wq;
3739 goto pass_check; 3728 goto pass_check;
3740 } 3729 }
3741 /* NVME LS work queue */
3742 if (phba->sli4_hba.nvmels_wq &&
3743 phba->sli4_hba.nvmels_wq->queue_id == queid) {
3744 /* Sanity check */
3745 rc = lpfc_idiag_que_param_check(
3746 phba->sli4_hba.nvmels_wq, index, count);
3747 if (rc)
3748 goto error_out;
3749 idiag.ptr_private = phba->sli4_hba.nvmels_wq;
3750 goto pass_check;
3751 }
3752 /* FCP work queue */ 3730 /* FCP work queue */
3753 if (phba->sli4_hba.fcp_wq) { 3731 if (phba->sli4_hba.fcp_wq) {
3754 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; 3732 for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2d26440e6f2f..d9c61d030034 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5177,15 +5177,15 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
5177 5177
5178static uint32_t 5178static uint32_t
5179lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 5179lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
5180 struct lpfc_hba *phba) 5180 struct lpfc_vport *vport)
5181{ 5181{
5182 5182
5183 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5183 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5184 5184
5185 memcpy(desc->port_names.wwnn, phba->wwnn, 5185 memcpy(desc->port_names.wwnn, &vport->fc_nodename,
5186 sizeof(desc->port_names.wwnn)); 5186 sizeof(desc->port_names.wwnn));
5187 5187
5188 memcpy(desc->port_names.wwpn, phba->wwpn, 5188 memcpy(desc->port_names.wwpn, &vport->fc_portname,
5189 sizeof(desc->port_names.wwpn)); 5189 sizeof(desc->port_names.wwpn));
5190 5190
5191 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5191 desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5279,7 +5279,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5279 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 5279 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
5280 (len + pcmd), &rdp_context->link_stat); 5280 (len + pcmd), &rdp_context->link_stat);
5281 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 5281 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
5282 (len + pcmd), phba); 5282 (len + pcmd), vport);
5283 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 5283 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
5284 (len + pcmd), vport, ndlp); 5284 (len + pcmd), vport, ndlp);
5285 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 5285 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
@@ -8371,11 +8371,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8371 spin_lock_irq(shost->host_lock); 8371 spin_lock_irq(shost->host_lock);
8372 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 8372 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
8373 spin_unlock_irq(shost->host_lock); 8373 spin_unlock_irq(shost->host_lock);
8374 if (vport->port_type == LPFC_PHYSICAL_PORT 8374 if (mb->mbxStatus == MBX_NOT_FINISHED)
8375 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) 8375 break;
8376 lpfc_issue_init_vfi(vport); 8376 if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
8377 else 8377 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
8378 if (phba->sli_rev == LPFC_SLI_REV4)
8379 lpfc_issue_init_vfi(vport);
8380 else
8381 lpfc_initial_flogi(vport);
8382 } else {
8378 lpfc_initial_fdisc(vport); 8383 lpfc_initial_fdisc(vport);
8384 }
8379 break; 8385 break;
8380 } 8386 }
8381 } else { 8387 } else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 194a14d5f8a9..180b072beef6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -313,8 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
313 ndlp->nlp_state, ndlp->nlp_rpi); 313 ndlp->nlp_state, ndlp->nlp_rpi);
314 } 314 }
315 315
316 if (!(vport->load_flag & FC_UNLOADING) && 316 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
317 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
318 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 317 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
319 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 318 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
320 (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) && 319 (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
@@ -641,6 +640,8 @@ lpfc_work_done(struct lpfc_hba *phba)
641 lpfc_handle_rrq_active(phba); 640 lpfc_handle_rrq_active(phba);
642 if (phba->hba_flag & FCP_XRI_ABORT_EVENT) 641 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
643 lpfc_sli4_fcp_xri_abort_event_proc(phba); 642 lpfc_sli4_fcp_xri_abort_event_proc(phba);
643 if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
644 lpfc_sli4_nvme_xri_abort_event_proc(phba);
644 if (phba->hba_flag & ELS_XRI_ABORT_EVENT) 645 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
645 lpfc_sli4_els_xri_abort_event_proc(phba); 646 lpfc_sli4_els_xri_abort_event_proc(phba);
646 if (phba->hba_flag & ASYNC_EVENT) 647 if (phba->hba_flag & ASYNC_EVENT)
@@ -2173,7 +2174,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2173 uint32_t boot_flag, addr_mode; 2174 uint32_t boot_flag, addr_mode;
2174 uint16_t fcf_index, next_fcf_index; 2175 uint16_t fcf_index, next_fcf_index;
2175 struct lpfc_fcf_rec *fcf_rec = NULL; 2176 struct lpfc_fcf_rec *fcf_rec = NULL;
2176 uint16_t vlan_id; 2177 uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2177 bool select_new_fcf; 2178 bool select_new_fcf;
2178 int rc; 2179 int rc;
2179 2180
@@ -4020,9 +4021,11 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4020 rdata = rport->dd_data; 4021 rdata = rport->dd_data;
4021 /* break the link before dropping the ref */ 4022 /* break the link before dropping the ref */
4022 ndlp->rport = NULL; 4023 ndlp->rport = NULL;
4023 if (rdata && rdata->pnode == ndlp) 4024 if (rdata) {
4024 lpfc_nlp_put(ndlp); 4025 if (rdata->pnode == ndlp)
4025 rdata->pnode = NULL; 4026 lpfc_nlp_put(ndlp);
4027 rdata->pnode = NULL;
4028 }
 4026 /* drop reference for earlier registration */	 4029 /* drop reference for earlier registration */
4027 put_device(&rport->dev); 4030 put_device(&rport->dev);
4028 } 4031 }
@@ -4344,9 +4347,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4344{ 4347{
4345 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 4348 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4346 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); 4349 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4347 init_timer(&ndlp->nlp_delayfunc); 4350 setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
4348 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 4351 (unsigned long)ndlp);
4349 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
4350 ndlp->nlp_DID = did; 4352 ndlp->nlp_DID = did;
4351 ndlp->vport = vport; 4353 ndlp->vport = vport;
4352 ndlp->phba = vport->phba; 4354 ndlp->phba = vport->phba;
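setup_timer() folds the three-step init_timer()/function/data sequence into a single call with identical semantics, so this conversion and the ones in lpfc_init.c below are mechanical. The equivalence, sketched for a generic timer ("callback" and "arg" are placeholders):

    /* before */
    init_timer(&t);
    t.function = callback;              /* void callback(unsigned long) */
    t.data = (unsigned long)arg;

    /* after */
    setup_timer(&t, callback, (unsigned long)arg);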
@@ -4606,9 +4608,9 @@ lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
4606 pring = qp->pring; 4608 pring = qp->pring;
4607 if (!pring) 4609 if (!pring)
4608 continue; 4610 continue;
4609 spin_lock_irq(&pring->ring_lock); 4611 spin_lock(&pring->ring_lock);
4610 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list); 4612 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
4611 spin_unlock_irq(&pring->ring_lock); 4613 spin_unlock(&pring->ring_lock);
4612 } 4614 }
4613 spin_unlock_irq(&phba->hbalock); 4615 spin_unlock_irq(&phba->hbalock);
4614} 4616}
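Dropping the _irq suffix on the inner per-ring lock is safe here because the enclosing hbalock is taken with spin_lock_irq(), so interrupts are already disabled for the whole critical section; retoggling them per ring would be redundant. The nesting, reduced to its shape:

    spin_lock_irq(&phba->hbalock);          /* outer: disables interrupts */
    spin_lock(&pring->ring_lock);           /* inner: plain lock suffices */
    __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
    spin_unlock(&pring->ring_lock);
    spin_unlock_irq(&phba->hbalock);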
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index cfdb068a3bfc..15277705cb6b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1001,7 +1001,7 @@ struct eq_delay_info {
1001 uint32_t phase; 1001 uint32_t phase;
1002 uint32_t delay_multi; 1002 uint32_t delay_multi;
1003}; 1003};
1004#define LPFC_MAX_EQ_DELAY 8 1004#define LPFC_MAX_EQ_DELAY_EQID_CNT 8
1005 1005
1006struct sgl_page_pairs { 1006struct sgl_page_pairs {
1007 uint32_t sgl_pg0_addr_lo; 1007 uint32_t sgl_pg0_addr_lo;
@@ -1070,7 +1070,7 @@ struct lpfc_mbx_modify_eq_delay {
1070 union { 1070 union {
1071 struct { 1071 struct {
1072 uint32_t num_eq; 1072 uint32_t num_eq;
1073 struct eq_delay_info eq[LPFC_MAX_EQ_DELAY]; 1073 struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT];
1074 } request; 1074 } request;
1075 struct { 1075 struct {
1076 uint32_t word0; 1076 uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0ee429d773f3..2697d49da4d7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3555,6 +3555,44 @@ out_free_mem:
3555 return rc; 3555 return rc;
3556} 3556}
3557 3557
3558static uint64_t
3559lpfc_get_wwpn(struct lpfc_hba *phba)
3560{
3561 uint64_t wwn;
3562 int rc;
3563 LPFC_MBOXQ_t *mboxq;
3564 MAILBOX_t *mb;
3565
3566
3567 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3568 GFP_KERNEL);
3569 if (!mboxq)
3570 return (uint64_t)-1;
3571
3572 /* First get WWN of HBA instance */
3573 lpfc_read_nv(phba, mboxq);
3574 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3575 if (rc != MBX_SUCCESS) {
3576 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3577 "6019 Mailbox failed , mbxCmd x%x "
3578 "READ_NV, mbxStatus x%x\n",
3579 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
3580 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
3581 mempool_free(mboxq, phba->mbox_mem_pool);
3582 return (uint64_t) -1;
3583 }
3584 mb = &mboxq->u.mb;
3585 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
3586 /* wwn is WWPN of HBA instance */
3587 mempool_free(mboxq, phba->mbox_mem_pool);
3588 if (phba->sli_rev == LPFC_SLI_REV4)
3589 return be64_to_cpu(wwn);
3590 else
3591 return (((wwn & 0xffffffff00000000) >> 32) |
3592 ((wwn & 0x00000000ffffffff) << 32));
3593
3594}
3595
3558/** 3596/**
3559 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping 3597 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
3560 * @phba: pointer to lpfc hba data structure. 3598 * @phba: pointer to lpfc hba data structure.
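In lpfc_get_wwpn() above, the SLI-4 READ_NV payload is big-endian, so be64_to_cpu() yields the WWPN directly; otherwise the two 32-bit halves arrive swapped relative to each other and the final expression exchanges them. A worked example of that swap (the input value is hypothetical):

    uint64_t wwn   = 0x11223344aabbccddULL;
    uint64_t fixed = ((wwn & 0xffffffff00000000ULL) >> 32) |
                     ((wwn & 0x00000000ffffffffULL) << 32);
    /* fixed == 0xaabbccdd11223344ULL */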
@@ -3676,17 +3714,32 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3676 struct lpfc_vport *vport; 3714 struct lpfc_vport *vport;
3677 struct Scsi_Host *shost = NULL; 3715 struct Scsi_Host *shost = NULL;
3678 int error = 0; 3716 int error = 0;
3717 int i;
3718 uint64_t wwn;
3719 bool use_no_reset_hba = false;
3720
3721 wwn = lpfc_get_wwpn(phba);
3722
3723 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
3724 if (wwn == lpfc_no_hba_reset[i]) {
3725 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3726 "6020 Setting use_no_reset port=%llx\n",
3727 wwn);
3728 use_no_reset_hba = true;
3729 break;
3730 }
3731 }
3679 3732
3680 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 3733 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
3681 if (dev != &phba->pcidev->dev) { 3734 if (dev != &phba->pcidev->dev) {
3682 shost = scsi_host_alloc(&lpfc_vport_template, 3735 shost = scsi_host_alloc(&lpfc_vport_template,
3683 sizeof(struct lpfc_vport)); 3736 sizeof(struct lpfc_vport));
3684 } else { 3737 } else {
3685 if (phba->sli_rev == LPFC_SLI_REV4) 3738 if (!use_no_reset_hba)
3686 shost = scsi_host_alloc(&lpfc_template, 3739 shost = scsi_host_alloc(&lpfc_template,
3687 sizeof(struct lpfc_vport)); 3740 sizeof(struct lpfc_vport));
3688 else 3741 else
3689 shost = scsi_host_alloc(&lpfc_template_s3, 3742 shost = scsi_host_alloc(&lpfc_template_no_hr,
3690 sizeof(struct lpfc_vport)); 3743 sizeof(struct lpfc_vport));
3691 } 3744 }
3692 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 3745 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
@@ -3734,17 +3787,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3734 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3787 INIT_LIST_HEAD(&vport->rcv_buffer_list);
3735 spin_lock_init(&vport->work_port_lock); 3788 spin_lock_init(&vport->work_port_lock);
3736 3789
3737 init_timer(&vport->fc_disctmo); 3790 setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
3738 vport->fc_disctmo.function = lpfc_disc_timeout; 3791 (unsigned long)vport);
3739 vport->fc_disctmo.data = (unsigned long)vport;
3740 3792
3741 init_timer(&vport->els_tmofunc); 3793 setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
3742 vport->els_tmofunc.function = lpfc_els_timeout; 3794 (unsigned long)vport);
3743 vport->els_tmofunc.data = (unsigned long)vport;
3744 3795
3745 init_timer(&vport->delayed_disc_tmo); 3796 setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
3746 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 3797 (unsigned long)vport);
3747 vport->delayed_disc_tmo.data = (unsigned long)vport;
3748 3798
3749 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3799 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3750 if (error) 3800 if (error)
@@ -5406,21 +5456,15 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5406 INIT_LIST_HEAD(&phba->luns); 5456 INIT_LIST_HEAD(&phba->luns);
5407 5457
5408 /* MBOX heartbeat timer */ 5458 /* MBOX heartbeat timer */
5409 init_timer(&psli->mbox_tmo); 5459 setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
5410 psli->mbox_tmo.function = lpfc_mbox_timeout;
5411 psli->mbox_tmo.data = (unsigned long) phba;
5412 /* Fabric block timer */ 5460 /* Fabric block timer */
5413 init_timer(&phba->fabric_block_timer); 5461 setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
5414 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5462 (unsigned long)phba);
5415 phba->fabric_block_timer.data = (unsigned long) phba;
5416 /* EA polling mode timer */ 5463 /* EA polling mode timer */
5417 init_timer(&phba->eratt_poll); 5464 setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
5418 phba->eratt_poll.function = lpfc_poll_eratt; 5465 (unsigned long)phba);
5419 phba->eratt_poll.data = (unsigned long) phba;
5420 /* Heartbeat timer */ 5466 /* Heartbeat timer */
5421 init_timer(&phba->hb_tmofunc); 5467 setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
5422 phba->hb_tmofunc.function = lpfc_hb_timeout;
5423 phba->hb_tmofunc.data = (unsigned long)phba;
5424 5468
5425 return 0; 5469 return 0;
5426} 5470}
@@ -5446,9 +5490,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5446 */ 5490 */
5447 5491
5448 /* FCP polling mode timer */ 5492 /* FCP polling mode timer */
5449 init_timer(&phba->fcp_poll_timer); 5493 setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
5450 phba->fcp_poll_timer.function = lpfc_poll_timeout; 5494 (unsigned long)phba);
5451 phba->fcp_poll_timer.data = (unsigned long) phba;
5452 5495
5453 /* Host attention work mask setup */ 5496 /* Host attention work mask setup */
5454 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 5497 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5482,7 +5525,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5482 5525
 5483 /* Initialize the host templates with the configured values. */	 5526 /* Initialize the host templates with the configured values. */
5484 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5527 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5485 lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt; 5528 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5529 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5486 5530
5487 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 5531 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
5488 if (phba->cfg_enable_bg) { 5532 if (phba->cfg_enable_bg) {
@@ -5617,14 +5661,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5617 * Initialize timers used by driver 5661 * Initialize timers used by driver
5618 */ 5662 */
5619 5663
5620 init_timer(&phba->rrq_tmr); 5664 setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
5621 phba->rrq_tmr.function = lpfc_rrq_timeout;
5622 phba->rrq_tmr.data = (unsigned long)phba;
5623 5665
5624 /* FCF rediscover timer */ 5666 /* FCF rediscover timer */
5625 init_timer(&phba->fcf.redisc_wait); 5667 setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
5626 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 5668 (unsigned long)phba);
5627 phba->fcf.redisc_wait.data = (unsigned long)phba;
5628 5669
5629 /* 5670 /*
5630 * Control structure for handling external multi-buffer mailbox 5671 * Control structure for handling external multi-buffer mailbox
@@ -5706,6 +5747,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5706 /* Initialize the host templates with the updated values. */ 5747 /* Initialize the host templates with the updated values. */
5707 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5748 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5708 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5749 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5750 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5709 5751
5710 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5752 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5711 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5753 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5736,6 +5778,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5736 /* Initialize the Abort nvme buffer list used by driver */ 5778 /* Initialize the Abort nvme buffer list used by driver */
5737 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); 5779 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
5738 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 5780 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
5781 /* Fast-path XRI aborted CQ Event work queue list */
5782 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
5739 } 5783 }
5740 5784
5741 /* This abort list used by worker thread */ 5785 /* This abort list used by worker thread */
@@ -8712,12 +8756,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8712 } 8756 }
8713 } 8757 }
8714 8758
8715 /* 8759 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
8716 * Configure EQ delay multipier for interrupt coalescing using
8717 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
8718 */
8719 for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
8720 lpfc_modify_hba_eq_delay(phba, qidx); 8760 lpfc_modify_hba_eq_delay(phba, qidx);
8761
8721 return 0; 8762 return 0;
8722 8763
8723out_destroy: 8764out_destroy:
@@ -8973,6 +9014,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
8973 /* Pending ELS XRI abort events */ 9014 /* Pending ELS XRI abort events */
8974 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 9015 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8975 &cqelist); 9016 &cqelist);
9017 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9018 /* Pending NVME XRI abort events */
9019 list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
9020 &cqelist);
9021 }
 8976 /* Pending async events */	 9022 /* Pending async events */
8977 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 9023 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
8978 &cqelist); 9024 &cqelist);
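The new NVME list joins the existing drain scheme in lpfc_sli4_cq_event_release_all(): each pending queue is spliced onto a local list under hbalock, then the events are released with the lock dropped. The splice-and-release idiom, sketched with the list helpers used here:

    struct lpfc_cq_event *cq_event;
    LIST_HEAD(cqelist);

    spin_lock_irq(&phba->hbalock);
    list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
                     &cqelist);             /* source list is left empty */
    spin_unlock_irq(&phba->hbalock);

    while (!list_empty(&cqelist)) {
            cq_event = list_first_entry(&cqelist,
                                        struct lpfc_cq_event, list);
            list_del(&cq_event->list);
            lpfc_sli4_cq_event_release(phba, cq_event);
    }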
@@ -10400,12 +10446,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
10400 fc_remove_host(shost); 10446 fc_remove_host(shost);
10401 scsi_remove_host(shost); 10447 scsi_remove_host(shost);
10402 10448
10403 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
10404 * localports are destroyed after to cleanup all transport memory.
10405 */
10406 lpfc_cleanup(vport); 10449 lpfc_cleanup(vport);
10407 lpfc_nvmet_destroy_targetport(phba);
10408 lpfc_nvme_destroy_localport(vport);
10409 10450
10410 /* 10451 /*
10411 * Bring down the SLI Layer. This step disable all interrupts, 10452 * Bring down the SLI Layer. This step disable all interrupts,
@@ -12018,6 +12059,7 @@ static struct pci_driver lpfc_driver = {
12018 .id_table = lpfc_id_table, 12059 .id_table = lpfc_id_table,
12019 .probe = lpfc_pci_probe_one, 12060 .probe = lpfc_pci_probe_one,
12020 .remove = lpfc_pci_remove_one, 12061 .remove = lpfc_pci_remove_one,
12062 .shutdown = lpfc_pci_remove_one,
12021 .suspend = lpfc_pci_suspend_one, 12063 .suspend = lpfc_pci_suspend_one,
12022 .resume = lpfc_pci_resume_one, 12064 .resume = lpfc_pci_resume_one,
12023 .err_handler = &lpfc_err_handler, 12065 .err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index c61d8d692ede..5986c7957199 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -646,7 +646,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
646 } 646 }
647 647
648 dma_buf->iocbq = lpfc_sli_get_iocbq(phba); 648 dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
649 dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
650 if (!dma_buf->iocbq) { 649 if (!dma_buf->iocbq) {
651 kfree(dma_buf->context); 650 kfree(dma_buf->context);
652 pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt, 651 pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
@@ -658,6 +657,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
658 "2621 Ran out of nvmet iocb/WQEs\n"); 657 "2621 Ran out of nvmet iocb/WQEs\n");
659 return NULL; 658 return NULL;
660 } 659 }
660 dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
661 nvmewqe = dma_buf->iocbq; 661 nvmewqe = dma_buf->iocbq;
662 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; 662 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
663 /* Initialize WQE */ 663 /* Initialize WQE */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 609a908ea9db..0a4c19081409 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -316,7 +316,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
316 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); 316 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
317 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); 317 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
318 bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); 318 bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
319 bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL); 319 bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
320 bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); 320 bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
321 321
322 /* Word 6 */ 322 /* Word 6 */
@@ -620,15 +620,15 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
620 * Embed the payload in the last half of the WQE 620 * Embed the payload in the last half of the WQE
621 * WQE words 16-30 get the NVME CMD IU payload 621 * WQE words 16-30 get the NVME CMD IU payload
622 * 622 *
623 * WQE Word 16 is already setup with flags 623 * WQE words 16-19 get payload Words 1-4
624 * WQE words 17-19 get payload Words 2-4
625 * WQE words 20-21 get payload Words 6-7 624 * WQE words 20-21 get payload Words 6-7
626 * WQE words 22-29 get payload Words 16-23 625 * WQE words 22-29 get payload Words 16-23
627 */ 626 */
628 wptr = &wqe->words[17]; /* WQE ptr */ 627 wptr = &wqe->words[16]; /* WQE ptr */
629 dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */ 628 dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
630 dptr += 2; /* Skip Words 0-1 in payload */ 629 dptr++; /* Skip Word 0 in payload */
631 630
631 *wptr++ = *dptr++; /* Word 1 */
632 *wptr++ = *dptr++; /* Word 2 */ 632 *wptr++ = *dptr++; /* Word 2 */
633 *wptr++ = *dptr++; /* Word 3 */ 633 *wptr++ = *dptr++; /* Word 3 */
634 *wptr++ = *dptr++; /* Word 4 */ 634 *wptr++ = *dptr++; /* Word 4 */
@@ -978,9 +978,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
978 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 978 bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
979 NVME_WRITE_CMD); 979 NVME_WRITE_CMD);
980 980
981 /* Word 16 */
982 wqe->words[16] = LPFC_NVME_EMBED_WRITE;
983
984 phba->fc4NvmeOutputRequests++; 981 phba->fc4NvmeOutputRequests++;
985 } else { 982 } else {
986 /* Word 7 */ 983 /* Word 7 */
@@ -1002,9 +999,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1002 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 999 bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
1003 NVME_READ_CMD); 1000 NVME_READ_CMD);
1004 1001
1005 /* Word 16 */
1006 wqe->words[16] = LPFC_NVME_EMBED_READ;
1007
1008 phba->fc4NvmeInputRequests++; 1002 phba->fc4NvmeInputRequests++;
1009 } 1003 }
1010 } else { 1004 } else {
@@ -1026,9 +1020,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1026 /* Word 11 */ 1020 /* Word 11 */
1027 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD); 1021 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
1028 1022
1029 /* Word 16 */
1030 wqe->words[16] = LPFC_NVME_EMBED_CMD;
1031
1032 phba->fc4NvmeControlRequests++; 1023 phba->fc4NvmeControlRequests++;
1033 } 1024 }
1034 /* 1025 /*
@@ -1286,6 +1277,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1286 pnvme_fcreq->private = (void *)lpfc_ncmd; 1277 pnvme_fcreq->private = (void *)lpfc_ncmd;
1287 lpfc_ncmd->nvmeCmd = pnvme_fcreq; 1278 lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1288 lpfc_ncmd->nrport = rport; 1279 lpfc_ncmd->nrport = rport;
1280 lpfc_ncmd->ndlp = ndlp;
1289 lpfc_ncmd->start_time = jiffies; 1281 lpfc_ncmd->start_time = jiffies;
1290 1282
1291 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp); 1283 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
@@ -1319,7 +1311,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1319 "sid: x%x did: x%x oxid: x%x\n", 1311 "sid: x%x did: x%x oxid: x%x\n",
1320 ret, vport->fc_myDID, ndlp->nlp_DID, 1312 ret, vport->fc_myDID, ndlp->nlp_DID,
1321 lpfc_ncmd->cur_iocbq.sli4_xritag); 1313 lpfc_ncmd->cur_iocbq.sli4_xritag);
1322 ret = -EINVAL; 1314 ret = -EBUSY;
1323 goto out_free_nvme_buf; 1315 goto out_free_nvme_buf;
1324 } 1316 }
1325 1317
@@ -1821,10 +1813,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
1821 pdma_phys_sgl1, cur_xritag); 1813 pdma_phys_sgl1, cur_xritag);
1822 if (status) { 1814 if (status) {
1823 /* failure, put on abort nvme list */ 1815 /* failure, put on abort nvme list */
1824 lpfc_ncmd->exch_busy = 1; 1816 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1825 } else { 1817 } else {
1826 /* success, put on NVME buffer list */ 1818 /* success, put on NVME buffer list */
1827 lpfc_ncmd->exch_busy = 0; 1819 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
1828 lpfc_ncmd->status = IOSTAT_SUCCESS; 1820 lpfc_ncmd->status = IOSTAT_SUCCESS;
1829 num_posted++; 1821 num_posted++;
1830 } 1822 }
@@ -1854,10 +1846,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
1854 struct lpfc_nvme_buf, list); 1846 struct lpfc_nvme_buf, list);
1855 if (status) { 1847 if (status) {
1856 /* failure, put on abort nvme list */ 1848 /* failure, put on abort nvme list */
1857 lpfc_ncmd->exch_busy = 1; 1849 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1858 } else { 1850 } else {
1859 /* success, put on NVME buffer list */ 1851 /* success, put on NVME buffer list */
1860 lpfc_ncmd->exch_busy = 0; 1852 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
1861 lpfc_ncmd->status = IOSTAT_SUCCESS; 1853 lpfc_ncmd->status = IOSTAT_SUCCESS;
1862 num_posted++; 1854 num_posted++;
1863 } 1855 }
@@ -2099,7 +2091,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
2099 unsigned long iflag = 0; 2091 unsigned long iflag = 0;
2100 2092
2101 lpfc_ncmd->nonsg_phys = 0; 2093 lpfc_ncmd->nonsg_phys = 0;
2102 if (lpfc_ncmd->exch_busy) { 2094 if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
2103 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, 2095 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
2104 iflag); 2096 iflag);
2105 lpfc_ncmd->nvmeCmd = NULL; 2097 lpfc_ncmd->nvmeCmd = NULL;
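Replacing the dedicated exch_busy field with an LPFC_SBUF_XBUSY bit in the existing flags word presumably mirrors how the SCSI buffer path records the same condition, and frees a structure member. These hunks use the usual three operations:

    lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;       /* XRI still owned by port */
    lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;      /* XRI fully released      */
    if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY)    /* route to abort list     */
            ...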
@@ -2135,11 +2127,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
2135int 2127int
2136lpfc_nvme_create_localport(struct lpfc_vport *vport) 2128lpfc_nvme_create_localport(struct lpfc_vport *vport)
2137{ 2129{
2130 int ret = 0;
2138 struct lpfc_hba *phba = vport->phba; 2131 struct lpfc_hba *phba = vport->phba;
2139 struct nvme_fc_port_info nfcp_info; 2132 struct nvme_fc_port_info nfcp_info;
2140 struct nvme_fc_local_port *localport; 2133 struct nvme_fc_local_port *localport;
2141 struct lpfc_nvme_lport *lport; 2134 struct lpfc_nvme_lport *lport;
2142 int len, ret = 0; 2135 int len;
2143 2136
2144 /* Initialize this localport instance. The vport wwn usage ensures 2137 /* Initialize this localport instance. The vport wwn usage ensures
2145 * that NPIV is accounted for. 2138 * that NPIV is accounted for.
@@ -2156,8 +2149,12 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2156 /* localport is allocated from the stack, but the registration 2149 /* localport is allocated from the stack, but the registration
2157 * call allocates heap memory as well as the private area. 2150 * call allocates heap memory as well as the private area.
2158 */ 2151 */
2152#ifdef CONFIG_LPFC_NVME_INITIATOR
2159 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, 2153 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2160 &vport->phba->pcidev->dev, &localport); 2154 &vport->phba->pcidev->dev, &localport);
2155#else
2156 ret = -ENOMEM;
2157#endif
2161 if (!ret) { 2158 if (!ret) {
2162 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, 2159 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2163 "6005 Successfully registered local " 2160 "6005 Successfully registered local "
@@ -2173,10 +2170,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2173 lport->vport = vport; 2170 lport->vport = vport;
2174 INIT_LIST_HEAD(&lport->rport_list); 2171 INIT_LIST_HEAD(&lport->rport_list);
2175 vport->nvmei_support = 1; 2172 vport->nvmei_support = 1;
2173 len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
2174 vport->phba->total_nvme_bufs += len;
2176 } 2175 }
2177 2176
2178 len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
2179 vport->phba->total_nvme_bufs += len;
2180 return ret; 2177 return ret;
2181} 2178}
2182 2179
@@ -2193,6 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2193void 2190void
2194lpfc_nvme_destroy_localport(struct lpfc_vport *vport) 2191lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2195{ 2192{
2193#ifdef CONFIG_LPFC_NVME_INITIATOR
2196 struct nvme_fc_local_port *localport; 2194 struct nvme_fc_local_port *localport;
2197 struct lpfc_nvme_lport *lport; 2195 struct lpfc_nvme_lport *lport;
2198 struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; 2196 struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2208,7 +2206,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2208 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2206 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2209 "6011 Destroying NVME localport %p\n", 2207 "6011 Destroying NVME localport %p\n",
2210 localport); 2208 localport);
2211
2212 list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) { 2209 list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
2213 /* The last node ref has to get released now before the rport 2210 /* The last node ref has to get released now before the rport
2214 * private memory area is released by the transport. 2211 * private memory area is released by the transport.
@@ -2222,6 +2219,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2222 "6008 rport fail destroy %x\n", ret); 2219 "6008 rport fail destroy %x\n", ret);
2223 wait_for_completion_timeout(&rport->rport_unreg_done, 5); 2220 wait_for_completion_timeout(&rport->rport_unreg_done, 5);
2224 } 2221 }
2222
2225 /* lport's rport list is clear. Unregister 2223 /* lport's rport list is clear. Unregister
2226 * lport and release resources. 2224 * lport and release resources.
2227 */ 2225 */
@@ -2245,6 +2243,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2245 "Failed, status x%x\n", 2243 "Failed, status x%x\n",
2246 ret); 2244 ret);
2247 } 2245 }
2246#endif
2248} 2247}
2249 2248
2250void 2249void
@@ -2275,6 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
2275int 2274int
2276lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2275lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2277{ 2276{
2277#ifdef CONFIG_LPFC_NVME_INITIATOR
2278 int ret = 0; 2278 int ret = 0;
2279 struct nvme_fc_local_port *localport; 2279 struct nvme_fc_local_port *localport;
2280 struct lpfc_nvme_lport *lport; 2280 struct lpfc_nvme_lport *lport;
@@ -2348,7 +2348,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2348 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; 2348 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2349 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2349 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2350 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2350 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2351
2352 ret = nvme_fc_register_remoteport(localport, &rpinfo, 2351 ret = nvme_fc_register_remoteport(localport, &rpinfo,
2353 &remote_port); 2352 &remote_port);
2354 if (!ret) { 2353 if (!ret) {
@@ -2384,6 +2383,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2384 ndlp->nlp_type, ndlp->nlp_DID, ndlp); 2383 ndlp->nlp_type, ndlp->nlp_DID, ndlp);
2385 } 2384 }
2386 return ret; 2385 return ret;
2386#else
2387 return 0;
2388#endif
2387} 2389}
2388 2390
2389/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. 2391/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
@@ -2401,6 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2401void 2403void
2402lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2404lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2403{ 2405{
2406#ifdef CONFIG_LPFC_NVME_INITIATOR
2404 int ret; 2407 int ret;
2405 struct nvme_fc_local_port *localport; 2408 struct nvme_fc_local_port *localport;
2406 struct lpfc_nvme_lport *lport; 2409 struct lpfc_nvme_lport *lport;
@@ -2458,7 +2461,61 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2458 return; 2461 return;
2459 2462
2460 input_err: 2463 input_err:
2464#endif
2461 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2465 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2462 "6168: State error: lport %p, rport%p FCID x%06x\n", 2466 "6168: State error: lport %p, rport%p FCID x%06x\n",
2463 vport->localport, ndlp->rport, ndlp->nlp_DID); 2467 vport->localport, ndlp->rport, ndlp->nlp_DID);
2464} 2468}
2469
2470/**
2471 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2472 * @phba: pointer to lpfc hba data structure.
 2473 * @axri: pointer to the nvme xri abort wcqe structure.
2474 *
2475 * This routine is invoked by the worker thread to process a SLI4 fast-path
 2476 * NVME aborted xri.
2477 **/
2478void
2479lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2480 struct sli4_wcqe_xri_aborted *axri)
2481{
2482 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2483 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
2484 struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
2485 struct lpfc_nodelist *ndlp;
2486 unsigned long iflag = 0;
2487 int rrq_empty = 0;
2488
2489 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2490 return;
2491 spin_lock_irqsave(&phba->hbalock, iflag);
2492 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2493 list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
2494 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
2495 list) {
2496 if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
2497 list_del(&lpfc_ncmd->list);
2498 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2499 lpfc_ncmd->status = IOSTAT_SUCCESS;
2500 spin_unlock(
2501 &phba->sli4_hba.abts_nvme_buf_list_lock);
2502
2503 rrq_empty = list_empty(&phba->active_rrq_list);
2504 spin_unlock_irqrestore(&phba->hbalock, iflag);
2505 ndlp = lpfc_ncmd->ndlp;
2506 if (ndlp) {
2507 lpfc_set_rrq_active(
2508 phba, ndlp,
2509 lpfc_ncmd->cur_iocbq.sli4_lxritag,
2510 rxid, 1);
2511 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2512 }
2513 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2514 if (rrq_empty)
2515 lpfc_worker_wake_up(phba);
2516 return;
2517 }
2518 }
2519 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
2520 spin_unlock_irqrestore(&phba->hbalock, iflag);
2521}
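The walk above holds hbalock (interrupts off) with the abort-list lock nested inside; on a match both are dropped, inner to outer, before the RRQ and ABTS error handlers run, since those paths take locks of their own. Reduced to a skeleton:

    spin_lock_irqsave(&phba->hbalock, iflag);
    spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
    list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
                             &phba->sli4_hba.lpfc_abts_nvme_buf_list, list) {
            if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
                    list_del(&lpfc_ncmd->list);
                    spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                    spin_unlock_irqrestore(&phba->hbalock, iflag);
                    /* handle the aborted buffer with no locks held */
                    return;
            }
    }
    spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
    spin_unlock_irqrestore(&phba->hbalock, iflag);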
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index b2fae5e813f8..1347deb8dd6c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -57,6 +57,7 @@ struct lpfc_nvme_buf {
57 struct list_head list; 57 struct list_head list;
58 struct nvmefc_fcp_req *nvmeCmd; 58 struct nvmefc_fcp_req *nvmeCmd;
59 struct lpfc_nvme_rport *nrport; 59 struct lpfc_nvme_rport *nrport;
60 struct lpfc_nodelist *ndlp;
60 61
61 uint32_t timeout; 62 uint32_t timeout;
62 63
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c421e1738ee9..b7739a554fe0 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -571,6 +571,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
571 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 571 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
572 "6102 Bad state IO x%x aborted\n", 572 "6102 Bad state IO x%x aborted\n",
573 ctxp->oxid); 573 ctxp->oxid);
574 rc = -ENXIO;
574 goto aerr; 575 goto aerr;
575 } 576 }
576 577
@@ -580,6 +581,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
580 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 581 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
581 "6152 FCP Drop IO x%x: Prep\n", 582 "6152 FCP Drop IO x%x: Prep\n",
582 ctxp->oxid); 583 ctxp->oxid);
584 rc = -ENXIO;
583 goto aerr; 585 goto aerr;
584 } 586 }
585 587
@@ -618,8 +620,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
618 ctxp->wqeq->hba_wqidx = 0; 620 ctxp->wqeq->hba_wqidx = 0;
619 nvmewqeq->context2 = NULL; 621 nvmewqeq->context2 = NULL;
620 nvmewqeq->context3 = NULL; 622 nvmewqeq->context3 = NULL;
623 rc = -EBUSY;
621aerr: 624aerr:
622 return -ENXIO; 625 return rc;
623} 626}
624 627
625static void 628static void
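With rc threaded through the shared aerr label, callers of lpfc_nvmet_xmt_fcp_op() can now distinguish a dead or unprepared context (-ENXIO) from a transient issue failure (-EBUSY) instead of seeing -ENXIO for everything. The resulting error-path shape (condition names are placeholders):

    if (bad_state) {
            rc = -ENXIO;
            goto aerr;
    }
    ...
    rc = -EBUSY;            /* WQE could not be issued right now */
    aerr:
    return rc;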
@@ -668,9 +671,13 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
668 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 671 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
669 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; 672 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
670 673
674#ifdef CONFIG_LPFC_NVME_TARGET
671 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, 675 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
672 &phba->pcidev->dev, 676 &phba->pcidev->dev,
673 &phba->targetport); 677 &phba->targetport);
678#else
679 error = -ENOMEM;
680#endif
674 if (error) { 681 if (error) {
675 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 682 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
676 "6025 Cannot register NVME targetport " 683 "6025 Cannot register NVME targetport "
@@ -731,9 +738,25 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
731 return 0; 738 return 0;
732} 739}
733 740
741/**
742 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
743 * @phba: pointer to lpfc hba data structure.
744 * @axri: pointer to the nvmet xri abort wcqe structure.
745 *
746 * This routine is invoked by the worker thread to process a SLI4 fast-path
747 * NVMET aborted xri.
748 **/
749void
750lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
751 struct sli4_wcqe_xri_aborted *axri)
752{
753 /* TODO: work in progress */
754}
755
734void 756void
735lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) 757lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
736{ 758{
759#ifdef CONFIG_LPFC_NVME_TARGET
737 struct lpfc_nvmet_tgtport *tgtp; 760 struct lpfc_nvmet_tgtport *tgtp;
738 761
739 if (phba->nvmet_support == 0) 762 if (phba->nvmet_support == 0)
@@ -745,6 +768,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
745 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); 768 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
746 } 769 }
747 phba->targetport = NULL; 770 phba->targetport = NULL;
771#endif
748} 772}
749 773
750/** 774/**
@@ -764,6 +788,7 @@ static void
764lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 788lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
765 struct hbq_dmabuf *nvmebuf) 789 struct hbq_dmabuf *nvmebuf)
766{ 790{
791#ifdef CONFIG_LPFC_NVME_TARGET
767 struct lpfc_nvmet_tgtport *tgtp; 792 struct lpfc_nvmet_tgtport *tgtp;
768 struct fc_frame_header *fc_hdr; 793 struct fc_frame_header *fc_hdr;
769 struct lpfc_nvmet_rcv_ctx *ctxp; 794 struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -844,6 +869,7 @@ dropit:
844 869
845 atomic_inc(&tgtp->xmt_ls_abort); 870 atomic_inc(&tgtp->xmt_ls_abort);
846 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); 871 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
872#endif
847} 873}
848 874
849/** 875/**
@@ -865,6 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
865 struct rqb_dmabuf *nvmebuf, 891 struct rqb_dmabuf *nvmebuf,
866 uint64_t isr_timestamp) 892 uint64_t isr_timestamp)
867{ 893{
894#ifdef CONFIG_LPFC_NVME_TARGET
868 struct lpfc_nvmet_rcv_ctx *ctxp; 895 struct lpfc_nvmet_rcv_ctx *ctxp;
869 struct lpfc_nvmet_tgtport *tgtp; 896 struct lpfc_nvmet_tgtport *tgtp;
870 struct fc_frame_header *fc_hdr; 897 struct fc_frame_header *fc_hdr;
@@ -955,7 +982,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
955 982
956 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 983 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
957 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 984 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
958 "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n", 985 "6159 FCP Drop IO x%x: err x%x\n",
959 ctxp->oxid, rc); 986 ctxp->oxid, rc);
960dropit: 987dropit:
961 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", 988 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
@@ -970,6 +997,7 @@ dropit:
 970 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */	 997 /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
971 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 998 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
972 } 999 }
1000#endif
973} 1001}
974 1002
975/** 1003/**
@@ -1114,7 +1142,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
1114 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); 1142 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
1115 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); 1143 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
1116 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); 1144 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
1117 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL); 1145 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
1118 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); 1146 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
1119 1147
1120 /* Word 6 */ 1148 /* Word 6 */
@@ -1445,7 +1473,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1445 1473
1446 case NVMET_FCOP_RSP: 1474 case NVMET_FCOP_RSP:
1447 /* Words 0 - 2 */ 1475 /* Words 0 - 2 */
1448 sgel = &rsp->sg[0];
1449 physaddr = rsp->rspdma; 1476 physaddr = rsp->rspdma;
1450 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1477 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1451 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; 1478 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
@@ -1681,8 +1708,8 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
1681 struct lpfc_nodelist *ndlp; 1708 struct lpfc_nodelist *ndlp;
1682 1709
1683 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1710 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1684 "6067 %s: Entrypoint: sid %x xri %x\n", __func__, 1711 "6067 Abort: sid %x xri x%x/x%x\n",
1685 sid, xri); 1712 sid, xri, ctxp->wqeq->sli4_xritag);
1686 1713
1687 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1714 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1688 1715
@@ -1693,7 +1720,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
1693 atomic_inc(&tgtp->xmt_abort_rsp_error); 1720 atomic_inc(&tgtp->xmt_abort_rsp_error);
1694 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 1721 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1695 "6134 Drop ABTS - wrong NDLP state x%x.\n", 1722 "6134 Drop ABTS - wrong NDLP state x%x.\n",
1696 ndlp->nlp_state); 1723 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
1697 1724
1698 /* No failure to an ABTS request. */ 1725 /* No failure to an ABTS request. */
1699 return 0; 1726 return 0;
@@ -1791,7 +1818,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
1791 atomic_inc(&tgtp->xmt_abort_rsp_error); 1818 atomic_inc(&tgtp->xmt_abort_rsp_error);
1792 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 1819 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1793 "6160 Drop ABTS - wrong NDLP state x%x.\n", 1820 "6160 Drop ABTS - wrong NDLP state x%x.\n",
1794 ndlp->nlp_state); 1821 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
1795 1822
1796 /* No failure to an ABTS request. */ 1823 /* No failure to an ABTS request. */
1797 return 0; 1824 return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9d6384af9fce..54fd0c81ceaf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5953,12 +5953,13 @@ struct scsi_host_template lpfc_template_nvme = {
5953 .track_queue_depth = 0, 5953 .track_queue_depth = 0,
5954}; 5954};
5955 5955
5956struct scsi_host_template lpfc_template_s3 = { 5956struct scsi_host_template lpfc_template_no_hr = {
5957 .module = THIS_MODULE, 5957 .module = THIS_MODULE,
5958 .name = LPFC_DRIVER_NAME, 5958 .name = LPFC_DRIVER_NAME,
5959 .proc_name = LPFC_DRIVER_NAME, 5959 .proc_name = LPFC_DRIVER_NAME,
5960 .info = lpfc_info, 5960 .info = lpfc_info,
5961 .queuecommand = lpfc_queuecommand, 5961 .queuecommand = lpfc_queuecommand,
5962 .eh_timed_out = fc_eh_timed_out,
5962 .eh_abort_handler = lpfc_abort_handler, 5963 .eh_abort_handler = lpfc_abort_handler,
5963 .eh_device_reset_handler = lpfc_device_reset_handler, 5964 .eh_device_reset_handler = lpfc_device_reset_handler,
5964 .eh_target_reset_handler = lpfc_target_reset_handler, 5965 .eh_target_reset_handler = lpfc_target_reset_handler,
@@ -6015,7 +6016,6 @@ struct scsi_host_template lpfc_vport_template = {
6015 .eh_abort_handler = lpfc_abort_handler, 6016 .eh_abort_handler = lpfc_abort_handler,
6016 .eh_device_reset_handler = lpfc_device_reset_handler, 6017 .eh_device_reset_handler = lpfc_device_reset_handler,
6017 .eh_target_reset_handler = lpfc_target_reset_handler, 6018 .eh_target_reset_handler = lpfc_target_reset_handler,
6018 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6019 .slave_alloc = lpfc_slave_alloc, 6019 .slave_alloc = lpfc_slave_alloc,
6020 .slave_configure = lpfc_slave_configure, 6020 .slave_configure = lpfc_slave_configure,
6021 .slave_destroy = lpfc_slave_destroy, 6021 .slave_destroy = lpfc_slave_destroy,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e43e5e23c24b..1c9fa45df7eb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,3 +1,4 @@
1
1/******************************************************************* 2/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 3 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 4 * Fibre Channel Host Bus Adapters. *
@@ -952,7 +953,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
952 start_sglq = sglq; 953 start_sglq = sglq;
953 while (!found) { 954 while (!found) {
954 if (!sglq) 955 if (!sglq)
955 return NULL; 956 break;
956 if (ndlp && ndlp->active_rrqs_xri_bitmap && 957 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
957 test_bit(sglq->sli4_lxritag, 958 test_bit(sglq->sli4_lxritag,
958 ndlp->active_rrqs_xri_bitmap)) { 959 ndlp->active_rrqs_xri_bitmap)) {
@@ -12213,6 +12214,41 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12213} 12214}
12214 12215
12215/** 12216/**
12217 * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
12218 * @phba: pointer to lpfc hba data structure.
12219 *
12220 * This routine is invoked by the worker thread to process all the pending
12221 * SLI4 NVME abort XRI events.
12222 **/
12223void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
12224{
12225 struct lpfc_cq_event *cq_event;
12226
12227 /* First, declare the nvme xri abort event has been handled */
12228 spin_lock_irq(&phba->hbalock);
12229 phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
12230 spin_unlock_irq(&phba->hbalock);
12231 /* Now, handle all the nvme xri abort events */
12232 while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
12233 /* Get the first event from the head of the event queue */
12234 spin_lock_irq(&phba->hbalock);
12235 list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
12236 cq_event, struct lpfc_cq_event, list);
12237 spin_unlock_irq(&phba->hbalock);
12238 /* Notify aborted XRI for NVME work queue */
12239 if (phba->nvmet_support) {
12240 lpfc_sli4_nvmet_xri_aborted(phba,
12241 &cq_event->cqe.wcqe_axri);
12242 } else {
12243 lpfc_sli4_nvme_xri_aborted(phba,
12244 &cq_event->cqe.wcqe_axri);
12245 }
12246 /* Free the event processed back to the free pool */
12247 lpfc_sli4_cq_event_release(phba, cq_event);
12248 }
12249}
12250
12251/**
12216 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12252 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12217 * @phba: pointer to lpfc hba data structure. 12253 * @phba: pointer to lpfc hba data structure.
12218 * 12254 *
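The proc routine above clears NVME_XRI_ABORT_EVENT before draining the queue, so an event posted during the drain simply re-sets the flag and re-wakes the worker; nothing is lost. The producer half, from the CQ-handler hunk further down, is the mirror image:

    /* under hbalock, in lpfc_sli4_sp_handle_abort_xri_wcqe() */
    list_add_tail(&cq_event->list,
                  &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
    phba->hba_flag |= NVME_XRI_ABORT_EVENT;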
@@ -12709,10 +12745,22 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12709 spin_unlock_irqrestore(&phba->hbalock, iflags); 12745 spin_unlock_irqrestore(&phba->hbalock, iflags);
12710 workposted = true; 12746 workposted = true;
12711 break; 12747 break;
12748 case LPFC_NVME:
12749 spin_lock_irqsave(&phba->hbalock, iflags);
12750 list_add_tail(&cq_event->list,
12751 &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
12752 /* Set the nvme xri abort event flag */
12753 phba->hba_flag |= NVME_XRI_ABORT_EVENT;
12754 spin_unlock_irqrestore(&phba->hbalock, iflags);
12755 workposted = true;
12756 break;
12712 default: 12757 default:
12713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12714 "0603 Invalid work queue CQE subtype (x%x)\n", 12759 "0603 Invalid CQ subtype %d: "
12715 cq->subtype); 12760 "%08x %08x %08x %08x\n",
12761 cq->subtype, wcqe->word0, wcqe->parameter,
12762 wcqe->word2, wcqe->word3);
12763 lpfc_sli4_cq_event_release(phba, cq_event);
12716 workposted = false; 12764 workposted = false;
12717 break; 12765 break;
12718 } 12766 }
@@ -13827,6 +13875,8 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13827 * @startq: The starting FCP EQ to modify 13875 * @startq: The starting FCP EQ to modify
13828 * 13876 *
13829 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 13877 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
13878 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
13879 * updated in one mailbox command.
13830 * 13880 *
13831 * The @phba struct is used to send mailbox command to HBA. The @startq 13881 * The @phba struct is used to send mailbox command to HBA. The @startq
13832 * is used to get the starting FCP EQ to change. 13882 * is used to get the starting FCP EQ to change.
@@ -13879,7 +13929,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
13879 eq_delay->u.request.eq[cnt].phase = 0; 13929 eq_delay->u.request.eq[cnt].phase = 0;
13880 eq_delay->u.request.eq[cnt].delay_multi = dmult; 13930 eq_delay->u.request.eq[cnt].delay_multi = dmult;
13881 cnt++; 13931 cnt++;
13882 if (cnt >= LPFC_MAX_EQ_DELAY) 13932 if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
13883 break; 13933 break;
13884 } 13934 }
13885 eq_delay->u.request.num_eq = cnt; 13935 eq_delay->u.request.num_eq = cnt;
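Because a single MODIFY_EQ_DELAY mailbox updates at most LPFC_MAX_EQ_DELAY_EQID_CNT (8) EQ IDs, the callers changed earlier in this series (lpfc_fcp_imax_store() in lpfc_attr.c and lpfc_sli4_queue_setup() in lpfc_init.c) now stride by that constant instead of issuing one mailbox per EQ:

    for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
            lpfc_modify_hba_eq_delay(phba, qidx);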
@@ -15185,17 +15235,17 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15185 drq = drqp[idx]; 15235 drq = drqp[idx];
15186 cq = cqp[idx]; 15236 cq = cqp[idx];
15187 15237
15188 if (hrq->entry_count != drq->entry_count) {
15189 status = -EINVAL;
15190 goto out;
15191 }
15192
15193 /* sanity check on queue memory */ 15238 /* sanity check on queue memory */
15194 if (!hrq || !drq || !cq) { 15239 if (!hrq || !drq || !cq) {
15195 status = -ENODEV; 15240 status = -ENODEV;
15196 goto out; 15241 goto out;
15197 } 15242 }
15198 15243
15244 if (hrq->entry_count != drq->entry_count) {
15245 status = -EINVAL;
15246 goto out;
15247 }
15248
15199 if (idx == 0) { 15249 if (idx == 0) {
15200 bf_set(lpfc_mbx_rq_create_num_pages, 15250 bf_set(lpfc_mbx_rq_create_num_pages,
15201 &rq_create->u.request, 15251 &rq_create->u.request,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 91153c9f6d18..710458cf11d6 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -642,6 +642,7 @@ struct lpfc_sli4_hba {
642 struct list_head sp_asynce_work_queue; 642 struct list_head sp_asynce_work_queue;
643 struct list_head sp_fcp_xri_aborted_work_queue; 643 struct list_head sp_fcp_xri_aborted_work_queue;
644 struct list_head sp_els_xri_aborted_work_queue; 644 struct list_head sp_els_xri_aborted_work_queue;
645 struct list_head sp_nvme_xri_aborted_work_queue;
645 struct list_head sp_unsol_work_queue; 646 struct list_head sp_unsol_work_queue;
646 struct lpfc_sli4_link link_state; 647 struct lpfc_sli4_link link_state;
647 struct lpfc_sli4_lnk_info lnk_info; 648 struct lpfc_sli4_lnk_info lnk_info;
@@ -794,9 +795,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
794int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, 795int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
795 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); 796 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
796void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); 797void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
798void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
797void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); 799void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
798void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, 800void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
799 struct sli4_wcqe_xri_aborted *); 801 struct sli4_wcqe_xri_aborted *);
802void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
803 struct sli4_wcqe_xri_aborted *axri);
804void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
805 struct sli4_wcqe_xri_aborted *axri);
800void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, 806void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
801 struct sli4_wcqe_xri_aborted *); 807 struct sli4_wcqe_xri_aborted *);
802void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *); 808void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 86c6c9b26b82..d4e95e28f4e3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "11.2.0.7" 23#define LPFC_DRIVER_VERSION "11.2.0.10"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 7fe7e6ed595b..8981806fb13f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1442,9 +1442,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
1442 u64 sas_address, u16 handle, u8 phy_number, u8 link_rate); 1442 u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
1443extern struct sas_function_template mpt3sas_transport_functions; 1443extern struct sas_function_template mpt3sas_transport_functions;
1444extern struct scsi_transport_template *mpt3sas_transport_template; 1444extern struct scsi_transport_template *mpt3sas_transport_template;
1445extern int scsi_internal_device_block(struct scsi_device *sdev);
1446extern int scsi_internal_device_unblock(struct scsi_device *sdev,
1447 enum scsi_device_state new_state);
1448/* trigger data externs */ 1445/* trigger data externs */
1449void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, 1446void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
1450 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); 1447 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 46e866c36c8a..919ba2bb15f1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
2859 sas_device_priv_data->sas_target->handle); 2859 sas_device_priv_data->sas_target->handle);
2860 sas_device_priv_data->block = 1; 2860 sas_device_priv_data->block = 1;
2861 2861
2862 r = scsi_internal_device_block(sdev); 2862 r = scsi_internal_device_block(sdev, false);
2863 if (r == -EINVAL) 2863 if (r == -EINVAL)
2864 sdev_printk(KERN_WARNING, sdev, 2864 sdev_printk(KERN_WARNING, sdev,
2865 "device_block failed with return(%d) for handle(0x%04x)\n", 2865 "device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
2895 "performing a block followed by an unblock\n", 2895 "performing a block followed by an unblock\n",
2896 r, sas_device_priv_data->sas_target->handle); 2896 r, sas_device_priv_data->sas_target->handle);
2897 sas_device_priv_data->block = 1; 2897 sas_device_priv_data->block = 1;
2898 r = scsi_internal_device_block(sdev); 2898 r = scsi_internal_device_block(sdev, false);
2899 if (r) 2899 if (r)
2900 sdev_printk(KERN_WARNING, sdev, "retried device_block " 2900 sdev_printk(KERN_WARNING, sdev, "retried device_block "
2901 "failed with return(%d) for handle(0x%04x)\n", 2901 "failed with return(%d) for handle(0x%04x)\n",
@@ -4677,7 +4677,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4677 struct MPT3SAS_DEVICE *sas_device_priv_data; 4677 struct MPT3SAS_DEVICE *sas_device_priv_data;
4678 u32 response_code = 0; 4678 u32 response_code = 0;
4679 unsigned long flags; 4679 unsigned long flags;
4680 unsigned int sector_sz;
4681 4680
4682 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 4681 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4683 4682
@@ -4742,20 +4741,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4742 } 4741 }
4743 4742
4744 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); 4743 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
4745
4746 /* In case of bogus fw or device, we could end up having
4747 * unaligned partial completion. We can force alignment here,
4748 * then scsi-ml does not need to handle this misbehavior.
4749 */
4750 sector_sz = scmd->device->sector_size;
4751 if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
4752 xfer_cnt % sector_sz)) {
4753 sdev_printk(KERN_INFO, scmd->device,
4754 "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
4755 xfer_cnt, sector_sz);
4756 xfer_cnt = round_down(xfer_cnt, sector_sz);
4757 }
4758
4759 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); 4744 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
4760 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) 4745 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
4761 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); 4746 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 23bd70628a2f..7d173f48a81e 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -81,14 +81,17 @@ struct qedf_dbg_ctx {
81#define QEDF_INFO(pdev, level, fmt, ...) \ 81#define QEDF_INFO(pdev, level, fmt, ...) \
82 qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \ 82 qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
83 ## __VA_ARGS__) 83 ## __VA_ARGS__)
84 84__printf(4, 5)
85extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line, 85void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
86 const char *fmt, ...); 86 const char *fmt, ...);
87extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line, 87__printf(4, 5)
88void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
88 const char *, ...); 89 const char *, ...);
89extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, 90__printf(4, 5)
91void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
90 u32 line, const char *, ...); 92 u32 line, const char *, ...);
91extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line, 93__printf(5, 6)
94void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
92 u32 info, const char *fmt, ...); 95 u32 info, const char *fmt, ...);
93 96
94/* GRC Dump related defines */ 97/* GRC Dump related defines */
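Editor's note: __printf(m, n) marks argument m as a printf-style format string with the variadic list starting at argument n, so the compiler can type-check callers; that is exactly how mismatches like the stray __func__ removed from the QEDF_INFO call in qedf_fip.c below get caught at build time. A stand-alone sketch using the underlying GCC/Clang attribute (dbg_err is an invented name):

#include <stdarg.h>
#include <stdio.h>

/* Tell the compiler argument 1 is a printf-style format string and the
 * variadic arguments start at position 2, so mismatches warn at build time. */
__attribute__((format(printf, 1, 2)))
static void dbg_err(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    dbg_err("fd_mac byte=%02x\n", 0xab);          /* OK */
    /* dbg_err("fd_mac=%s.\n", __func__, 0xab); */ /* would warn: excess arg */
    return 0;
}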
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 868d423380d1..ed58b9104f58 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -203,7 +203,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
203 case FIP_DT_MAC: 203 case FIP_DT_MAC:
204 mp = (struct fip_mac_desc *)desc; 204 mp = (struct fip_mac_desc *)desc;
205 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, 205 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
206 "fd_mac=%pM.\n", __func__, mp->fd_mac); 206 "fd_mac=%pM\n", mp->fd_mac);
207 ether_addr_copy(cvl_mac, mp->fd_mac); 207 ether_addr_copy(cvl_mac, mp->fd_mac);
208 break; 208 break;
209 case FIP_DT_NAME: 209 case FIP_DT_NAME:
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ee0dcf9d3aba..46debe5034af 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1342,7 +1342,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1342 } else { 1342 } else {
1343 refcount = kref_read(&io_req->refcount); 1343 refcount = kref_read(&io_req->refcount);
1344 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 1344 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1345 "%d:0:%d:%d xid=0x%0x op=0x%02x " 1345 "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1346 "lba=%02x%02x%02x%02x cdb_status=%d " 1346 "lba=%02x%02x%02x%02x cdb_status=%d "
1347 "fcp_resid=0x%x refcount=%d.\n", 1347 "fcp_resid=0x%x refcount=%d.\n",
1348 qedf->lport->host->host_no, sc_cmd->device->id, 1348 qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1426,7 +1426,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1426 1426
1427 sc_cmd->result = result << 16; 1427 sc_cmd->result = result << 16;
1428 refcount = kref_read(&io_req->refcount); 1428 refcount = kref_read(&io_req->refcount);
1429 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing " 1429 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1430 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, " 1430 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1431 "allowed=%d retries=%d refcount=%d.\n", 1431 "allowed=%d retries=%d refcount=%d.\n",
1432 qedf->lport->host->host_no, sc_cmd->device->id, 1432 qedf->lport->host->host_no, sc_cmd->device->id,
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d9d7a86b5f8b..8e2a160490e6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2456,8 +2456,8 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2456 } 2456 }
2457 2457
2458 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 2458 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2459 "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl, 2459 "BDQ PBL addr=0x%p dma=%pad\n",
2460 qedf->bdq_pbl_dma); 2460 qedf->bdq_pbl, &qedf->bdq_pbl_dma);
2461 2461
2462 /* 2462 /*
2463 * Populate BDQ PBL with physical and virtual address of individual 2463 * Populate BDQ PBL with physical and virtual address of individual
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 955936274241..59417199bf36 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -14,7 +14,7 @@
14#include <linux/debugfs.h> 14#include <linux/debugfs.h>
15#include <linux/module.h> 15#include <linux/module.h>
16 16
17int do_not_recover; 17int qedi_do_not_recover;
18static struct dentry *qedi_dbg_root; 18static struct dentry *qedi_dbg_root;
19 19
20void 20void
@@ -74,22 +74,22 @@ qedi_dbg_exit(void)
74static ssize_t 74static ssize_t
75qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg) 75qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
76{ 76{
77 if (!do_not_recover) 77 if (!qedi_do_not_recover)
78 do_not_recover = 1; 78 qedi_do_not_recover = 1;
79 79
80 QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", 80 QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
81 do_not_recover); 81 qedi_do_not_recover);
82 return 0; 82 return 0;
83} 83}
84 84
85static ssize_t 85static ssize_t
86qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg) 86qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
87{ 87{
88 if (do_not_recover) 88 if (qedi_do_not_recover)
89 do_not_recover = 0; 89 qedi_do_not_recover = 0;
90 90
91 QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", 91 QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
92 do_not_recover); 92 qedi_do_not_recover);
93 return 0; 93 return 0;
94} 94}
95 95
@@ -141,7 +141,7 @@ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
141 if (*ppos) 141 if (*ppos)
142 return 0; 142 return 0;
143 143
144 cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover); 144 cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
145 cnt = min_t(int, count, cnt - *ppos); 145 cnt = min_t(int, count, cnt - *ppos);
146 *ppos += cnt; 146 *ppos += cnt;
147 return cnt; 147 return cnt;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index c9f0ef4e11b3..2bce3efc66a4 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1461,9 +1461,9 @@ static void qedi_tmf_work(struct work_struct *work)
1461 get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id, 1461 get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
1462 qedi_conn->iscsi_conn_id); 1462 qedi_conn->iscsi_conn_id);
1463 1463
1464 if (do_not_recover) { 1464 if (qedi_do_not_recover) {
1465 QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n", 1465 QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
1466 do_not_recover); 1466 qedi_do_not_recover);
1467 goto abort_ret; 1467 goto abort_ret;
1468 } 1468 }
1469 1469
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 8e488de88ece..63d793f46064 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -12,8 +12,14 @@
12 12
13#include "qedi_iscsi.h" 13#include "qedi_iscsi.h"
14 14
15#ifdef CONFIG_DEBUG_FS
16extern int qedi_do_not_recover;
17#else
18#define qedi_do_not_recover (0)
19#endif
20
15extern uint qedi_io_tracing; 21extern uint qedi_io_tracing;
16extern int do_not_recover; 22
17extern struct scsi_host_template qedi_host_template; 23extern struct scsi_host_template qedi_host_template;
18extern struct iscsi_transport qedi_iscsi_transport; 24extern struct iscsi_transport qedi_iscsi_transport;
19extern const struct qed_iscsi_ops *qedi_ops; 25extern const struct qed_iscsi_ops *qedi_ops;
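Editor's note: the qedi_gbl.h hunk above makes the debugfs-only knob safe to reference everywhere: with CONFIG_DEBUG_FS the symbol is a real (now namespaced) global, and without it the name degrades to a compile-time 0 so every branch that tests it folds away. A minimal stand-alone sketch of the same pattern (ENABLE_DEBUG_KNOBS and ep_poll are illustrative names, not the driver's):

#include <stdio.h>

/* Build with -DENABLE_DEBUG_KNOBS to get the writable knob; without it
 * the name becomes a constant 0 and dead branches are folded away. */
#ifdef ENABLE_DEBUG_KNOBS
int do_not_recover;              /* set from a debugfs-like interface */
#else
#define do_not_recover (0)
#endif

static int ep_poll(void)
{
    if (do_not_recover)          /* compiles to nothing when disabled */
        return 1;
    return 0;                    /* normal polling path */
}

int main(void)
{
    printf("poll -> %d\n", ep_poll());
    return 0;
}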
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index b9f79d36142d..4cc474364c50 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -833,7 +833,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
833 return ERR_PTR(ret); 833 return ERR_PTR(ret);
834 } 834 }
835 835
836 if (do_not_recover) { 836 if (qedi_do_not_recover) {
837 ret = -ENOMEM; 837 ret = -ENOMEM;
838 return ERR_PTR(ret); 838 return ERR_PTR(ret);
839 } 839 }
@@ -957,7 +957,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
957 struct qedi_endpoint *qedi_ep; 957 struct qedi_endpoint *qedi_ep;
958 int ret = 0; 958 int ret = 0;
959 959
960 if (do_not_recover) 960 if (qedi_do_not_recover)
961 return 1; 961 return 1;
962 962
963 qedi_ep = ep->dd_data; 963 qedi_ep = ep->dd_data;
@@ -1025,7 +1025,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
1025 } 1025 }
1026 1026
1027 if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) { 1027 if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
1028 if (do_not_recover) { 1028 if (qedi_do_not_recover) {
1029 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1029 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1030 "Do not recover cid=0x%x\n", 1030 "Do not recover cid=0x%x\n",
1031 qedi_ep->iscsi_cid); 1031 qedi_ep->iscsi_cid);
@@ -1039,7 +1039,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
1039 } 1039 }
1040 } 1040 }
1041 1041
1042 if (do_not_recover) 1042 if (qedi_do_not_recover)
1043 goto ep_exit_recover; 1043 goto ep_exit_recover;
1044 1044
1045 switch (qedi_ep->state) { 1045 switch (qedi_ep->state) {
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5eda21d903e9..8e3d92807cb8 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1805,7 +1805,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
1805 */ 1805 */
1806 qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); 1806 qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
1807 1807
1808 qedi_setup_int(qedi); 1808 rc = qedi_setup_int(qedi);
1809 if (rc) 1809 if (rc)
1810 goto stop_iscsi_func; 1810 goto stop_iscsi_func;
1811 1811
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 21d9fb7fc887..51b4179469d1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2707 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); 2707 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2708 ql_dbg(level, vha, id, 2708 ql_dbg(level, vha, id,
2709 "----- -----------------------------------------------\n"); 2709 "----- -----------------------------------------------\n");
2710 for (cnt = 0; cnt < size; cnt++, buf++) { 2710 for (cnt = 0; cnt < size; cnt += 16) {
2711 if (cnt % 16 == 0) 2711 ql_dbg(level, vha, id, "%04x: ", cnt);
2712 ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU); 2712 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2713 printk(" %02x", *buf); 2713 buf + cnt, min(16U, size - cnt), false);
2714 if (cnt % 16 == 15)
2715 printk("\n");
2716 } 2714 }
2717 if (cnt % 16 != 0)
2718 printk("\n");
2719} 2715}
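Editor's note: ql_dump_buffer() above replaces a per-byte loop that relied on printk continuation lines with one print_hex_dump() call per 16-byte row, eliminating the trailing-newline bookkeeping. A user-space sketch of the row-at-a-time shape (dump_buffer is an invented stand-in for the kernel helper):

#include <stddef.h>
#include <stdio.h>

/* Dump a buffer 16 bytes per row, offset-prefixed, one complete line
 * per iteration -- the user-space shape of the hunk above. */
static void dump_buffer(const unsigned char *buf, size_t size)
{
    for (size_t cnt = 0; cnt < size; cnt += 16) {
        size_t n = size - cnt < 16 ? size - cnt : 16;

        printf("%04zx: ", cnt);
        for (size_t i = 0; i < n; i++)
            printf("%02x ", buf[cnt + i]);
        putchar('\n');           /* one line per row, no carried state */
    }
}

int main(void)
{
    unsigned char data[20];

    for (size_t i = 0; i < sizeof(data); i++)
        data[i] = (unsigned char)i;
    dump_buffer(data, sizeof(data));  /* two rows: 16 bytes + 4 bytes */
    return 0;
}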
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ba2286652ff6..19125d72f322 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2932,6 +2932,8 @@ EXPORT_SYMBOL(scsi_target_resume);
2932/** 2932/**
2933 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state 2933 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2934 * @sdev: device to block 2934 * @sdev: device to block
2935 * @wait: Whether or not to wait until ongoing .queuecommand() /
2936 * .queue_rq() calls have finished.
2935 * 2937 *
2936 * Block request made by scsi lld's to temporarily stop all 2938 * Block request made by scsi lld's to temporarily stop all
2937 * scsi commands on the specified device. May sleep. 2939 * scsi commands on the specified device. May sleep.
@@ -2949,7 +2951,7 @@ EXPORT_SYMBOL(scsi_target_resume);
2949 * remove the rport mutex lock and unlock calls from srp_queuecommand(). 2951 * remove the rport mutex lock and unlock calls from srp_queuecommand().
2950 */ 2952 */
2951int 2953int
2952scsi_internal_device_block(struct scsi_device *sdev) 2954scsi_internal_device_block(struct scsi_device *sdev, bool wait)
2953{ 2955{
2954 struct request_queue *q = sdev->request_queue; 2956 struct request_queue *q = sdev->request_queue;
2955 unsigned long flags; 2957 unsigned long flags;
@@ -2969,12 +2971,16 @@ scsi_internal_device_block(struct scsi_device *sdev)
2969 * request queue. 2971 * request queue.
2970 */ 2972 */
2971 if (q->mq_ops) { 2973 if (q->mq_ops) {
2972 blk_mq_quiesce_queue(q); 2974 if (wait)
2975 blk_mq_quiesce_queue(q);
2976 else
2977 blk_mq_stop_hw_queues(q);
2973 } else { 2978 } else {
2974 spin_lock_irqsave(q->queue_lock, flags); 2979 spin_lock_irqsave(q->queue_lock, flags);
2975 blk_stop_queue(q); 2980 blk_stop_queue(q);
2976 spin_unlock_irqrestore(q->queue_lock, flags); 2981 spin_unlock_irqrestore(q->queue_lock, flags);
2977 scsi_wait_for_queuecommand(sdev); 2982 if (wait)
2983 scsi_wait_for_queuecommand(sdev);
2978 } 2984 }
2979 2985
2980 return 0; 2986 return 0;
@@ -3036,7 +3042,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
3036static void 3042static void
3037device_block(struct scsi_device *sdev, void *data) 3043device_block(struct scsi_device *sdev, void *data)
3038{ 3044{
3039 scsi_internal_device_block(sdev); 3045 scsi_internal_device_block(sdev, true);
3040} 3046}
3041 3047
3042static int 3048static int
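Editor's note: the new 'wait' argument separates two behaviours that scsi_internal_device_block() used to conflate: stop accepting new commands, versus stop and also drain what is already in flight (blk_mq_quiesce_queue() / scsi_wait_for_queuecommand()). Callers that may be holding locks, like mpt3sas above, pass wait=false. A toy user-space model of those semantics using C11 atomics (device_block, stopped and in_flight are invented names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool stopped;
static atomic_int in_flight;

/* Toy model: block always stops new submissions; with wait=true it
 * also spins until in-flight commands drain, which can sleep/loop --
 * hence unsafe for callers that hold locks. */
static void device_block(bool wait)
{
    atomic_store(&stopped, true);            /* no new commands */
    if (wait)
        while (atomic_load(&in_flight) > 0)
            ;                                /* drain: wait for completions */
}

int main(void)
{
    atomic_store(&in_flight, 0);
    device_block(false);                     /* stop only, returns at once */
    printf("stopped=%d in_flight=%d\n",
           atomic_load(&stopped), atomic_load(&in_flight));
    return 0;
}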
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 99bfc985e190..f11bd102d6d5 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -188,8 +188,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
188 */ 188 */
189 189
190#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */ 190#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
191extern int scsi_internal_device_block(struct scsi_device *sdev);
192extern int scsi_internal_device_unblock(struct scsi_device *sdev,
193 enum scsi_device_state new_state);
194 191
195#endif /* _SCSI_PRIV_H */ 192#endif /* _SCSI_PRIV_H */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c7839f6c35cc..fcfeddc79331 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1783,6 +1783,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1783{ 1783{
1784 int result = SCpnt->result; 1784 int result = SCpnt->result;
1785 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); 1785 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
1786 unsigned int sector_size = SCpnt->device->sector_size;
1787 unsigned int resid;
1786 struct scsi_sense_hdr sshdr; 1788 struct scsi_sense_hdr sshdr;
1787 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); 1789 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
1788 struct request *req = SCpnt->request; 1790 struct request *req = SCpnt->request;
@@ -1813,6 +1815,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1813 scsi_set_resid(SCpnt, blk_rq_bytes(req)); 1815 scsi_set_resid(SCpnt, blk_rq_bytes(req));
1814 } 1816 }
1815 break; 1817 break;
1818 default:
1819 /*
1820 * In case of bogus fw or device, we could end up having
1821 * an unaligned partial completion. Check this here and force
1822 * alignment.
1823 */
1824 resid = scsi_get_resid(SCpnt);
1825 if (resid & (sector_size - 1)) {
1826 sd_printk(KERN_INFO, sdkp,
1827 "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
1828 resid, sector_size);
1829 resid = min(scsi_bufflen(SCpnt),
1830 round_up(resid, sector_size));
1831 scsi_set_resid(SCpnt, resid);
1832 }
1816 } 1833 }
1817 1834
1818 if (result) { 1835 if (result) {
@@ -3075,23 +3092,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
3075 put_device(&sdkp->dev); 3092 put_device(&sdkp->dev);
3076} 3093}
3077 3094
3078struct sd_devt {
3079 int idx;
3080 struct disk_devt disk_devt;
3081};
3082
3083static void sd_devt_release(struct disk_devt *disk_devt)
3084{
3085 struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
3086 disk_devt);
3087
3088 spin_lock(&sd_index_lock);
3089 ida_remove(&sd_index_ida, sd_devt->idx);
3090 spin_unlock(&sd_index_lock);
3091
3092 kfree(sd_devt);
3093}
3094
3095/** 3095/**
3096 * sd_probe - called during driver initialization and whenever a 3096 * sd_probe - called during driver initialization and whenever a
3097 * new scsi device is attached to the system. It is called once 3097 * new scsi device is attached to the system. It is called once
@@ -3113,7 +3113,6 @@ static void sd_devt_release(struct disk_devt *disk_devt)
3113static int sd_probe(struct device *dev) 3113static int sd_probe(struct device *dev)
3114{ 3114{
3115 struct scsi_device *sdp = to_scsi_device(dev); 3115 struct scsi_device *sdp = to_scsi_device(dev);
3116 struct sd_devt *sd_devt;
3117 struct scsi_disk *sdkp; 3116 struct scsi_disk *sdkp;
3118 struct gendisk *gd; 3117 struct gendisk *gd;
3119 int index; 3118 int index;
@@ -3139,13 +3138,9 @@ static int sd_probe(struct device *dev)
3139 if (!sdkp) 3138 if (!sdkp)
3140 goto out; 3139 goto out;
3141 3140
3142 sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
3143 if (!sd_devt)
3144 goto out_free;
3145
3146 gd = alloc_disk(SD_MINORS); 3141 gd = alloc_disk(SD_MINORS);
3147 if (!gd) 3142 if (!gd)
3148 goto out_free_devt; 3143 goto out_free;
3149 3144
3150 do { 3145 do {
3151 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) 3146 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3161,11 +3156,6 @@ static int sd_probe(struct device *dev)
3161 goto out_put; 3156 goto out_put;
3162 } 3157 }
3163 3158
3164 atomic_set(&sd_devt->disk_devt.count, 1);
3165 sd_devt->disk_devt.release = sd_devt_release;
3166 sd_devt->idx = index;
3167 gd->disk_devt = &sd_devt->disk_devt;
3168
3169 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 3159 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
3170 if (error) { 3160 if (error) {
3171 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); 3161 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3205,12 +3195,11 @@ static int sd_probe(struct device *dev)
3205 return 0; 3195 return 0;
3206 3196
3207 out_free_index: 3197 out_free_index:
3208 put_disk_devt(&sd_devt->disk_devt); 3198 spin_lock(&sd_index_lock);
3209 sd_devt = NULL; 3199 ida_remove(&sd_index_ida, index);
3200 spin_unlock(&sd_index_lock);
3210 out_put: 3201 out_put:
3211 put_disk(gd); 3202 put_disk(gd);
3212 out_free_devt:
3213 kfree(sd_devt);
3214 out_free: 3203 out_free:
3215 kfree(sdkp); 3204 kfree(sdkp);
3216 out: 3205 out:
@@ -3271,7 +3260,10 @@ static void scsi_disk_release(struct device *dev)
3271 struct scsi_disk *sdkp = to_scsi_disk(dev); 3260 struct scsi_disk *sdkp = to_scsi_disk(dev);
3272 struct gendisk *disk = sdkp->disk; 3261 struct gendisk *disk = sdkp->disk;
3273 3262
3274 put_disk_devt(disk->disk_devt); 3263 spin_lock(&sd_index_lock);
3264 ida_remove(&sd_index_ida, sdkp->index);
3265 spin_unlock(&sd_index_lock);
3266
3275 disk->private_data = NULL; 3267 disk->private_data = NULL;
3276 put_disk(disk); 3268 put_disk(disk);
3277 put_device(&sdkp->device->sdev_gendev); 3269 put_device(&sdkp->device->sdev_gendev);
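Editor's note: the default case added to sd_done() above rounds an unaligned residual up to the next sector boundary and clamps it to the buffer length, so upper layers never see a partial-sector completion from bogus firmware. The arithmetic, as a stand-alone sketch (fix_resid is an illustrative name; like the kernel's round_up(), it assumes a power-of-two sector size):

#include <stdio.h>

/* Round resid up to the next sector multiple and clamp it to the
 * buffer length -- the arithmetic behind the sd_done() hunk above. */
static unsigned int fix_resid(unsigned int resid, unsigned int bufflen,
                              unsigned int sector_size)
{
    if (resid & (sector_size - 1)) {              /* unaligned residual */
        resid = (resid + sector_size - 1) & ~(sector_size - 1);
        if (resid > bufflen)
            resid = bufflen;                      /* never exceed buffer */
    }
    return resid;
}

int main(void)
{
    /* 4096-byte transfer; a bogus completion left 700 bytes undone */
    printf("resid=%u\n", fix_resid(700, 4096, 512));  /* prints 1024 */
    return 0;
}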
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 638e5f427c90..016639d7fef1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels")
400 */ 400 */
401static int storvsc_timeout = 180; 401static int storvsc_timeout = 180;
402 402
403static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
404
405#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 403#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
406static struct scsi_transport_template *fc_transport_template; 404static struct scsi_transport_template *fc_transport_template;
407#endif 405#endif
@@ -1383,6 +1381,22 @@ static int storvsc_do_io(struct hv_device *device,
1383 return ret; 1381 return ret;
1384} 1382}
1385 1383
1384static int storvsc_device_alloc(struct scsi_device *sdevice)
1385{
1386 /*
1387 * Set blist flag to permit the reading of the VPD pages even when
1388 * the target may claim SPC-2 compliance. MSFT targets currently
1389 * claim SPC-2 compliance while they implement post SPC-2 features.
1390 * With this flag we can correctly handle WRITE_SAME_16 issues.
1391 *
1392 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
1393 * still supports REPORT LUN.
1394 */
1395 sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
1396
1397 return 0;
1398}
1399
1386static int storvsc_device_configure(struct scsi_device *sdevice) 1400static int storvsc_device_configure(struct scsi_device *sdevice)
1387{ 1401{
1388 1402
@@ -1396,14 +1410,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
1396 sdevice->no_write_same = 1; 1410 sdevice->no_write_same = 1;
1397 1411
1398 /* 1412 /*
1399 * Add blist flags to permit the reading of the VPD pages even when
1400 * the target may claim SPC-2 compliance. MSFT targets currently
1401 * claim SPC-2 compliance while they implement post SPC-2 features.
1402 * With this patch we can correctly handle WRITE_SAME_16 issues.
1403 */
1404 sdevice->sdev_bflags |= msft_blist_flags;
1405
1406 /*
1407 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 1413 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
1408 * if the device is a MSFT virtual device. If the host is 1414 * if the device is a MSFT virtual device. If the host is
1409 * WIN10 or newer, allow write_same. 1415 * WIN10 or newer, allow write_same.
@@ -1661,6 +1667,7 @@ static struct scsi_host_template scsi_driver = {
1661 .eh_host_reset_handler = storvsc_host_reset_handler, 1667 .eh_host_reset_handler = storvsc_host_reset_handler,
1662 .proc_name = "storvsc_host", 1668 .proc_name = "storvsc_host",
1663 .eh_timed_out = storvsc_eh_timed_out, 1669 .eh_timed_out = storvsc_eh_timed_out,
1670 .slave_alloc = storvsc_device_alloc,
1664 .slave_configure = storvsc_device_configure, 1671 .slave_configure = storvsc_device_configure,
1665 .cmd_per_lun = 255, 1672 .cmd_per_lun = 255,
1666 .this_id = -1, 1673 .this_id = -1,
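Editor's note: the storvsc hunk moves the BLIST flags from slave_configure to a new slave_alloc callback because the midlayer performs INQUIRY/VPD scanning between the two; flags that must steer the scan have to be set before it runs. A toy model of that ordering (all names below are illustrative stand-ins, not the SCSI midlayer's API):

#include <stdio.h>

#define BLIST_REPORTLUN2    (1u << 0)
#define BLIST_TRY_VPD_PAGES (1u << 1)

struct scsi_device { unsigned int bflags; };

/* Runs first: the only point early enough for flags that steer the scan. */
static int device_alloc(struct scsi_device *sdev)
{
    sdev->bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
    return 0;
}

/* Models the midlayer scan that happens between alloc and configure. */
static void scan(struct scsi_device *sdev)
{
    if (sdev->bflags & BLIST_TRY_VPD_PAGES)
        printf("scan: reading VPD pages despite SPC-2 claim\n");
}

int main(void)
{
    struct scsi_device sdev = { 0 };

    device_alloc(&sdev);    /* slave_alloc */
    scan(&sdev);            /* midlayer scan sees the flags */
    return 0;               /* slave_configure would run after the scan */
}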
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 318e4a1f76c9..54deeb754db5 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -146,7 +146,7 @@ enum attr_idn {
146/* Descriptor idn for Query requests */ 146/* Descriptor idn for Query requests */
147enum desc_idn { 147enum desc_idn {
148 QUERY_DESC_IDN_DEVICE = 0x0, 148 QUERY_DESC_IDN_DEVICE = 0x0,
149 QUERY_DESC_IDN_CONFIGURAION = 0x1, 149 QUERY_DESC_IDN_CONFIGURATION = 0x1,
150 QUERY_DESC_IDN_UNIT = 0x2, 150 QUERY_DESC_IDN_UNIT = 0x2,
151 QUERY_DESC_IDN_RFU_0 = 0x3, 151 QUERY_DESC_IDN_RFU_0 = 0x3,
152 QUERY_DESC_IDN_INTERCONNECT = 0x4, 152 QUERY_DESC_IDN_INTERCONNECT = 0x4,
@@ -162,19 +162,13 @@ enum desc_header_offset {
162 QUERY_DESC_DESC_TYPE_OFFSET = 0x01, 162 QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
163}; 163};
164 164
165enum ufs_desc_max_size { 165enum ufs_desc_def_size {
166 QUERY_DESC_DEVICE_MAX_SIZE = 0x40, 166 QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
167 QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90, 167 QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
168 QUERY_DESC_UNIT_MAX_SIZE = 0x23, 168 QUERY_DESC_UNIT_DEF_SIZE = 0x23,
169 QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06, 169 QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
170 /* 170 QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
171 * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes 171 QUERY_DESC_POWER_DEF_SIZE = 0x62,
172 * of descriptor header.
173 */
174 QUERY_DESC_STRING_MAX_SIZE = 0xFE,
175 QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
176 QUERY_DESC_POWER_MAX_SIZE = 0x62,
177 QUERY_DESC_RFU_MAX_SIZE = 0x00,
178}; 172};
179 173
180/* Unit descriptor parameter offsets in bytes */ 174/* Unit descriptor parameter offsets in bytes */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index dc6efbd1be8e..1359913bf840 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -100,19 +100,6 @@
100#define ufshcd_hex_dump(prefix_str, buf, len) \ 100#define ufshcd_hex_dump(prefix_str, buf, len) \
101print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false) 101print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
102 102
103static u32 ufs_query_desc_max_size[] = {
104 QUERY_DESC_DEVICE_MAX_SIZE,
105 QUERY_DESC_CONFIGURAION_MAX_SIZE,
106 QUERY_DESC_UNIT_MAX_SIZE,
107 QUERY_DESC_RFU_MAX_SIZE,
108 QUERY_DESC_INTERCONNECT_MAX_SIZE,
109 QUERY_DESC_STRING_MAX_SIZE,
110 QUERY_DESC_RFU_MAX_SIZE,
111 QUERY_DESC_GEOMETRY_MAX_SIZE,
112 QUERY_DESC_POWER_MAX_SIZE,
113 QUERY_DESC_RFU_MAX_SIZE,
114};
115
116enum { 103enum {
117 UFSHCD_MAX_CHANNEL = 0, 104 UFSHCD_MAX_CHANNEL = 0,
118 UFSHCD_MAX_ID = 1, 105 UFSHCD_MAX_ID = 1,
@@ -2857,7 +2844,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2857 goto out; 2844 goto out;
2858 } 2845 }
2859 2846
2860 if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { 2847 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2861 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", 2848 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2862 __func__, *buf_len); 2849 __func__, *buf_len);
2863 err = -EINVAL; 2850 err = -EINVAL;
@@ -2938,6 +2925,92 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2938} 2925}
2939 2926
2940/** 2927/**
2928 * ufshcd_read_desc_length - read the specified descriptor length from header
2929 * @hba: Pointer to adapter instance
2930 * @desc_id: descriptor idn value
2931 * @desc_index: descriptor index
2932 * @desc_length: pointer to variable to read the length of descriptor
2933 *
2934 * Return 0 in case of success, non-zero otherwise
2935 */
2936static int ufshcd_read_desc_length(struct ufs_hba *hba,
2937 enum desc_idn desc_id,
2938 int desc_index,
2939 int *desc_length)
2940{
2941 int ret;
2942 u8 header[QUERY_DESC_HDR_SIZE];
2943 int header_len = QUERY_DESC_HDR_SIZE;
2944
2945 if (desc_id >= QUERY_DESC_IDN_MAX)
2946 return -EINVAL;
2947
2948 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2949 desc_id, desc_index, 0, header,
2950 &header_len);
2951
2952 if (ret) {
2953 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
2954 __func__, desc_id);
2955 return ret;
2956 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
2957 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
2958 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
2959 desc_id);
2960 ret = -EINVAL;
2961 }
2962
2963 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
2964 return ret;
2965
2966}
2967
2968/**
2969 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
2970 * @hba: Pointer to adapter instance
2971 * @desc_id: descriptor idn value
2972 * @desc_len: mapped desc length (out)
2973 *
2974 * Return 0 in case of success, non-zero otherwise
2975 */
2976int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
2977 enum desc_idn desc_id, int *desc_len)
2978{
2979 switch (desc_id) {
2980 case QUERY_DESC_IDN_DEVICE:
2981 *desc_len = hba->desc_size.dev_desc;
2982 break;
2983 case QUERY_DESC_IDN_POWER:
2984 *desc_len = hba->desc_size.pwr_desc;
2985 break;
2986 case QUERY_DESC_IDN_GEOMETRY:
2987 *desc_len = hba->desc_size.geom_desc;
2988 break;
2989 case QUERY_DESC_IDN_CONFIGURATION:
2990 *desc_len = hba->desc_size.conf_desc;
2991 break;
2992 case QUERY_DESC_IDN_UNIT:
2993 *desc_len = hba->desc_size.unit_desc;
2994 break;
2995 case QUERY_DESC_IDN_INTERCONNECT:
2996 *desc_len = hba->desc_size.interc_desc;
2997 break;
2998 case QUERY_DESC_IDN_STRING:
2999 *desc_len = QUERY_DESC_MAX_SIZE;
3000 break;
3001 case QUERY_DESC_IDN_RFU_0:
3002 case QUERY_DESC_IDN_RFU_1:
3003 *desc_len = 0;
3004 break;
3005 default:
3006 *desc_len = 0;
3007 return -EINVAL;
3008 }
3009 return 0;
3010}
3011EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3012
3013/**
2941 * ufshcd_read_desc_param - read the specified descriptor parameter 3014 * ufshcd_read_desc_param - read the specified descriptor parameter
2942 * @hba: Pointer to adapter instance 3015 * @hba: Pointer to adapter instance
2943 * @desc_id: descriptor idn value 3016 * @desc_id: descriptor idn value
@@ -2951,42 +3024,49 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2951static int ufshcd_read_desc_param(struct ufs_hba *hba, 3024static int ufshcd_read_desc_param(struct ufs_hba *hba,
2952 enum desc_idn desc_id, 3025 enum desc_idn desc_id,
2953 int desc_index, 3026 int desc_index,
2954 u32 param_offset, 3027 u8 param_offset,
2955 u8 *param_read_buf, 3028 u8 *param_read_buf,
2956 u32 param_size) 3029 u8 param_size)
2957{ 3030{
2958 int ret; 3031 int ret;
2959 u8 *desc_buf; 3032 u8 *desc_buf;
2960 u32 buff_len; 3033 int buff_len;
2961 bool is_kmalloc = true; 3034 bool is_kmalloc = true;
2962 3035
2963 /* safety checks */ 3036 /* Safety check */
2964 if (desc_id >= QUERY_DESC_IDN_MAX) 3037 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
2965 return -EINVAL; 3038 return -EINVAL;
2966 3039
2967 buff_len = ufs_query_desc_max_size[desc_id]; 3040 /* Get the max length of descriptor from structure filled in at probe
2968 if ((param_offset + param_size) > buff_len) 3041 * time.
2969 return -EINVAL; 3042 */
3043 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
2970 3044
2971 if (!param_offset && (param_size == buff_len)) { 3045 /* Sanity checks */
2972 /* memory space already available to hold full descriptor */ 3046 if (ret || !buff_len) {
2973 desc_buf = param_read_buf; 3047 dev_err(hba->dev, "%s: Failed to get full descriptor length",
2974 is_kmalloc = false; 3048 __func__);
2975 } else { 3049 return ret;
2976 /* allocate memory to hold full descriptor */ 3050 }
3051
3052 /* Check whether we need temp memory */
3053 if (param_offset != 0 || param_size < buff_len) {
2977 desc_buf = kmalloc(buff_len, GFP_KERNEL); 3054 desc_buf = kmalloc(buff_len, GFP_KERNEL);
2978 if (!desc_buf) 3055 if (!desc_buf)
2979 return -ENOMEM; 3056 return -ENOMEM;
3057 } else {
3058 desc_buf = param_read_buf;
3059 is_kmalloc = false;
2980 } 3060 }
2981 3061
3062 /* Request for full descriptor */
2982 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, 3063 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2983 desc_id, desc_index, 0, desc_buf, 3064 desc_id, desc_index, 0,
2984 &buff_len); 3065 desc_buf, &buff_len);
2985 3066
2986 if (ret) { 3067 if (ret) {
2987 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d", 3068 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
2988 __func__, desc_id, desc_index, param_offset, ret); 3069 __func__, desc_id, desc_index, param_offset, ret);
2989
2990 goto out; 3070 goto out;
2991 } 3071 }
2992 3072
@@ -2998,25 +3078,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
2998 goto out; 3078 goto out;
2999 } 3079 }
3000 3080
3001 /* 3081 /* Check whether we will not copy more data than available */
3002 * While reading variable size descriptors (like string descriptor), 3082 if (is_kmalloc && param_size > buff_len)
3003 * some UFS devices may report the "LENGTH" (field in "Transaction 3083 param_size = buff_len;
3004 * Specific fields" of Query Response UPIU) same as what was requested
3005 * in Query Request UPIU instead of reporting the actual size of the
3006 * variable size descriptor.
3007 * Although it's safe to ignore the "LENGTH" field for variable size
3008 * descriptors as we can always derive the length of the descriptor from
3009 * the descriptor header fields. Hence this change impose the length
3010 * match check only for fixed size descriptors (for which we always
3011 * request the correct size as part of Query Request UPIU).
3012 */
3013 if ((desc_id != QUERY_DESC_IDN_STRING) &&
3014 (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
3015 dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
3016 __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
3017 ret = -EINVAL;
3018 goto out;
3019 }
3020 3084
3021 if (is_kmalloc) 3085 if (is_kmalloc)
3022 memcpy(param_read_buf, &desc_buf[param_offset], param_size); 3086 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -5919,8 +5983,8 @@ static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
5919static void ufshcd_init_icc_levels(struct ufs_hba *hba) 5983static void ufshcd_init_icc_levels(struct ufs_hba *hba)
5920{ 5984{
5921 int ret; 5985 int ret;
5922 int buff_len = QUERY_DESC_POWER_MAX_SIZE; 5986 int buff_len = hba->desc_size.pwr_desc;
5923 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE]; 5987 u8 desc_buf[hba->desc_size.pwr_desc];
5924 5988
5925 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); 5989 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
5926 if (ret) { 5990 if (ret) {
@@ -6017,11 +6081,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
6017{ 6081{
6018 int err; 6082 int err;
6019 u8 model_index; 6083 u8 model_index;
6020 u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0}; 6084 u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
6021 u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; 6085 u8 desc_buf[hba->desc_size.dev_desc];
6022 6086
6023 err = ufshcd_read_device_desc(hba, desc_buf, 6087 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6024 QUERY_DESC_DEVICE_MAX_SIZE);
6025 if (err) { 6088 if (err) {
6026 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", 6089 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6027 __func__, err); 6090 __func__, err);
@@ -6038,14 +6101,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
6038 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 6101 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6039 6102
6040 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, 6103 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
6041 QUERY_DESC_STRING_MAX_SIZE, ASCII_STD); 6104 QUERY_DESC_MAX_SIZE, ASCII_STD);
6042 if (err) { 6105 if (err) {
6043 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", 6106 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6044 __func__, err); 6107 __func__, err);
6045 goto out; 6108 goto out;
6046 } 6109 }
6047 6110
6048 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; 6111 str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6049 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), 6112 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
6050 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], 6113 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
6051 MAX_MODEL_LEN)); 6114 MAX_MODEL_LEN));
@@ -6251,6 +6314,51 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6251 hba->req_abort_count = 0; 6314 hba->req_abort_count = 0;
6252} 6315}
6253 6316
6317static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6318{
6319 int err;
6320
6321 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6322 &hba->desc_size.dev_desc);
6323 if (err)
6324 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6325
6326 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6327 &hba->desc_size.pwr_desc);
6328 if (err)
6329 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6330
6331 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6332 &hba->desc_size.interc_desc);
6333 if (err)
6334 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6335
6336 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6337 &hba->desc_size.conf_desc);
6338 if (err)
6339 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6340
6341 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6342 &hba->desc_size.unit_desc);
6343 if (err)
6344 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6345
6346 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6347 &hba->desc_size.geom_desc);
6348 if (err)
6349 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6350}
6351
6352static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6353{
6354 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6355 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6356 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6357 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6358 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6359 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6360}
6361
6254/** 6362/**
6255 * ufshcd_probe_hba - probe hba to detect device and initialize 6363 * ufshcd_probe_hba - probe hba to detect device and initialize
6256 * @hba: per-adapter instance 6364 * @hba: per-adapter instance
@@ -6285,6 +6393,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
6285 if (ret) 6393 if (ret)
6286 goto out; 6394 goto out;
6287 6395
6396 /* Init check for device descriptor sizes */
6397 ufshcd_init_desc_sizes(hba);
6398
6288 ret = ufs_get_device_desc(hba, &card); 6399 ret = ufs_get_device_desc(hba, &card);
6289 if (ret) { 6400 if (ret) {
6290 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", 6401 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
@@ -6320,6 +6431,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
6320 6431
6321 /* set the state as operational after switching to desired gear */ 6432 /* set the state as operational after switching to desired gear */
6322 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 6433 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6434
6323 /* 6435 /*
6324 * If we are in error handling context or in power management callbacks 6436 * If we are in error handling context or in power management callbacks
6325 * context, no need to scan the host 6437 * context, no need to scan the host
@@ -7774,6 +7886,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7774 hba->mmio_base = mmio_base; 7886 hba->mmio_base = mmio_base;
7775 hba->irq = irq; 7887 hba->irq = irq;
7776 7888
7889 /* Set descriptor lengths to specification defaults */
7890 ufshcd_def_desc_sizes(hba);
7891
7777 err = ufshcd_hba_init(hba); 7892 err = ufshcd_hba_init(hba);
7778 if (err) 7893 if (err)
7779 goto out_error; 7894 goto out_error;
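Editor's note: ufshcd_init_desc_sizes() above asks the device for each descriptor's self-reported length via the 2-byte header (length, then type) and falls back to the spec default when the read or the type check fails. A minimal sketch of that header-then-fallback shape (read_desc_length here takes a pre-read buffer standing in for the query response; a real read can fail):

#include <stdio.h>

#define DESC_LENGTH_OFFSET 0x00
#define DESC_TYPE_OFFSET   0x01
#define DEVICE_DEF_SIZE    0x40   /* spec default, used on failure */

/* Read a descriptor's self-reported length from its 2-byte header and
 * validate the type byte -- the shape of ufshcd_read_desc_length(). */
static int read_desc_length(const unsigned char *desc, int expected_type,
                            int *len)
{
    if (!desc)
        return -1;                          /* transport failure */
    if (desc[DESC_TYPE_OFFSET] != expected_type)
        return -1;                          /* header/type mismatch */
    *len = desc[DESC_LENGTH_OFFSET];
    return 0;
}

int main(void)
{
    unsigned char dev_desc[] = { 0x59, 0x00 };  /* length 0x59, type 0 */
    int len;

    if (read_desc_length(dev_desc, 0x00, &len))
        len = DEVICE_DEF_SIZE;              /* fall back to default */
    printf("device descriptor length = 0x%02x\n", len);
    return 0;
}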
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7630600217a2..cdc8bd05f7df 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -220,6 +220,15 @@ struct ufs_dev_cmd {
220 struct ufs_query query; 220 struct ufs_query query;
221}; 221};
222 222
223struct ufs_desc_size {
224 int dev_desc;
225 int pwr_desc;
226 int geom_desc;
227 int interc_desc;
228 int unit_desc;
229 int conf_desc;
230};
231
223/** 232/**
224 * struct ufs_clk_info - UFS clock related info 233 * struct ufs_clk_info - UFS clock related info
225 * @list: list headed by hba->clk_list_head 234 * @list: list headed by hba->clk_list_head
@@ -483,6 +492,7 @@ struct ufs_stats {
483 * @clk_list_head: UFS host controller clocks list node head 492 * @clk_list_head: UFS host controller clocks list node head
484 * @pwr_info: holds current power mode 493 * @pwr_info: holds current power mode
485 * @max_pwr_info: keeps the device max valid pwm 494 * @max_pwr_info: keeps the device max valid pwm
495 * @desc_size: descriptor sizes reported by device
486 * @urgent_bkops_lvl: keeps track of urgent bkops level for device 496 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
487 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for 497 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
488 * device is known or not. 498 * device is known or not.
@@ -666,6 +676,7 @@ struct ufs_hba {
666 bool is_urgent_bkops_lvl_checked; 676 bool is_urgent_bkops_lvl_checked;
667 677
668 struct rw_semaphore clk_scaling_lock; 678 struct rw_semaphore clk_scaling_lock;
679 struct ufs_desc_size desc_size;
669}; 680};
670 681
671/* Returns true if clocks can be gated. Otherwise false */ 682/* Returns true if clocks can be gated. Otherwise false */
@@ -832,6 +843,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
832 enum flag_idn idn, bool *flag_res); 843 enum flag_idn idn, bool *flag_res);
833int ufshcd_hold(struct ufs_hba *hba, bool async); 844int ufshcd_hold(struct ufs_hba *hba, bool async);
834void ufshcd_release(struct ufs_hba *hba); 845void ufshcd_release(struct ufs_hba *hba);
846
847int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
848 int *desc_length);
849
835u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); 850u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
836 851
837/* Wrapper functions for safely calling variant operations */ 852/* Wrapper functions for safely calling variant operations */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ef474a748744..c374e3b5c678 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1487,7 +1487,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1487 irq_flag &= ~PCI_IRQ_MSI; 1487 irq_flag &= ~PCI_IRQ_MSI;
1488 1488
1489 error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag); 1489 error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
1490 if (error) 1490 if (error < 0)
1491 goto out_reset_adapter; 1491 goto out_reset_adapter;
1492 1492
1493 adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true); 1493 adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
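Editor's note: pci_alloc_irq_vectors() returns the number of vectors allocated on success and a negative errno on failure, so the old 'if (error)' treated every successful allocation as a failure; the fix tests 'error < 0'. A stand-alone sketch of the count-or-errno convention (alloc_vectors is an invented stand-in):

#include <errno.h>
#include <stdio.h>

/* Stand-in for an API that returns a count on success and a negative
 * errno on failure (pci_alloc_irq_vectors-style). */
static int alloc_vectors(int want)
{
    return want > 0 ? want : -EINVAL;    /* pretend allocation succeeds */
}

int main(void)
{
    int error = alloc_vectors(1);

    if (error < 0)                       /* correct: only negatives fail */
        return 1;
    printf("got %d vector(s)\n", error); /* positive return is the count */
    return 0;
}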
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index b7b87ecefcdf..9fca8d225ee0 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -532,7 +532,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
532 532
533 newsock->ops = sock->ops; 533 newsock->ops = sock->ops;
534 534
535 rc = sock->ops->accept(sock, newsock, O_NONBLOCK); 535 rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
536 if (rc == -EAGAIN) { 536 if (rc == -EAGAIN) {
537 /* Nothing ready, so wait for activity */ 537 /* Nothing ready, so wait for activity */
538 init_waitqueue_entry(&wait, current); 538 init_waitqueue_entry(&wait, current);
@@ -540,7 +540,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
540 set_current_state(TASK_INTERRUPTIBLE); 540 set_current_state(TASK_INTERRUPTIBLE);
541 schedule(); 541 schedule();
542 remove_wait_queue(sk_sleep(sock->sk), &wait); 542 remove_wait_queue(sk_sleep(sock->sk), &wait);
543 rc = sock->ops->accept(sock, newsock, O_NONBLOCK); 543 rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
544 } 544 }
545 545
546 if (rc) 546 if (rc)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 7f8cf875157c..65a285631994 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -336,7 +336,6 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
336 if (likely((port < TOTAL_NUMBER_OF_PORTS) && 336 if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
337 cvm_oct_device[port])) { 337 cvm_oct_device[port])) {
338 struct net_device *dev = cvm_oct_device[port]; 338 struct net_device *dev = cvm_oct_device[port];
339 struct octeon_ethernet *priv = netdev_priv(dev);
340 339
341 /* 340 /*
342 * Only accept packets for devices that are 341 * Only accept packets for devices that are
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index e61e4ca064a8..74094fff4367 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,6 +1,7 @@
1config BCM2835_VCHIQ 1config BCM2835_VCHIQ
2 tristate "Videocore VCHIQ" 2 tristate "Videocore VCHIQ"
3 depends on HAS_DMA 3 depends on HAS_DMA
4 depends on OF
4 depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) 5 depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
5 default y 6 default y
6 help 7 help
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 1bacbc3b19a0..e94aea8c0d05 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -114,7 +114,7 @@
114#define DEFAULT_TX_BUF_COUNT 3 114#define DEFAULT_TX_BUF_COUNT 3
115 115
116struct n_hdlc_buf { 116struct n_hdlc_buf {
117 struct n_hdlc_buf *link; 117 struct list_head list_item;
118 int count; 118 int count;
119 char buf[1]; 119 char buf[1];
120}; 120};
@@ -122,8 +122,7 @@ struct n_hdlc_buf {
122#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe) 122#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
123 123
124struct n_hdlc_buf_list { 124struct n_hdlc_buf_list {
125 struct n_hdlc_buf *head; 125 struct list_head list;
126 struct n_hdlc_buf *tail;
127 int count; 126 int count;
128 spinlock_t spinlock; 127 spinlock_t spinlock;
129}; 128};
@@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
136 * @backup_tty - TTY to use if tty gets closed 135 * @backup_tty - TTY to use if tty gets closed
137 * @tbusy - reentrancy flag for tx wakeup code 136 * @tbusy - reentrancy flag for tx wakeup code
138 * @woke_up - FIXME: describe this field 137 * @woke_up - FIXME: describe this field
139 * @tbuf - currently transmitting tx buffer
140 * @tx_buf_list - list of pending transmit frame buffers 138 * @tx_buf_list - list of pending transmit frame buffers
141 * @rx_buf_list - list of received frame buffers 139 * @rx_buf_list - list of received frame buffers
142 * @tx_free_buf_list - list of unused transmit frame buffers 140 * @tx_free_buf_list - list of unused transmit frame buffers
@@ -149,7 +147,6 @@ struct n_hdlc {
149 struct tty_struct *backup_tty; 147 struct tty_struct *backup_tty;
150 int tbusy; 148 int tbusy;
151 int woke_up; 149 int woke_up;
152 struct n_hdlc_buf *tbuf;
153 struct n_hdlc_buf_list tx_buf_list; 150 struct n_hdlc_buf_list tx_buf_list;
154 struct n_hdlc_buf_list rx_buf_list; 151 struct n_hdlc_buf_list rx_buf_list;
155 struct n_hdlc_buf_list tx_free_buf_list; 152 struct n_hdlc_buf_list tx_free_buf_list;
@@ -159,6 +156,8 @@ struct n_hdlc {
159/* 156/*
160 * HDLC buffer list manipulation functions 157 * HDLC buffer list manipulation functions
161 */ 158 */
159static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
160 struct n_hdlc_buf *buf);
162static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, 161static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
163 struct n_hdlc_buf *buf); 162 struct n_hdlc_buf *buf);
164static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list); 163static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
@@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
208{ 207{
209 struct n_hdlc *n_hdlc = tty2n_hdlc(tty); 208 struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
210 struct n_hdlc_buf *buf; 209 struct n_hdlc_buf *buf;
211 unsigned long flags;
212 210
213 while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list))) 211 while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
214 n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf); 212 n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
215 spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
216 if (n_hdlc->tbuf) {
217 n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
218 n_hdlc->tbuf = NULL;
219 }
220 spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
221} 213}
222 214
223static struct tty_ldisc_ops n_hdlc_ldisc = { 215static struct tty_ldisc_ops n_hdlc_ldisc = {
@@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
283 } else 275 } else
284 break; 276 break;
285 } 277 }
286 kfree(n_hdlc->tbuf);
287 kfree(n_hdlc); 278 kfree(n_hdlc);
288 279
289} /* end of n_hdlc_release() */ 280} /* end of n_hdlc_release() */
@@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
402 n_hdlc->woke_up = 0; 393 n_hdlc->woke_up = 0;
403 spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags); 394 spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
404 395
405 /* get current transmit buffer or get new transmit */ 396 tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
406 /* buffer from list of pending transmit buffers */
407
408 tbuf = n_hdlc->tbuf;
409 if (!tbuf)
410 tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
411
412 while (tbuf) { 397 while (tbuf) {
413 if (debuglevel >= DEBUG_LEVEL_INFO) 398 if (debuglevel >= DEBUG_LEVEL_INFO)
414 printk("%s(%d)sending frame %p, count=%d\n", 399 printk("%s(%d)sending frame %p, count=%d\n",
@@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
420 405
421 /* rollback was possible and has been done */ 406 /* rollback was possible and has been done */
422 if (actual == -ERESTARTSYS) { 407 if (actual == -ERESTARTSYS) {
423 n_hdlc->tbuf = tbuf; 408 n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
424 break; 409 break;
425 } 410 }
426 /* if transmit error, throw frame away by */ 411 /* if transmit error, throw frame away by */
@@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
435 420
436 /* free current transmit buffer */ 421 /* free current transmit buffer */
437 n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf); 422 n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
438 423
439 /* this tx buffer is done */
440 n_hdlc->tbuf = NULL;
441
442 /* wake up sleeping writers */ 424 /* wake up sleeping writers */
443 wake_up_interruptible(&tty->write_wait); 425 wake_up_interruptible(&tty->write_wait);
444 426
@@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
448 if (debuglevel >= DEBUG_LEVEL_INFO) 430 if (debuglevel >= DEBUG_LEVEL_INFO)
449 printk("%s(%d)frame %p pending\n", 431 printk("%s(%d)frame %p pending\n",
450 __FILE__,__LINE__,tbuf); 432 __FILE__,__LINE__,tbuf);
451 433
452 /* buffer not accepted by driver */ 434 /*
453 /* set this buffer as pending buffer */ 435 * the buffer was not accepted by the driver,
454 n_hdlc->tbuf = tbuf; 436 * return it to the tx queue
437 */
438 n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
455 break; 439 break;
456 } 440 }
457 } 441 }
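Editor's note: dropping the tbuf side pointer works because a frame the driver rejects is pushed back onto the *head* of the tx list (n_hdlc_buf_return()), so retransmit order is preserved with the list alone. A minimal stand-alone model of that head-return idiom on a kernel-style intrusive list (the list helpers are re-implemented here so the sketch compiles on its own):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void list_add(struct list_head *n, struct list_head *h) /* at head */
{
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}

struct buf { struct list_head list_item; int id; };

/* get: pop from the head, or NULL when the list is empty */
static struct buf *buf_get(struct list_head *q)
{
    struct list_head *n = q->next;

    if (n == q)
        return NULL;
    n->prev->next = n->next; n->next->prev = n->prev;
    return (struct buf *)n;   /* valid: list_item is the first member */
}

int main(void)
{
    struct list_head q;
    struct buf a = { .id = 1 }, b = { .id = 2 }, *t;

    list_init(&q);
    list_add_tail(&a.list_item, &q);   /* n_hdlc_buf_put() */
    list_add_tail(&b.list_item, &q);

    t = buf_get(&q);                   /* driver rejects this frame... */
    list_add(&t->list_item, &q);       /* ...n_hdlc_buf_return(): to head */

    while ((t = buf_get(&q)))
        printf("send frame %d\n", t->id);  /* 1 then 2: order preserved */
    return 0;
}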
@@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
749 int error = 0; 733 int error = 0;
750 int count; 734 int count;
751 unsigned long flags; 735 unsigned long flags;
752 736 struct n_hdlc_buf *buf = NULL;
737
753 if (debuglevel >= DEBUG_LEVEL_INFO) 738 if (debuglevel >= DEBUG_LEVEL_INFO)
754 printk("%s(%d)n_hdlc_tty_ioctl() called %d\n", 739 printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
755 __FILE__,__LINE__,cmd); 740 __FILE__,__LINE__,cmd);
@@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
763 /* report count of read data available */ 748 /* report count of read data available */
764 /* in next available frame (if any) */ 749 /* in next available frame (if any) */
765 spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags); 750 spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
766 if (n_hdlc->rx_buf_list.head) 751 buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
767 count = n_hdlc->rx_buf_list.head->count; 752 struct n_hdlc_buf, list_item);
753 if (buf)
754 count = buf->count;
768 else 755 else
769 count = 0; 756 count = 0;
770 spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags); 757 spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
@@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
776 count = tty_chars_in_buffer(tty); 763 count = tty_chars_in_buffer(tty);
777 /* add size of next output frame in queue */ 764 /* add size of next output frame in queue */
778 spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); 765 spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
779 if (n_hdlc->tx_buf_list.head) 766 buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
780 count += n_hdlc->tx_buf_list.head->count; 767 struct n_hdlc_buf, list_item);
768 if (buf)
769 count += buf->count;
781 spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags); 770 spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
782 error = put_user(count, (int __user *)arg); 771 error = put_user(count, (int __user *)arg);
783 break; 772 break;
@@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
825 poll_wait(filp, &tty->write_wait, wait); 814 poll_wait(filp, &tty->write_wait, wait);
826 815
827 /* set bits for operations that won't block */ 816 /* set bits for operations that won't block */
828 if (n_hdlc->rx_buf_list.head) 817 if (!list_empty(&n_hdlc->rx_buf_list.list))
829 mask |= POLLIN | POLLRDNORM; /* readable */ 818 mask |= POLLIN | POLLRDNORM; /* readable */
830 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) 819 if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
831 mask |= POLLHUP; 820 mask |= POLLHUP;
832 if (tty_hung_up_p(filp)) 821 if (tty_hung_up_p(filp))
833 mask |= POLLHUP; 822 mask |= POLLHUP;
834 if (!tty_is_writelocked(tty) && 823 if (!tty_is_writelocked(tty) &&
835 n_hdlc->tx_free_buf_list.head) 824 !list_empty(&n_hdlc->tx_free_buf_list.list))
836 mask |= POLLOUT | POLLWRNORM; /* writable */ 825 mask |= POLLOUT | POLLWRNORM; /* writable */
837 } 826 }
838 return mask; 827 return mask;
@@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void)
856 spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock); 845 spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
857 spin_lock_init(&n_hdlc->rx_buf_list.spinlock); 846 spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
858 spin_lock_init(&n_hdlc->tx_buf_list.spinlock); 847 spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
859 848
849 INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
850 INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
851 INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
852 INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
853
860 /* allocate free rx buffer list */ 854 /* allocate free rx buffer list */
861 for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) { 855 for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
862 buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL); 856 buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
@@ -884,53 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
884} /* end of n_hdlc_alloc() */ 878} /* end of n_hdlc_alloc() */
885 879
886/** 880/**
881 * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
882 * @buf_list - pointer to the buffer list
883 * @buf - pointer to the buffer
884 */
885static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
886 struct n_hdlc_buf *buf)
887{
888 unsigned long flags;
889
890 spin_lock_irqsave(&buf_list->spinlock, flags);
891
892 list_add(&buf->list_item, &buf_list->list);
893 buf_list->count++;
894
895 spin_unlock_irqrestore(&buf_list->spinlock, flags);
896}
897
898/**
887 * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list 899 * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
888 * @list - pointer to buffer list 900 * @buf_list - pointer to buffer list
889 * @buf - pointer to buffer 901 * @buf - pointer to buffer
890 */ 902 */
891static void n_hdlc_buf_put(struct n_hdlc_buf_list *list, 903static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
892 struct n_hdlc_buf *buf) 904 struct n_hdlc_buf *buf)
893{ 905{
894 unsigned long flags; 906 unsigned long flags;
895 spin_lock_irqsave(&list->spinlock,flags); 907
896 908 spin_lock_irqsave(&buf_list->spinlock, flags);
897 buf->link=NULL; 909
898 if (list->tail) 910 list_add_tail(&buf->list_item, &buf_list->list);
899 list->tail->link = buf; 911 buf_list->count++;
900 else 912
901 list->head = buf; 913 spin_unlock_irqrestore(&buf_list->spinlock, flags);
902 list->tail = buf;
903 (list->count)++;
904
905 spin_unlock_irqrestore(&list->spinlock,flags);
906
907} /* end of n_hdlc_buf_put() */ 914} /* end of n_hdlc_buf_put() */
908 915
909/** 916/**
910 * n_hdlc_buf_get - remove and return an HDLC buffer from list 917 * n_hdlc_buf_get - remove and return an HDLC buffer from list
911 * @list - pointer to HDLC buffer list 918 * @buf_list - pointer to HDLC buffer list
912 * 919 *
913 * Remove and return an HDLC buffer from the head of the specified HDLC buffer 920 * Remove and return an HDLC buffer from the head of the specified HDLC buffer
914 * list. 921 * list.
915 * Returns a pointer to HDLC buffer if available, otherwise %NULL. 922 * Returns a pointer to HDLC buffer if available, otherwise %NULL.
916 */ 923 */
917static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list) 924static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
918{ 925{
919 unsigned long flags; 926 unsigned long flags;
920 struct n_hdlc_buf *buf; 927 struct n_hdlc_buf *buf;
921 spin_lock_irqsave(&list->spinlock,flags); 928
922 929 spin_lock_irqsave(&buf_list->spinlock, flags);
923 buf = list->head; 930
931 buf = list_first_entry_or_null(&buf_list->list,
932 struct n_hdlc_buf, list_item);
924 if (buf) { 933 if (buf) {
925 list->head = buf->link; 934 list_del(&buf->list_item);
926 (list->count)--; 935 buf_list->count--;
927 } 936 }
928 if (!list->head) 937
929 list->tail = NULL; 938 spin_unlock_irqrestore(&buf_list->spinlock, flags);
930
931 spin_unlock_irqrestore(&list->spinlock,flags);
932 return buf; 939 return buf;
933
934} /* end of n_hdlc_buf_get() */ 940} /* end of n_hdlc_buf_get() */
935 941
936static char hdlc_banner[] __initdata = 942static char hdlc_banner[] __initdata =
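The n_hdlc hunks above convert a hand-rolled singly linked queue (head/tail/link fields) to the kernel's list_head API and add n_hdlc_buf_return(), so a frame the driver rejects goes back to the front of the tx queue instead of being parked in the now-removed n_hdlc->tbuf pointer. A minimal userspace sketch of that queue discipline, with pthread mutexes standing in for the spinlocks and all names illustrative rather than taken from the driver:

#include <pthread.h>
#include <stdio.h>

struct buf {
        struct buf *prev, *next;        /* doubly linked, like list_head */
        int len;
};

struct buf_list {
        struct buf head;                /* sentinel node */
        int count;
        pthread_mutex_t lock;
};

static void buf_list_init(struct buf_list *l)
{
        l->head.prev = l->head.next = &l->head;
        l->count = 0;
        pthread_mutex_init(&l->lock, NULL);
}

static void insert_between(struct buf *b, struct buf *p, struct buf *n)
{
        p->next = b; b->prev = p; b->next = n; n->prev = b;
}

/* tail insert, like n_hdlc_buf_put(): normal FIFO queueing */
static void buf_put(struct buf_list *l, struct buf *b)
{
        pthread_mutex_lock(&l->lock);
        insert_between(b, l->head.prev, &l->head);
        l->count++;
        pthread_mutex_unlock(&l->lock);
}

/* head insert, like n_hdlc_buf_return(): requeue a rejected frame first */
static void buf_return(struct buf_list *l, struct buf *b)
{
        pthread_mutex_lock(&l->lock);
        insert_between(b, &l->head, l->head.next);
        l->count++;
        pthread_mutex_unlock(&l->lock);
}

/* head remove, like n_hdlc_buf_get(): NULL when the list is empty */
static struct buf *buf_get(struct buf_list *l)
{
        struct buf *b = NULL;

        pthread_mutex_lock(&l->lock);
        if (l->head.next != &l->head) {
                b = l->head.next;
                b->prev->next = b->next;
                b->next->prev = b->prev;
                l->count--;
        }
        pthread_mutex_unlock(&l->lock);
        return b;
}

int main(void)
{
        struct buf_list txq;
        struct buf a = { .len = 1 }, b = { .len = 2 };

        buf_list_init(&txq);
        buf_put(&txq, &a);
        buf_put(&txq, &b);
        struct buf *t = buf_get(&txq);  /* driver rejects this frame... */
        buf_return(&txq, t);            /* ...so it stays first in line */
        printf("next frame len=%d, count=%d\n", buf_get(&txq)->len, txq.count);
        return 0;
}

Keeping a rejected frame on the queue itself means the flush and release paths have exactly one structure to walk, which is what lets the patch delete the tbuf special cases above.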
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index b4f86c219db1..7a17aedbf902 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1031,8 +1031,10 @@ static int s3c64xx_serial_startup(struct uart_port *port)
1031 if (ourport->dma) { 1031 if (ourport->dma) {
1032 ret = s3c24xx_serial_request_dma(ourport); 1032 ret = s3c24xx_serial_request_dma(ourport);
1033 if (ret < 0) { 1033 if (ret < 0) {
1034 dev_warn(port->dev, "DMA request failed\n"); 1034 dev_warn(port->dev,
1035 return ret; 1035 "DMA request failed, DMA will not be used\n");
1036 devm_kfree(port->dev, ourport->dma);
1037 ourport->dma = NULL;
1036 } 1038 }
1037 } 1039 }
1038 1040
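The samsung.c hunk turns a fatal probe-time error into graceful degradation: if the DMA channels cannot be acquired, the driver frees the dma state and clears the pointer, and every later "if (ourport->dma)" check falls back to PIO. The pattern in miniature (all names invented for the example):

#include <stdio.h>
#include <stdlib.h>

struct port {
        void *dma;              /* non-NULL means "use DMA" */
};

static int request_dma(struct port *p)
{
        (void)p;
        return -1;              /* pretend the DMA controller is busy */
}

static void start_port(struct port *p)
{
        p->dma = malloc(16);
        if (p->dma && request_dma(p) < 0) {
                fprintf(stderr, "DMA request failed, DMA will not be used\n");
                free(p->dma);
                p->dma = NULL;  /* later "if (p->dma)" checks now take the PIO path */
        }
        printf("running in %s mode\n", p->dma ? "DMA" : "PIO");
}

int main(void)
{
        struct port p = { 0 };
        start_port(&p);
        return 0;
}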
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 2092e46b1380..f8d0747810e7 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -250,6 +250,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
250 val = dwc3_omap_read_utmi_ctrl(omap); 250 val = dwc3_omap_read_utmi_ctrl(omap);
251 val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; 251 val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
252 dwc3_omap_write_utmi_ctrl(omap, val); 252 dwc3_omap_write_utmi_ctrl(omap, val);
253 break;
253 254
254 case OMAP_DWC3_VBUS_OFF: 255 case OMAP_DWC3_VBUS_OFF:
255 val = dwc3_omap_read_utmi_ctrl(omap); 256 val = dwc3_omap_read_utmi_ctrl(omap);
@@ -392,7 +393,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
392{ 393{
393 u32 reg; 394 u32 reg;
394 struct device_node *node = omap->dev->of_node; 395 struct device_node *node = omap->dev->of_node;
395 int utmi_mode = 0; 396 u32 utmi_mode = 0;
396 397
397 reg = dwc3_omap_read_utmi_ctrl(omap); 398 reg = dwc3_omap_read_utmi_ctrl(omap);
398 399
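Two small fixes in dwc3-omap: the added break stops the VBUS-valid case from falling through into OMAP_DWC3_VBUS_OFF and immediately undoing its own register write, and utmi_mode becomes u32 because of_property_read_u32() writes through a u32 pointer. The fall-through failure mode, compilable (enum and register bits invented for illustration):

#include <stdio.h>

enum vbus_event { VBUS_VALID, VBUS_OFF };

static int handle(enum vbus_event ev, int have_break)
{
        int reg = 0;

        switch (ev) {
        case VBUS_VALID:
                reg |= 1;               /* enable */
                if (have_break)
                        break;          /* the fix: stop here */
                /* fall through -- the bug */
        case VBUS_OFF:
                reg &= ~1;              /* disable undoes the enable above */
                break;
        }
        return reg;
}

int main(void)
{
        printf("buggy: reg=%d, fixed: reg=%d\n",
               handle(VBUS_VALID, 0), handle(VBUS_VALID, 1));
        return 0;
}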
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4db97ecae885..0d75158e43fe 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1342,6 +1342,68 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1342 if (r == req) { 1342 if (r == req) {
1343 /* wait until it is processed */ 1343 /* wait until it is processed */
1344 dwc3_stop_active_transfer(dwc, dep->number, true); 1344 dwc3_stop_active_transfer(dwc, dep->number, true);
1345
1346 /*
1347 * If request was already started, this means we had to
1348 * stop the transfer. With that we also need to ignore
1349 * all TRBs used by the request, however TRBs can only
1350 * be modified after completion of END_TRANSFER
1351 * command. So what we do here is that we wait for
1352 * END_TRANSFER completion and only after that, we jump
1353 * over TRBs by clearing HWO and incrementing dequeue
1354 * pointer.
1355 *
1356 * Note that we have 2 possible types of transfers here:
1357 *
1358 * i) Linear buffer request
1359 * ii) SG-list based request
1360 *
1361 * SG-list based requests will have r->num_pending_sgs
1362 * set to a valid number (> 0). Linear requests,
1363 * normally use a single TRB.
1364 *
1365 * For each of these two cases, if r->unaligned flag is
1366 * set, one extra TRB has been used to align transfer
1367 * size to wMaxPacketSize.
1368 *
1369 * All of these cases need to be taken into
1370 * consideration so we don't mess up our TRB ring
1371 * pointers.
1372 */
1373 wait_event_lock_irq(dep->wait_end_transfer,
1374 !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
1375 dwc->lock);
1376
1377 if (!r->trb)
1378 goto out1;
1379
1380 if (r->num_pending_sgs) {
1381 struct dwc3_trb *trb;
1382 int i = 0;
1383
1384 for (i = 0; i < r->num_pending_sgs; i++) {
1385 trb = r->trb + i;
1386 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1387 dwc3_ep_inc_deq(dep);
1388 }
1389
1390 if (r->unaligned) {
1391 trb = r->trb + r->num_pending_sgs + 1;
1392 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1393 dwc3_ep_inc_deq(dep);
1394 }
1395 } else {
1396 struct dwc3_trb *trb = r->trb;
1397
1398 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1399 dwc3_ep_inc_deq(dep);
1400
1401 if (r->unaligned) {
1402 trb = r->trb + 1;
1403 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1404 dwc3_ep_inc_deq(dep);
1405 }
1406 }
1345 goto out1; 1407 goto out1;
1346 } 1408 }
1347 dev_err(dwc->dev, "request %p was not queued to %s\n", 1409 dev_err(dwc->dev, "request %p was not queued to %s\n",
@@ -1352,6 +1414,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1352 1414
1353out1: 1415out1:
1354 /* giveback the request */ 1416 /* giveback the request */
1417 dep->queued_requests--;
1355 dwc3_gadget_giveback(dep, req, -ECONNRESET); 1418 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1356 1419
1357out0: 1420out0:
@@ -2126,12 +2189,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
2126 return 1; 2189 return 1;
2127 } 2190 }
2128 2191
2129 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
2130 return 1;
2131
2132 count = trb->size & DWC3_TRB_SIZE_MASK; 2192 count = trb->size & DWC3_TRB_SIZE_MASK;
2133 req->remaining += count; 2193 req->remaining += count;
2134 2194
2195 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
2196 return 1;
2197
2135 if (dep->direction) { 2198 if (dep->direction) {
2136 if (count) { 2199 if (count) {
2137 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); 2200 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
@@ -3228,15 +3291,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
3228 3291
3229int dwc3_gadget_suspend(struct dwc3 *dwc) 3292int dwc3_gadget_suspend(struct dwc3 *dwc)
3230{ 3293{
3231 int ret;
3232
3233 if (!dwc->gadget_driver) 3294 if (!dwc->gadget_driver)
3234 return 0; 3295 return 0;
3235 3296
3236 ret = dwc3_gadget_run_stop(dwc, false, false); 3297 dwc3_gadget_run_stop(dwc, false, false);
3237 if (ret < 0)
3238 return ret;
3239
3240 dwc3_disconnect_gadget(dwc); 3298 dwc3_disconnect_gadget(dwc);
3241 __dwc3_gadget_stop(dwc); 3299 __dwc3_gadget_stop(dwc);
3242 3300
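The long comment in the dwc3_gadget_ep_dequeue() hunk describes the recovery in prose; the sketch below shows the same bookkeeping in miniature, assuming for simplicity that the request's TRBs sit contiguously in the ring (the driver computes the exact positions itself). After END_TRANSFER completes, software reclaims each TRB by clearing its HWO (hardware-owned) bit and advancing the dequeue index: one TRB per pending sg entry (or one for a linear buffer), plus one extra when the transfer was padded out to wMaxPacketSize:

#include <stdio.h>

#define HWO     (1u << 0)       /* "hardware owns this TRB" */
#define RING    8

struct trb { unsigned ctrl; };

struct ep {
        struct trb ring[RING];
        unsigned deq;           /* dequeue index */
};

static void ep_inc_deq(struct ep *ep)
{
        ep->deq = (ep->deq + 1) % RING;
}

/*
 * Skip all TRBs a cancelled request occupied: num_sgs of them for an
 * sg-list request (1 for a linear buffer), plus one if an extra TRB
 * padded the transfer to wMaxPacketSize ("unaligned").
 */
static void skip_request_trbs(struct ep *ep, struct trb *first,
                              int num_sgs, int unaligned)
{
        int n = (num_sgs ? num_sgs : 1) + (unaligned ? 1 : 0);

        for (int i = 0; i < n; i++) {
                first[i].ctrl &= ~HWO;  /* hand the TRB back to software */
                ep_inc_deq(ep);
        }
}

int main(void)
{
        struct ep ep = { .deq = 0 };

        for (int i = 0; i < RING; i++)
                ep.ring[i].ctrl = HWO;

        skip_request_trbs(&ep, &ep.ring[0], 2, 1);      /* 2 sgs + pad TRB */
        printf("deq now %u, trb0 hwo=%u\n", ep.deq, ep.ring[0].ctrl & HWO);
        return 0;
}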
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 3129bcf74d7d..265e223ab645 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -28,23 +28,23 @@ struct dwc3;
28#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) 28#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
29 29
30/* DEPCFG parameter 1 */ 30/* DEPCFG parameter 1 */
31#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0) 31#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
32#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8) 32#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
33#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9) 33#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
34#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10) 34#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
35#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11) 35#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
36#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13) 36#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
37#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16) 37#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
38#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24) 38#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
39#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25) 39#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
40#define DWC3_DEPCFG_BULK_BASED (1 << 30) 40#define DWC3_DEPCFG_BULK_BASED (1 << 30)
41#define DWC3_DEPCFG_FIFO_BASED (1 << 31) 41#define DWC3_DEPCFG_FIFO_BASED (1 << 31)
42 42
43/* DEPCFG parameter 0 */ 43/* DEPCFG parameter 0 */
44#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1) 44#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
45#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3) 45#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
46#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17) 46#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17)
47#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22) 47#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
48#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) 48#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
49/* This applies for core versions earlier than 1.94a */ 49/* This applies for core versions earlier than 1.94a */
50#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31) 50#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
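The gadget.h change masks every DEPCFG field macro to its architected width so an oversized argument can no longer spill into neighbouring bits of the parameter word. What the mask buys, shown with the 5-bit EP-number field that sits right below the BULK_BASED bit:

#include <stdio.h>

/* unmasked: a bad n corrupts bits above the field */
#define EP_NUMBER_RAW(n)        ((unsigned)(n) << 25)
/* masked, as in the patch: field is 5 bits wide */
#define EP_NUMBER(n)            (((unsigned)(n) & 0x1f) << 25)

#define BULK_BASED              (1u << 30)

int main(void)
{
        unsigned bad_n = 0x3f;          /* 6 bits: one too many */
        unsigned raw = EP_NUMBER_RAW(bad_n);
        unsigned ok  = EP_NUMBER(bad_n);

        printf("unmasked sets BULK_BASED by accident: %s\n",
               (raw & BULK_BASED) ? "yes" : "no");
        printf("masked leaves BULK_BASED alone:       %s\n",
               (ok & BULK_BASED) ? "yes" : "no");
        return 0;
}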
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 78c44979dde3..cbff3b02840d 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
269 ret = unregister_gadget(gi); 269 ret = unregister_gadget(gi);
270 if (ret) 270 if (ret)
271 goto err; 271 goto err;
272 kfree(name);
272 } else { 273 } else {
273 if (gi->composite.gadget_driver.udc_name) { 274 if (gi->composite.gadget_driver.udc_name) {
274 ret = -EBUSY; 275 ret = -EBUSY;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a5b7cd615698..a0085571824d 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1834,11 +1834,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1834 spin_lock_irqsave(&func->ffs->eps_lock, flags); 1834 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1835 while(count--) { 1835 while(count--) {
1836 struct usb_endpoint_descriptor *ds; 1836 struct usb_endpoint_descriptor *ds;
1837 struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
1838 int needs_comp_desc = false;
1837 int desc_idx; 1839 int desc_idx;
1838 1840
1839 if (ffs->gadget->speed == USB_SPEED_SUPER) 1841 if (ffs->gadget->speed == USB_SPEED_SUPER) {
1840 desc_idx = 2; 1842 desc_idx = 2;
1841 else if (ffs->gadget->speed == USB_SPEED_HIGH) 1843 needs_comp_desc = true;
1844 } else if (ffs->gadget->speed == USB_SPEED_HIGH)
1842 desc_idx = 1; 1845 desc_idx = 1;
1843 else 1846 else
1844 desc_idx = 0; 1847 desc_idx = 0;
@@ -1855,6 +1858,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1855 1858
1856 ep->ep->driver_data = ep; 1859 ep->ep->driver_data = ep;
1857 ep->ep->desc = ds; 1860 ep->ep->desc = ds;
1861
1862 comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
1863 USB_DT_ENDPOINT_SIZE);
1864 ep->ep->maxburst = comp_desc->bMaxBurst + 1;
1865
1866 if (needs_comp_desc)
1867 ep->ep->comp_desc = comp_desc;
1868
1858 ret = usb_ep_enable(ep->ep); 1869 ret = usb_ep_enable(ep->ep);
1859 if (likely(!ret)) { 1870 if (likely(!ret)) {
1860 epfile->ep = ep; 1871 epfile->ep = ep;
@@ -2253,7 +2264,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2253 2264
2254 if (len < sizeof(*d) || 2265 if (len < sizeof(*d) ||
2255 d->bFirstInterfaceNumber >= ffs->interfaces_count || 2266 d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2256 d->Reserved1) 2267 !d->Reserved1)
2257 return -EINVAL; 2268 return -EINVAL;
2258 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) 2269 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2259 if (d->Reserved2[i]) 2270 if (d->Reserved2[i])
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 27ed51b5082f..29b41b5dee04 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
258 memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); 258 memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
259 v4l2_event_queue(&uvc->vdev, &v4l2_event); 259 v4l2_event_queue(&uvc->vdev, &v4l2_event);
260 260
261 /* Pass additional setup data to userspace */
262 if (uvc->event_setup_out && uvc->event_length) {
263 uvc->control_req->length = uvc->event_length;
264 return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
265 uvc->control_req, GFP_ATOMIC);
266 }
267
268 return 0; 261 return 0;
269} 262}
270 263
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index a2615d64d07c..a2c916869293 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *);
84 84
85/* /dev/gadget/$CHIP represents ep0 and the whole device */ 85/* /dev/gadget/$CHIP represents ep0 and the whole device */
86enum ep0_state { 86enum ep0_state {
87 /* DISBLED is the initial state. 87 /* DISABLED is the initial state. */
88 */
89 STATE_DEV_DISABLED = 0, 88 STATE_DEV_DISABLED = 0,
90 89
91 /* Only one open() of /dev/gadget/$CHIP; only one file tracks 90 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
@@ -1782,8 +1781,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1782 1781
1783 spin_lock_irq (&dev->lock); 1782 spin_lock_irq (&dev->lock);
1784 value = -EINVAL; 1783 value = -EINVAL;
1785 if (dev->buf) 1784 if (dev->buf) {
1785 kfree(kbuf);
1786 goto fail; 1786 goto fail;
1787 }
1787 dev->buf = kbuf; 1788 dev->buf = kbuf;
1788 1789
1789 /* full or low speed config */ 1790 /* full or low speed config */
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 11bbce28bc23..2035906b8ced 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -610,7 +610,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
610{ 610{
611 struct usba_ep *ep = to_usba_ep(_ep); 611 struct usba_ep *ep = to_usba_ep(_ep);
612 struct usba_udc *udc = ep->udc; 612 struct usba_udc *udc = ep->udc;
613 unsigned long flags, ept_cfg, maxpacket; 613 unsigned long flags, maxpacket;
614 unsigned int nr_trans; 614 unsigned int nr_trans;
615 615
616 DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); 616 DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
@@ -630,7 +630,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
630 ep->is_in = 0; 630 ep->is_in = 0;
631 631
632 DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", 632 DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n",
633 ep->ep.name, ept_cfg, maxpacket); 633 ep->ep.name, ep->ept_cfg, maxpacket);
634 634
635 if (usb_endpoint_dir_in(desc)) { 635 if (usb_endpoint_dir_in(desc)) {
636 ep->is_in = 1; 636 ep->is_in = 1;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index c60abe3a68f9..8cabc5944d5f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev)
1031 int rc; 1031 int rc;
1032 1032
1033 dum = *((void **)dev_get_platdata(&pdev->dev)); 1033 dum = *((void **)dev_get_platdata(&pdev->dev));
1034 /* Clear usb_gadget region for new registration to udc-core */
1035 memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
1034 dum->gadget.name = gadget_name; 1036 dum->gadget.name = gadget_name;
1035 dum->gadget.ops = &dummy_ops; 1037 dum->gadget.ops = &dummy_ops;
1036 dum->gadget.max_speed = USB_SPEED_SUPER; 1038 dum->gadget.max_speed = USB_SPEED_SUPER;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 85504419ab31..3828c2ec8623 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep)
1146 */ 1146 */
1147 while (!list_empty(&ep->queue)) { 1147 while (!list_empty(&ep->queue)) {
1148 struct net2280_request *req; 1148 struct net2280_request *req;
1149 u32 tmp; 1149 u32 req_dma_count;
1150 1150
1151 req = list_entry(ep->queue.next, 1151 req = list_entry(ep->queue.next,
1152 struct net2280_request, queue); 1152 struct net2280_request, queue);
1153 if (!req->valid) 1153 if (!req->valid)
1154 break; 1154 break;
1155 rmb(); 1155 rmb();
1156 tmp = le32_to_cpup(&req->td->dmacount); 1156 req_dma_count = le32_to_cpup(&req->td->dmacount);
1157 if ((tmp & BIT(VALID_BIT)) != 0) 1157 if ((req_dma_count & BIT(VALID_BIT)) != 0)
1158 break; 1158 break;
1159 1159
1160 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short" 1160 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
@@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep)
1163 */ 1163 */
1164 if (unlikely(req->td->dmadesc == 0)) { 1164 if (unlikely(req->td->dmadesc == 0)) {
1165 /* paranoia */ 1165 /* paranoia */
1166 tmp = readl(&ep->dma->dmacount); 1166 u32 const ep_dmacount = readl(&ep->dma->dmacount);
1167 if (tmp & DMA_BYTE_COUNT_MASK) 1167
1168 if (ep_dmacount & DMA_BYTE_COUNT_MASK)
1168 break; 1169 break;
1169 /* single transfer mode */ 1170 /* single transfer mode */
1170 dma_done(ep, req, tmp, 0); 1171 dma_done(ep, req, req_dma_count, 0);
1171 num_completed++; 1172 num_completed++;
1172 break; 1173 break;
1173 } else if (!ep->is_in && 1174 } else if (!ep->is_in &&
1174 (req->req.length % ep->ep.maxpacket) && 1175 (req->req.length % ep->ep.maxpacket) &&
1175 !(ep->dev->quirks & PLX_PCIE)) { 1176 !(ep->dev->quirks & PLX_PCIE)) {
1176 1177
1177 tmp = readl(&ep->regs->ep_stat); 1178 u32 const ep_stat = readl(&ep->regs->ep_stat);
1178 /* AVOID TROUBLE HERE by not issuing short reads from 1179 /* AVOID TROUBLE HERE by not issuing short reads from
 1179 * your gadget driver. That helps avoid errata 0121, 1180 * your gadget driver. That helps avoid errata 0121,
1180 * 0122, and 0124; not all cases trigger the warning. 1181 * 0122, and 0124; not all cases trigger the warning.
1181 */ 1182 */
1182 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) { 1183 if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
1183 ep_warn(ep->dev, "%s lost packet sync!\n", 1184 ep_warn(ep->dev, "%s lost packet sync!\n",
1184 ep->ep.name); 1185 ep->ep.name);
1185 req->req.status = -EOVERFLOW; 1186 req->req.status = -EOVERFLOW;
1186 } else { 1187 } else {
1187 tmp = readl(&ep->regs->ep_avail); 1188 u32 const ep_avail = readl(&ep->regs->ep_avail);
1188 if (tmp) { 1189 if (ep_avail) {
1189 /* fifo gets flushed later */ 1190 /* fifo gets flushed later */
1190 ep->out_overflow = 1; 1191 ep->out_overflow = 1;
1191 ep_dbg(ep->dev, 1192 ep_dbg(ep->dev,
1192 "%s dma, discard %d len %d\n", 1193 "%s dma, discard %d len %d\n",
1193 ep->ep.name, tmp, 1194 ep->ep.name, ep_avail,
1194 req->req.length); 1195 req->req.length);
1195 req->req.status = -EOVERFLOW; 1196 req->req.status = -EOVERFLOW;
1196 } 1197 }
1197 } 1198 }
1198 } 1199 }
1199 dma_done(ep, req, tmp, 0); 1200 dma_done(ep, req, req_dma_count, 0);
1200 num_completed++; 1201 num_completed++;
1201 } 1202 }
1202 1203
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index e1335ad5bce9..832c4fdbe985 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev)
2534 usb_del_gadget_udc(&udc->gadget); 2534 usb_del_gadget_udc(&udc->gadget);
2535 pxa_cleanup_debugfs(udc); 2535 pxa_cleanup_debugfs(udc);
2536 2536
2537 if (!IS_ERR_OR_NULL(udc->transceiver)) 2537 if (!IS_ERR_OR_NULL(udc->transceiver)) {
2538 usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy); 2538 usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
2539 usb_put_phy(udc->transceiver); 2539 usb_put_phy(udc->transceiver);
2540 }
2540 2541
2541 udc->transceiver = NULL; 2542 udc->transceiver = NULL;
2542 the_controller = NULL; 2543 the_controller = NULL;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 414e3c376dbb..5302f988e7e6 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -350,7 +350,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
350 350
351 case USB_PORT_FEAT_SUSPEND: 351 case USB_PORT_FEAT_SUSPEND:
352 dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n"); 352 dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
353 if (valid_port(wIndex)) { 353 if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
354 ohci_at91_port_suspend(ohci_at91->sfr_regmap, 354 ohci_at91_port_suspend(ohci_at91->sfr_regmap,
355 1); 355 1);
356 return 0; 356 return 0;
@@ -393,7 +393,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
393 393
394 case USB_PORT_FEAT_SUSPEND: 394 case USB_PORT_FEAT_SUSPEND:
395 dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n"); 395 dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
396 if (valid_port(wIndex)) { 396 if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
397 ohci_at91_port_suspend(ohci_at91->sfr_regmap, 397 ohci_at91_port_suspend(ohci_at91->sfr_regmap,
398 0); 398 0);
399 return 0; 399 return 0;
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 363d125300ea..2b4a00fa735d 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -109,7 +109,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
109 xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK); 109 xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
110 110
111 /* xhci 1.1 controllers have the HCCPARAMS2 register */ 111 /* xhci 1.1 controllers have the HCCPARAMS2 register */
112 if (hci_version > 100) { 112 if (hci_version > 0x100) {
113 temp = readl(&xhci->cap_regs->hcc_params2); 113 temp = readl(&xhci->cap_regs->hcc_params2);
114 xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp); 114 xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
115 xhci_dbg(xhci, " HC %s Force save context capability", 115 xhci_dbg(xhci, " HC %s Force save context capability",
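The xhci-dbg.c one-liner is a radix bug: HCIVERSION is a BCD-style register where xHCI 1.0 reads as 0x100 (decimal 256), so comparing against decimal 100 was true even for 0.96 controllers and the HCCPARAMS2 dump ran on hardware that lacks the register. In miniature:

#include <stdio.h>

int main(void)
{
        unsigned hci_version = 0x96;    /* an xHCI 0.96 controller */

        /* buggy: 0x96 == 150 > 100, so a 0.96 HC "has" HCCPARAMS2 */
        printf("buggy check: %s\n", hci_version > 100 ? "1.1+" : "pre-1.1");
        /* fixed: only versions above 0x100 (xHCI 1.0) qualify */
        printf("fixed check: %s\n", hci_version > 0x100 ? "1.1+" : "pre-1.1");
        return 0;
}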
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 9066ec9e0c2e..67d5dc79b6b5 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -382,7 +382,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
382 382
383static int xhci_mtk_setup(struct usb_hcd *hcd); 383static int xhci_mtk_setup(struct usb_hcd *hcd);
384static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { 384static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
385 .extra_priv_size = sizeof(struct xhci_hcd),
386 .reset = xhci_mtk_setup, 385 .reset = xhci_mtk_setup,
387}; 386};
388 387
@@ -678,13 +677,13 @@ static int xhci_mtk_probe(struct platform_device *pdev)
678 goto power_off_phys; 677 goto power_off_phys;
679 } 678 }
680 679
681 if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
682 xhci->shared_hcd->can_do_streams = 1;
683
684 ret = usb_add_hcd(hcd, irq, IRQF_SHARED); 680 ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
685 if (ret) 681 if (ret)
686 goto put_usb3_hcd; 682 goto put_usb3_hcd;
687 683
684 if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
685 xhci->shared_hcd->can_do_streams = 1;
686
688 ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); 687 ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
689 if (ret) 688 if (ret)
690 goto dealloc_usb2_hcd; 689 goto dealloc_usb2_hcd;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6d33b42ffcf5..bd02a6cd8e2c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -286,6 +286,8 @@ static int xhci_plat_remove(struct platform_device *dev)
286 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 286 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
287 struct clk *clk = xhci->clk; 287 struct clk *clk = xhci->clk;
288 288
289 xhci->xhc_state |= XHCI_STATE_REMOVING;
290
289 usb_remove_hcd(xhci->shared_hcd); 291 usb_remove_hcd(xhci->shared_hcd);
290 usb_phy_shutdown(hcd->usb_phy); 292 usb_phy_shutdown(hcd->usb_phy);
291 293
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index a59fafb4b329..74436f8ca538 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1308,7 +1308,6 @@ static int tegra_xhci_setup(struct usb_hcd *hcd)
1308} 1308}
1309 1309
1310static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = { 1310static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
1311 .extra_priv_size = sizeof(struct xhci_hcd),
1312 .reset = tegra_xhci_setup, 1311 .reset = tegra_xhci_setup,
1313}; 1312};
1314 1313
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6d6c46000e56..50aee8b7718b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
868 868
869 spin_lock_irqsave(&xhci->lock, flags); 869 spin_lock_irqsave(&xhci->lock, flags);
870 870
871 /* disble usb3 ports Wake bits*/ 871 /* disable usb3 ports Wake bits */
872 port_index = xhci->num_usb3_ports; 872 port_index = xhci->num_usb3_ports;
873 port_array = xhci->usb3_ports; 873 port_array = xhci->usb3_ports;
874 while (port_index--) { 874 while (port_index--) {
@@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
879 writel(t2, port_array[port_index]); 879 writel(t2, port_array[port_index]);
880 } 880 }
881 881
882 /* disble usb2 ports Wake bits*/ 882 /* disable usb2 ports Wake bits */
883 port_index = xhci->num_usb2_ports; 883 port_index = xhci->num_usb2_ports;
884 port_array = xhci->usb2_ports; 884 port_array = xhci->usb2_ports;
885 while (port_index--) { 885 while (port_index--) {
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 095778ff984d..37c63cb39714 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface,
781 iface_desc = interface->cur_altsetting; 781 iface_desc = interface->cur_altsetting;
782 dev->product_id = le16_to_cpu(udev->descriptor.idProduct); 782 dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
783 783
784 if (iface_desc->desc.bNumEndpoints < 1) {
785 dev_err(&interface->dev, "Invalid number of endpoints\n");
786 retval = -EINVAL;
787 goto error;
788 }
789
790 /* set up the endpoint information */ 784 /* set up the endpoint information */
791 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 785 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
792 endpoint = &iface_desc->endpoint[i].desc; 786 endpoint = &iface_desc->endpoint[i].desc;
@@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface,
797 /* this one will match for the IOWarrior56 only */ 791 /* this one will match for the IOWarrior56 only */
798 dev->int_out_endpoint = endpoint; 792 dev->int_out_endpoint = endpoint;
799 } 793 }
794
795 if (!dev->int_in_endpoint) {
796 dev_err(&interface->dev, "no interrupt-in endpoint found\n");
797 retval = -ENODEV;
798 goto error;
799 }
800
801 if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
802 if (!dev->int_out_endpoint) {
803 dev_err(&interface->dev, "no interrupt-out endpoint found\n");
804 retval = -ENODEV;
805 goto error;
806 }
807 }
808
800 /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ 809 /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
801 dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); 810 dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
802 if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && 811 if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
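The iowarrior hunks swap a weak bNumEndpoints count check for explicit post-scan validation: the interrupt-in endpoint is mandatory for every device, and the IOW56 additionally requires interrupt-out; anything missing fails the probe with -ENODEV before the pointers are dereferenced. A compilable sketch of that scan-then-validate shape (descriptor layout heavily simplified):

#include <errno.h>
#include <stdio.h>

struct endpoint { int is_int_in, is_int_out; };

static int probe(const struct endpoint *eps, int num, int is_iow56)
{
        const struct endpoint *int_in = NULL, *int_out = NULL;

        for (int i = 0; i < num; i++) {
                if (eps[i].is_int_in)
                        int_in = &eps[i];
                else if (eps[i].is_int_out)
                        int_out = &eps[i];
        }
        /* validate after the scan, before anything dereferences them */
        if (!int_in)
                return -ENODEV;
        if (is_iow56 && !int_out)
                return -ENODEV;
        return 0;
}

int main(void)
{
        struct endpoint only_out[] = { { .is_int_out = 1 } };

        printf("probe -> %d (expect -ENODEV = %d)\n",
               probe(only_out, 1, 0), -ENODEV);
        return 0;
}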
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4e18600dc9b4..91f66d68bcb7 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -375,18 +375,24 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
375 if (of_get_property(np, "dynamic-power-switching", NULL)) 375 if (of_get_property(np, "dynamic-power-switching", NULL))
376 hub->conf_data2 |= BIT(7); 376 hub->conf_data2 |= BIT(7);
377 377
378 if (of_get_property(np, "oc-delay-100us", NULL)) { 378 if (!of_property_read_u32(np, "oc-delay-us", property_u32)) {
379 hub->conf_data2 &= ~BIT(5); 379 if (*property_u32 == 100) {
380 hub->conf_data2 &= ~BIT(4); 380 /* 100 us*/
381 } else if (of_get_property(np, "oc-delay-4ms", NULL)) { 381 hub->conf_data2 &= ~BIT(5);
382 hub->conf_data2 &= ~BIT(5); 382 hub->conf_data2 &= ~BIT(4);
383 hub->conf_data2 |= BIT(4); 383 } else if (*property_u32 == 4000) {
384 } else if (of_get_property(np, "oc-delay-8ms", NULL)) { 384 /* 4 ms */
385 hub->conf_data2 |= BIT(5); 385 hub->conf_data2 &= ~BIT(5);
386 hub->conf_data2 &= ~BIT(4); 386 hub->conf_data2 |= BIT(4);
387 } else if (of_get_property(np, "oc-delay-16ms", NULL)) { 387 } else if (*property_u32 == 16000) {
388 hub->conf_data2 |= BIT(5); 388 /* 16 ms */
389 hub->conf_data2 |= BIT(4); 389 hub->conf_data2 |= BIT(5);
390 hub->conf_data2 |= BIT(4);
391 } else {
392 /* 8 ms (DEFAULT) */
393 hub->conf_data2 |= BIT(5);
394 hub->conf_data2 &= ~BIT(4);
395 }
390 } 396 }
391 397
392 if (of_get_property(np, "compound-device", NULL)) 398 if (of_get_property(np, "compound-device", NULL))
@@ -432,30 +438,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
432 } 438 }
433 } 439 }
434 440
435 hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
436 if (!of_property_read_u32(np, "max-sp-power", property_u32))
437 hub->max_power_sp = min_t(u8, be32_to_cpu(*property_u32) / 2,
438 250);
439
440 hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
441 if (!of_property_read_u32(np, "max-bp-power", property_u32))
442 hub->max_power_bp = min_t(u8, be32_to_cpu(*property_u32) / 2,
443 250);
444
445 hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
446 if (!of_property_read_u32(np, "max-sp-current", property_u32))
447 hub->max_current_sp = min_t(u8, be32_to_cpu(*property_u32) / 2,
448 250);
449
450 hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
451 if (!of_property_read_u32(np, "max-bp-current", property_u32))
452 hub->max_current_bp = min_t(u8, be32_to_cpu(*property_u32) / 2,
453 250);
454
455 hub->power_on_time = USB251XB_DEF_POWER_ON_TIME; 441 hub->power_on_time = USB251XB_DEF_POWER_ON_TIME;
456 if (!of_property_read_u32(np, "power-on-time", property_u32)) 442 if (!of_property_read_u32(np, "power-on-time-ms", property_u32))
457 hub->power_on_time = min_t(u8, be32_to_cpu(*property_u32) / 2, 443 hub->power_on_time = min_t(u8, *property_u32 / 2, 255);
458 255);
459 444
460 if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) 445 if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
461 hub->lang_id = USB251XB_DEF_LANGUAGE_ID; 446 hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
@@ -492,6 +477,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
492 /* The following parameters are currently not exposed to devicetree, but 477 /* The following parameters are currently not exposed to devicetree, but
493 * may be as soon as needed. 478 * may be as soon as needed.
494 */ 479 */
480 hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
481 hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
482 hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
483 hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
495 hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; 484 hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
496 hub->boost_up = USB251XB_DEF_BOOST_UP; 485 hub->boost_up = USB251XB_DEF_BOOST_UP;
497 hub->boost_x = USB251XB_DEF_BOOST_X; 486 hub->boost_x = USB251XB_DEF_BOOST_X;
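The usb251xb rework collapses four boolean properties (oc-delay-100us/-4ms/-8ms/-16ms) into one oc-delay-us integer and decodes it into CFG2 bits 5:4, defaulting to 8 ms for unknown values; dropping be32_to_cpu() is also correct, since of_property_read_u32() already returns host-endian data. A userspace sketch of the same decode (bit positions follow the patch, everything else is scaffolding):

#include <stdio.h>

#define BIT(n)  (1u << (n))

/* decode oc-delay-us into CFG2 bits 5:4, mirroring the patch */
static unsigned oc_delay_bits(unsigned conf, unsigned us)
{
        conf &= ~(BIT(5) | BIT(4));
        switch (us) {
        case 100:                       /* 100 us -> 00 */
                break;
        case 4000:                      /* 4 ms   -> 01 */
                conf |= BIT(4);
                break;
        case 16000:                     /* 16 ms  -> 11 */
                conf |= BIT(5) | BIT(4);
                break;
        default:                        /* 8 ms (DEFAULT) -> 10 */
                conf |= BIT(5);
                break;
        }
        return conf;
}

int main(void)
{
        unsigned vals[] = { 100, 4000, 8000, 16000, 12345 };

        for (int i = 0; i < 5; i++)
                printf("%5u us -> cfg2 bits 5:4 = %u%u\n", vals[i],
                       !!(oc_delay_bits(0, vals[i]) & BIT(5)),
                       !!(oc_delay_bits(0, vals[i]) & BIT(4)));
        return 0;
}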
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index db68156568e6..b3b33cf7ddf6 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = {
33}; 33};
34MODULE_DEVICE_TABLE(i2c, isp1301_id); 34MODULE_DEVICE_TABLE(i2c, isp1301_id);
35 35
36static const struct of_device_id isp1301_of_match[] = {
37 {.compatible = "nxp,isp1301" },
38 { },
39};
40MODULE_DEVICE_TABLE(of, isp1301_of_match);
41
36static struct i2c_client *isp1301_i2c_client; 42static struct i2c_client *isp1301_i2c_client;
37 43
38static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear) 44static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
@@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
130static struct i2c_driver isp1301_driver = { 136static struct i2c_driver isp1301_driver = {
131 .driver = { 137 .driver = {
132 .name = DRV_NAME, 138 .name = DRV_NAME,
139 .of_match_table = of_match_ptr(isp1301_of_match),
133 }, 140 },
134 .probe = isp1301_probe, 141 .probe = isp1301_probe,
135 .remove = isp1301_remove, 142 .remove = isp1301_remove,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index ab78111e0968..6537d3ca2797 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1500,7 +1500,7 @@ static int digi_read_oob_callback(struct urb *urb)
1500 return -1; 1500 return -1;
1501 1501
1502 /* handle each oob command */ 1502 /* handle each oob command */
1503 for (i = 0; i < urb->actual_length - 4; i += 4) { 1503 for (i = 0; i < urb->actual_length - 3; i += 4) {
1504 opcode = buf[i]; 1504 opcode = buf[i];
1505 line = buf[i + 1]; 1505 line = buf[i + 1];
1506 status = buf[i + 2]; 1506 status = buf[i + 2];
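The digi_acceleport fix is a loop-bound off-by-one: each OOB command occupies 4 bytes (opcode, line, status, val), so the last valid start index is length - 4 and the condition must be i < length - 3; the old i < length - 4 silently dropped the final complete command. A quick check:

#include <stdio.h>

static int count_commands(int length, int fixed)
{
        int n = 0;

        /* each command is 4 bytes: opcode, line, status, val */
        for (int i = 0; i < (fixed ? length - 3 : length - 4); i += 4)
                n++;
        return n;
}

int main(void)
{
        /* a 4-byte urb holds exactly one command */
        printf("buggy bound handles %d of 1 command\n", count_commands(4, 0));
        printf("fixed bound handles %d of 1 command\n", count_commands(4, 1));
        /* 8 bytes: the old bound also lost the trailing command */
        printf("buggy: %d of 2, fixed: %d of 2\n",
               count_commands(8, 0), count_commands(8, 1));
        return 0;
}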
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index ceaeebaa6f90..a76b95d32157 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb)
1674 function = TIUMP_GET_FUNC_FROM_CODE(data[0]); 1674 function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
1675 dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__, 1675 dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
1676 port_number, function, data[1]); 1676 port_number, function, data[1]);
1677
1678 if (port_number >= edge_serial->serial->num_ports) {
1679 dev_err(dev, "bad port number %d\n", port_number);
1680 goto exit;
1681 }
1682
1677 port = edge_serial->serial->port[port_number]; 1683 port = edge_serial->serial->port[port_number];
1678 edge_port = usb_get_serial_port_data(port); 1684 edge_port = usb_get_serial_port_data(port);
1679 if (!edge_port) { 1685 if (!edge_port) {
@@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb)
1755 1761
1756 port_number = edge_port->port->port_number; 1762 port_number = edge_port->port->port_number;
1757 1763
1758 if (edge_port->lsr_event) { 1764 if (urb->actual_length > 0 && edge_port->lsr_event) {
1759 edge_port->lsr_event = 0; 1765 edge_port->lsr_event = 0;
1760 dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n", 1766 dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
1761 __func__, port_number, edge_port->lsr_mask, *data); 1767 __func__, port_number, edge_port->lsr_mask, *data);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index a180b17d2432..dd706953b466 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -31,7 +31,6 @@
31#define BT_IGNITIONPRO_ID 0x2000 31#define BT_IGNITIONPRO_ID 0x2000
32 32
33/* function prototypes */ 33/* function prototypes */
34static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port);
35static void omninet_process_read_urb(struct urb *urb); 34static void omninet_process_read_urb(struct urb *urb);
36static void omninet_write_bulk_callback(struct urb *urb); 35static void omninet_write_bulk_callback(struct urb *urb);
37static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, 36static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -60,7 +59,6 @@ static struct usb_serial_driver zyxel_omninet_device = {
60 .attach = omninet_attach, 59 .attach = omninet_attach,
61 .port_probe = omninet_port_probe, 60 .port_probe = omninet_port_probe,
62 .port_remove = omninet_port_remove, 61 .port_remove = omninet_port_remove,
63 .open = omninet_open,
64 .write = omninet_write, 62 .write = omninet_write,
65 .write_room = omninet_write_room, 63 .write_room = omninet_write_room,
66 .write_bulk_callback = omninet_write_bulk_callback, 64 .write_bulk_callback = omninet_write_bulk_callback,
@@ -140,17 +138,6 @@ static int omninet_port_remove(struct usb_serial_port *port)
140 return 0; 138 return 0;
141} 139}
142 140
143static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
144{
145 struct usb_serial *serial = port->serial;
146 struct usb_serial_port *wport;
147
148 wport = serial->port[1];
149 tty_port_tty_set(&wport->port, tty);
150
151 return usb_serial_generic_open(tty, port);
152}
153
154#define OMNINET_HEADERLEN 4 141#define OMNINET_HEADERLEN 4
155#define OMNINET_BULKOUTSIZE 64 142#define OMNINET_BULKOUTSIZE 64
156#define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN) 143#define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN)
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 93c6c9b08daa..8a069aa154ed 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb)
200 if (!safe) 200 if (!safe)
201 goto out; 201 goto out;
202 202
203 if (length < 2) {
204 dev_err(&port->dev, "malformed packet\n");
205 return;
206 }
207
203 fcs = fcs_compute10(data, length, CRC10_INITFCS); 208 fcs = fcs_compute10(data, length, CRC10_INITFCS);
204 if (fcs) { 209 if (fcs) {
205 dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs); 210 dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 16cc18369111..9129f6cb8230 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2071,6 +2071,20 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
2071 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2071 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2072 US_FL_IGNORE_RESIDUE ), 2072 US_FL_IGNORE_RESIDUE ),
2073 2073
2074/*
2075 * Reported by Tobias Jakobi <tjakobi@math.uni-bielefeld.de>
2076 * The INIC-3619 bridge is used in the StarTech SLSODDU33B
2077 * SATA-USB enclosure for slimline optical drives.
2078 *
2079 * The quirk enables MakeMKV to properly exchange keys with
2080 * an installed BD drive.
2081 */
2082UNUSUAL_DEV( 0x13fd, 0x3609, 0x0209, 0x0209,
2083 "Initio Corporation",
2084 "INIC-3619",
2085 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2086 US_FL_IGNORE_RESIDUE ),
2087
2074/* Reported by Qinglin Ye <yestyle@gmail.com> */ 2088/* Reported by Qinglin Ye <yestyle@gmail.com> */
2075UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100, 2089UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
2076 "Kingston", 2090 "Kingston",
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index c77a0751a311..f3bf8f4e2d6c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -36,6 +36,7 @@
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/highmem.h> 38#include <linux/highmem.h>
39#include <linux/refcount.h>
39 40
40#include <xen/xen.h> 41#include <xen/xen.h>
41#include <xen/grant_table.h> 42#include <xen/grant_table.h>
@@ -86,7 +87,7 @@ struct grant_map {
86 int index; 87 int index;
87 int count; 88 int count;
88 int flags; 89 int flags;
89 atomic_t users; 90 refcount_t users;
90 struct unmap_notify notify; 91 struct unmap_notify notify;
91 struct ioctl_gntdev_grant_ref *grants; 92 struct ioctl_gntdev_grant_ref *grants;
92 struct gnttab_map_grant_ref *map_ops; 93 struct gnttab_map_grant_ref *map_ops;
@@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
166 167
167 add->index = 0; 168 add->index = 0;
168 add->count = count; 169 add->count = count;
169 atomic_set(&add->users, 1); 170 refcount_set(&add->users, 1);
170 171
171 return add; 172 return add;
172 173
@@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
212 if (!map) 213 if (!map)
213 return; 214 return;
214 215
215 if (!atomic_dec_and_test(&map->users)) 216 if (!refcount_dec_and_test(&map->users))
216 return; 217 return;
217 218
218 atomic_sub(map->count, &pages_mapped); 219 atomic_sub(map->count, &pages_mapped);
@@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
400 struct grant_map *map = vma->vm_private_data; 401 struct grant_map *map = vma->vm_private_data;
401 402
402 pr_debug("gntdev_vma_open %p\n", vma); 403 pr_debug("gntdev_vma_open %p\n", vma);
403 atomic_inc(&map->users); 404 refcount_inc(&map->users);
404} 405}
405 406
406static void gntdev_vma_close(struct vm_area_struct *vma) 407static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1004 goto unlock_out; 1005 goto unlock_out;
1005 } 1006 }
1006 1007
1007 atomic_inc(&map->users); 1008 refcount_inc(&map->users);
1008 1009
1009 vma->vm_ops = &gntdev_vmops; 1010 vma->vm_ops = &gntdev_vmops;
1010 1011
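The gntdev conversion is a mechanical atomic_t to refcount_t swap (refcount_set/refcount_inc/refcount_dec_and_test), and what it buys is saturation: refcount_t warns and saturates on overflow or underflow instead of silently wrapping into a use-after-free. The sketch below shows only the dec-and-test lifetime pattern, using C11 atomics in userspace; the saturation checks themselves are kernel machinery and are not reproduced here:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct grant_map {
        atomic_int users;
        int count;
};

static struct grant_map *map_alloc(int count)
{
        struct grant_map *m = calloc(1, sizeof(*m));

        if (m) {
                m->count = count;
                atomic_store(&m->users, 1);     /* refcount_set(&m->users, 1) */
        }
        return m;
}

static void map_get(struct grant_map *m)
{
        atomic_fetch_add(&m->users, 1);         /* refcount_inc() */
}

static void map_put(struct grant_map *m)
{
        /* refcount_dec_and_test(): free only when the last user drops */
        if (atomic_fetch_sub(&m->users, 1) == 1) {
                printf("freeing map with %d grants\n", m->count);
                free(m);
        }
}

int main(void)
{
        struct grant_map *m = map_alloc(4);

        map_get(m);     /* e.g. a vma open */
        map_put(m);     /* vma close */
        map_put(m);     /* final release frees */
        return 0;
}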
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f8afc6dcc29f..e8cef1ad0fe3 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
681 return 0; 681 return 0;
682} 682}
683EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask); 683EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
684
685/*
686 * Create userspace mapping for the DMA-coherent memory.
687 * This function should be called with the pages from the current domain only,
688 * passing pages mapped from other domains would lead to memory corruption.
689 */
690int
691xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
692 void *cpu_addr, dma_addr_t dma_addr, size_t size,
693 unsigned long attrs)
694{
695#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
696 if (__generic_dma_ops(dev)->mmap)
697 return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
698 dma_addr, size, attrs);
699#endif
700 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
701}
702EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
703
704/*
705 * This function should be called with the pages from the current domain only,
706 * passing pages mapped from other domains would lead to memory corruption.
707 */
708int
709xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
710 void *cpu_addr, dma_addr_t handle, size_t size,
711 unsigned long attrs)
712{
713#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
714 if (__generic_dma_ops(dev)->get_sgtable) {
715#if 0
716 /*
717 * This check verifies that the page belongs to the current domain and
718 * is not one mapped from another domain.
719 * This check is for debug only, and should not go to production build
720 */
721 unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
722 BUG_ON (!page_is_ram(bfn));
723#endif
724 return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
725 handle, size, attrs);
726 }
727#endif
728 return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
729}
730EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 4d343eed08f5..1f4733b80c87 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,7 +55,6 @@
55#include <linux/string.h> 55#include <linux/string.h>
56#include <linux/slab.h> 56#include <linux/slab.h>
57#include <linux/miscdevice.h> 57#include <linux/miscdevice.h>
58#include <linux/init.h>
59 58
60#include <xen/xenbus.h> 59#include <xen/xenbus.h>
61#include <xen/xen.h> 60#include <xen/xen.h>
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 7d398d300e97..9382db998ec9 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -743,7 +743,7 @@ static int tcp_accept_from_sock(struct connection *con)
743 newsock->type = con->sock->type; 743 newsock->type = con->sock->type;
744 newsock->ops = con->sock->ops; 744 newsock->ops = con->sock->ops;
745 745
746 result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK); 746 result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK, true);
747 if (result < 0) 747 if (result < 0)
748 goto accept_err; 748 goto accept_err;
749 749
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 338d2f73eb29..a2c05f2ada6d 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1359,6 +1359,16 @@ out:
1359 return 0; 1359 return 0;
1360} 1360}
1361 1361
1362static void fat_dummy_inode_init(struct inode *inode)
1363{
1364 /* Initialize this dummy inode to work as no-op. */
1365 MSDOS_I(inode)->mmu_private = 0;
1366 MSDOS_I(inode)->i_start = 0;
1367 MSDOS_I(inode)->i_logstart = 0;
1368 MSDOS_I(inode)->i_attrs = 0;
1369 MSDOS_I(inode)->i_pos = 0;
1370}
1371
1362static int fat_read_root(struct inode *inode) 1372static int fat_read_root(struct inode *inode)
1363{ 1373{
1364 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); 1374 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1803,12 +1813,13 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1803 fat_inode = new_inode(sb); 1813 fat_inode = new_inode(sb);
1804 if (!fat_inode) 1814 if (!fat_inode)
1805 goto out_fail; 1815 goto out_fail;
1806 MSDOS_I(fat_inode)->i_pos = 0; 1816 fat_dummy_inode_init(fat_inode);
1807 sbi->fat_inode = fat_inode; 1817 sbi->fat_inode = fat_inode;
1808 1818
1809 fsinfo_inode = new_inode(sb); 1819 fsinfo_inode = new_inode(sb);
1810 if (!fsinfo_inode) 1820 if (!fsinfo_inode)
1811 goto out_fail; 1821 goto out_fail;
1822 fat_dummy_inode_init(fsinfo_inode);
1812 fsinfo_inode->i_ino = MSDOS_FSINFO_INO; 1823 fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
1813 sbi->fsinfo_inode = fsinfo_inode; 1824 sbi->fsinfo_inode = fsinfo_inode;
1814 insert_inode_hash(fsinfo_inode); 1825 insert_inode_hash(fsinfo_inode);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ef600591d96f..63ee2940775c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb)
173 spin_unlock_bh(&wb->work_lock); 173 spin_unlock_bh(&wb->work_lock);
174} 174}
175 175
176static void finish_writeback_work(struct bdi_writeback *wb,
177 struct wb_writeback_work *work)
178{
179 struct wb_completion *done = work->done;
180
181 if (work->auto_free)
182 kfree(work);
183 if (done && atomic_dec_and_test(&done->cnt))
184 wake_up_all(&wb->bdi->wb_waitq);
185}
186
176static void wb_queue_work(struct bdi_writeback *wb, 187static void wb_queue_work(struct bdi_writeback *wb,
177 struct wb_writeback_work *work) 188 struct wb_writeback_work *work)
178{ 189{
179 trace_writeback_queue(wb, work); 190 trace_writeback_queue(wb, work);
180 191
181 spin_lock_bh(&wb->work_lock);
182 if (!test_bit(WB_registered, &wb->state))
183 goto out_unlock;
184 if (work->done) 192 if (work->done)
185 atomic_inc(&work->done->cnt); 193 atomic_inc(&work->done->cnt);
186 list_add_tail(&work->list, &wb->work_list); 194
187 mod_delayed_work(bdi_wq, &wb->dwork, 0); 195 spin_lock_bh(&wb->work_lock);
188out_unlock: 196
197 if (test_bit(WB_registered, &wb->state)) {
198 list_add_tail(&work->list, &wb->work_list);
199 mod_delayed_work(bdi_wq, &wb->dwork, 0);
200 } else
201 finish_writeback_work(wb, work);
202
189 spin_unlock_bh(&wb->work_lock); 203 spin_unlock_bh(&wb->work_lock);
190} 204}
191 205
@@ -1873,16 +1887,9 @@ static long wb_do_writeback(struct bdi_writeback *wb)
1873 1887
1874 set_bit(WB_writeback_running, &wb->state); 1888 set_bit(WB_writeback_running, &wb->state);
1875 while ((work = get_next_work_item(wb)) != NULL) { 1889 while ((work = get_next_work_item(wb)) != NULL) {
1876 struct wb_completion *done = work->done;
1877
1878 trace_writeback_exec(wb, work); 1890 trace_writeback_exec(wb, work);
1879
1880 wrote += wb_writeback(wb, work); 1891 wrote += wb_writeback(wb, work);
1881 1892 finish_writeback_work(wb, work);
1882 if (work->auto_free)
1883 kfree(work);
1884 if (done && atomic_dec_and_test(&done->cnt))
1885 wake_up_all(&wb->bdi->wb_waitq);
1886 } 1893 }
1887 1894
1888 /* 1895 /*
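The fs-writeback refactor gives completion bookkeeping a single owner, finish_writeback_work(), so the new "bdi already unregistered" branch completes a rejected work item exactly like normal execution does: free it if auto_free, then drop the done->cnt reference and wake waiters when it reaches zero. Before the patch a rejected item simply fell out of the function, leaking its count and leaving waiters hung. A compact pthread sketch of the counted-completion idea (a loose analogue, not the kernel's wb_waitq machinery):

#include <pthread.h>
#include <stdio.h>

struct wb_completion {
        int cnt;
        pthread_mutex_t lock;
        pthread_cond_t waitq;
};

static void completion_get(struct wb_completion *done)
{
        pthread_mutex_lock(&done->lock);
        done->cnt++;                    /* one reference per queued work */
        pthread_mutex_unlock(&done->lock);
}

/* the single place every path ends: queued-and-run or rejected */
static void finish_writeback_work(struct wb_completion *done)
{
        pthread_mutex_lock(&done->lock);
        if (--done->cnt == 0)
                pthread_cond_broadcast(&done->waitq);
        pthread_mutex_unlock(&done->lock);
}

int main(void)
{
        struct wb_completion done = {
                .cnt = 1,       /* the waiter's own reference */
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .waitq = PTHREAD_COND_INITIALIZER,
        };

        completion_get(&done);          /* queue one work item */
        finish_writeback_work(&done);   /* ...which runs, or is rejected */
        finish_writeback_work(&done);   /* waiter drops its own reference */
        printf("cnt=%d, all work accounted for\n", done.cnt);
        return 0;
}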
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index c45084ac642d..511e1ed7e2de 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,7 +207,7 @@ struct lm_lockname {
207 struct gfs2_sbd *ln_sbd; 207 struct gfs2_sbd *ln_sbd;
208 u64 ln_number; 208 u64 ln_number;
209 unsigned int ln_type; 209 unsigned int ln_type;
210}; 210} __packed __aligned(sizeof(int));
211 211
212#define lm_name_equal(name1, name2) \ 212#define lm_name_equal(name1, name2) \
213 (((name1)->ln_number == (name2)->ln_number) && \ 213 (((name1)->ln_number == (name2)->ln_number) && \
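The gfs2 change matters because struct lm_lockname is hashed as raw bytes: without __packed the compiler may insert padding the code never initializes, so two equal locknames can hash or memcmp differently; __aligned(sizeof(int)) then restores efficient member access after packing. A userspace demonstration of the padding hazard (field types chosen to mirror the struct on LP64, not copied from gfs2):

#include <stdio.h>

struct key_padded {
        void *sbd;                      /* 8 bytes on LP64 */
        unsigned long long number;
        unsigned int type;              /* compiler pads tail to an 8-byte multiple */
};

struct key_packed {
        void *sbd;
        unsigned long long number;
        unsigned int type;
} __attribute__((packed, aligned(sizeof(int))));

int main(void)
{
        printf("padded: %zu bytes, packed: %zu bytes\n",
               sizeof(struct key_padded), sizeof(struct key_packed));
        /*
         * A hash or memcmp over sizeof(struct key_padded) covers padding
         * bytes the program never wrote; the packed layout has none.
         */
        return 0;
}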
diff --git a/fs/iomap.c b/fs/iomap.c
index 3ca1a8e44135..141c3cd55a8b 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -846,7 +846,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
846 struct address_space *mapping = iocb->ki_filp->f_mapping; 846 struct address_space *mapping = iocb->ki_filp->f_mapping;
847 struct inode *inode = file_inode(iocb->ki_filp); 847 struct inode *inode = file_inode(iocb->ki_filp);
848 size_t count = iov_iter_count(iter); 848 size_t count = iov_iter_count(iter);
849 loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0; 849 loff_t pos = iocb->ki_pos, start = pos;
850 loff_t end = iocb->ki_pos + count - 1, ret = 0;
850 unsigned int flags = IOMAP_DIRECT; 851 unsigned int flags = IOMAP_DIRECT;
851 struct blk_plug plug; 852 struct blk_plug plug;
852 struct iomap_dio *dio; 853 struct iomap_dio *dio;
@@ -887,12 +888,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
887 } 888 }
888 889
889 if (mapping->nrpages) { 890 if (mapping->nrpages) {
890 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end); 891 ret = filemap_write_and_wait_range(mapping, start, end);
891 if (ret) 892 if (ret)
892 goto out_free_dio; 893 goto out_free_dio;
893 894
894 ret = invalidate_inode_pages2_range(mapping, 895 ret = invalidate_inode_pages2_range(mapping,
895 iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT); 896 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
896 WARN_ON_ONCE(ret); 897 WARN_ON_ONCE(ret);
897 ret = 0; 898 ret = 0;
898 } 899 }
@@ -941,6 +942,8 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
941 __set_current_state(TASK_RUNNING); 942 __set_current_state(TASK_RUNNING);
942 } 943 }
943 944
945 ret = iomap_dio_complete(dio);
946
944 /* 947 /*
945 * Try again to invalidate clean pages which might have been cached by 948 * Try again to invalidate clean pages which might have been cached by
946 * non-direct readahead, or faulted in by get_user_pages() if the source 949 * non-direct readahead, or faulted in by get_user_pages() if the source
@@ -949,12 +952,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
949 * this invalidation fails, tough, the write still worked... 952 * this invalidation fails, tough, the write still worked...
950 */ 953 */
951 if (iov_iter_rw(iter) == WRITE && mapping->nrpages) { 954 if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
952 ret = invalidate_inode_pages2_range(mapping, 955 int err = invalidate_inode_pages2_range(mapping,
953 iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT); 956 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
954 WARN_ON_ONCE(ret); 957 WARN_ON_ONCE(err);
955 } 958 }
956 959
957 return iomap_dio_complete(dio); 960 return ret;
958 961
959out_free_dio: 962out_free_dio:
960 kfree(dio); 963 kfree(dio);
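Two things happen in the iomap_dio_rw() hunks: the starting offset is captured in start before iocb->ki_pos is advanced by the I/O, so both invalidation calls cover the range that was actually written, and iomap_dio_complete() now runs before the second invalidation so its byte count becomes the return value while an invalidation failure is merely warned about. A sketch of that pattern in isolation (do_sync_io() and invalidate_range() are hypothetical helpers, not kernel APIs):

/* Capture the offset before it moves, and never let best-effort
 * cleanup clobber the primary return value. */
ssize_t dio_pattern(struct kiocb *iocb, size_t count)
{
        loff_t start = iocb->ki_pos;         /* ki_pos advances during I/O */
        loff_t end = start + count - 1;
        ssize_t ret = do_sync_io(iocb);      /* bytes transferred or error */
        int err = invalidate_range(start, end);

        WARN_ON_ONCE(err);                   /* cleanup is best effort */
        return ret;
}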
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 4348027384f5..d0ab7e56d0b4 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1863,7 +1863,7 @@ static int o2net_accept_one(struct socket *sock, int *more)
1863 1863
1864 new_sock->type = sock->type; 1864 new_sock->type = sock->type;
1865 new_sock->ops = sock->ops; 1865 new_sock->ops = sock->ops;
1866 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK); 1866 ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false);
1867 if (ret < 0) 1867 if (ret < 0)
1868 goto out; 1868 goto out;
1869 1869
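The extra argument here reflects a tree-wide change to the socket accept path: the accept callback gained a bool so callers can state whether the new socket is being created on behalf of the kernel (af_alg_accept() in include/crypto/if_alg.h below picks up the same parameter). The implied callback shape, given as a signature sketch only (the parameter name kern is taken from the if_alg hunk; its exact semantics are outside the quoted context):

int (*accept)(struct socket *sock, struct socket *newsock, int flags,
              bool kern);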
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 1953986ee6bc..6e610a205e15 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -12,7 +12,6 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/cred.h> 13#include <linux/cred.h>
14#include <linux/xattr.h> 14#include <linux/xattr.h>
15#include <linux/sched/signal.h>
16#include "overlayfs.h" 15#include "overlayfs.h"
17#include "ovl_entry.h" 16#include "ovl_entry.h"
18 17
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 384fa759a563..c543cdb5f8ed 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -400,9 +400,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
400 clockid != CLOCK_BOOTTIME_ALARM)) 400 clockid != CLOCK_BOOTTIME_ALARM))
401 return -EINVAL; 401 return -EINVAL;
402 402
403 if (!capable(CAP_WAKE_ALARM) && 403 if ((clockid == CLOCK_REALTIME_ALARM ||
404 (clockid == CLOCK_REALTIME_ALARM || 404 clockid == CLOCK_BOOTTIME_ALARM) &&
405 clockid == CLOCK_BOOTTIME_ALARM)) 405 !capable(CAP_WAKE_ALARM))
406 return -EPERM; 406 return -EPERM;
407 407
408 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 408 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -449,7 +449,7 @@ static int do_timerfd_settime(int ufd, int flags,
449 return ret; 449 return ret;
450 ctx = f.file->private_data; 450 ctx = f.file->private_data;
451 451
452 if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) { 452 if (isalarm(ctx) && !capable(CAP_WAKE_ALARM)) {
453 fdput(f); 453 fdput(f);
454 return -EPERM; 454 return -EPERM;
455 } 455 }
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 973607df579d..1d227b0fcf49 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -138,8 +138,6 @@ out:
138 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd 138 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
139 * context. 139 * context.
140 * @ctx: [in] Pointer to the userfaultfd context. 140 * @ctx: [in] Pointer to the userfaultfd context.
141 *
142 * Returns: In case of success, returns not zero.
143 */ 141 */
144static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) 142static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
145{ 143{
@@ -267,6 +265,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
267{ 265{
268 struct mm_struct *mm = ctx->mm; 266 struct mm_struct *mm = ctx->mm;
269 pgd_t *pgd; 267 pgd_t *pgd;
268 p4d_t *p4d;
270 pud_t *pud; 269 pud_t *pud;
271 pmd_t *pmd, _pmd; 270 pmd_t *pmd, _pmd;
272 pte_t *pte; 271 pte_t *pte;
@@ -277,7 +276,10 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
277 pgd = pgd_offset(mm, address); 276 pgd = pgd_offset(mm, address);
278 if (!pgd_present(*pgd)) 277 if (!pgd_present(*pgd))
279 goto out; 278 goto out;
280 pud = pud_offset(pgd, address); 279 p4d = p4d_offset(pgd, address);
280 if (!p4d_present(*p4d))
281 goto out;
282 pud = pud_offset(p4d, address);
281 if (!pud_present(*pud)) 283 if (!pud_present(*pud))
282 goto out; 284 goto out;
283 pmd = pmd_offset(pud, address); 285 pmd = pmd_offset(pud, address);
@@ -490,7 +492,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
490 * in such case. 492 * in such case.
491 */ 493 */
492 down_read(&mm->mmap_sem); 494 down_read(&mm->mmap_sem);
493 ret = 0; 495 ret = VM_FAULT_NOPAGE;
494 } 496 }
495 } 497 }
496 498
@@ -527,10 +529,11 @@ out:
527 return ret; 529 return ret;
528} 530}
529 531
530static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, 532static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
531 struct userfaultfd_wait_queue *ewq) 533 struct userfaultfd_wait_queue *ewq)
532{ 534{
533 int ret = 0; 535 if (WARN_ON_ONCE(current->flags & PF_EXITING))
536 goto out;
534 537
535 ewq->ctx = ctx; 538 ewq->ctx = ctx;
536 init_waitqueue_entry(&ewq->wq, current); 539 init_waitqueue_entry(&ewq->wq, current);
@@ -547,8 +550,16 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
547 break; 550 break;
548 if (ACCESS_ONCE(ctx->released) || 551 if (ACCESS_ONCE(ctx->released) ||
549 fatal_signal_pending(current)) { 552 fatal_signal_pending(current)) {
550 ret = -1;
551 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); 553 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
554 if (ewq->msg.event == UFFD_EVENT_FORK) {
555 struct userfaultfd_ctx *new;
556
557 new = (struct userfaultfd_ctx *)
558 (unsigned long)
559 ewq->msg.arg.reserved.reserved1;
560
561 userfaultfd_ctx_put(new);
562 }
552 break; 563 break;
553 } 564 }
554 565
@@ -566,9 +577,8 @@ static int userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
566 * ctx may go away after this if the userfault pseudo fd is 577 * ctx may go away after this if the userfault pseudo fd is
567 * already released. 578 * already released.
568 */ 579 */
569 580out:
570 userfaultfd_ctx_put(ctx); 581 userfaultfd_ctx_put(ctx);
571 return ret;
572} 582}
573 583
574static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, 584static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
@@ -626,7 +636,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
626 return 0; 636 return 0;
627} 637}
628 638
629static int dup_fctx(struct userfaultfd_fork_ctx *fctx) 639static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
630{ 640{
631 struct userfaultfd_ctx *ctx = fctx->orig; 641 struct userfaultfd_ctx *ctx = fctx->orig;
632 struct userfaultfd_wait_queue ewq; 642 struct userfaultfd_wait_queue ewq;
@@ -636,17 +646,15 @@ static int dup_fctx(struct userfaultfd_fork_ctx *fctx)
636 ewq.msg.event = UFFD_EVENT_FORK; 646 ewq.msg.event = UFFD_EVENT_FORK;
637 ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new; 647 ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
638 648
639 return userfaultfd_event_wait_completion(ctx, &ewq); 649 userfaultfd_event_wait_completion(ctx, &ewq);
640} 650}
641 651
642void dup_userfaultfd_complete(struct list_head *fcs) 652void dup_userfaultfd_complete(struct list_head *fcs)
643{ 653{
644 int ret = 0;
645 struct userfaultfd_fork_ctx *fctx, *n; 654 struct userfaultfd_fork_ctx *fctx, *n;
646 655
647 list_for_each_entry_safe(fctx, n, fcs, list) { 656 list_for_each_entry_safe(fctx, n, fcs, list) {
648 if (!ret) 657 dup_fctx(fctx);
649 ret = dup_fctx(fctx);
650 list_del(&fctx->list); 658 list_del(&fctx->list);
651 kfree(fctx); 659 kfree(fctx);
652 } 660 }
@@ -689,8 +697,7 @@ void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
689 userfaultfd_event_wait_completion(ctx, &ewq); 697 userfaultfd_event_wait_completion(ctx, &ewq);
690} 698}
691 699
692void userfaultfd_remove(struct vm_area_struct *vma, 700bool userfaultfd_remove(struct vm_area_struct *vma,
693 struct vm_area_struct **prev,
694 unsigned long start, unsigned long end) 701 unsigned long start, unsigned long end)
695{ 702{
696 struct mm_struct *mm = vma->vm_mm; 703 struct mm_struct *mm = vma->vm_mm;
@@ -699,13 +706,11 @@ void userfaultfd_remove(struct vm_area_struct *vma,
699 706
700 ctx = vma->vm_userfaultfd_ctx.ctx; 707 ctx = vma->vm_userfaultfd_ctx.ctx;
701 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) 708 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
702 return; 709 return true;
703 710
704 userfaultfd_ctx_get(ctx); 711 userfaultfd_ctx_get(ctx);
705 up_read(&mm->mmap_sem); 712 up_read(&mm->mmap_sem);
706 713
707 *prev = NULL; /* We wait for ACK w/o the mmap semaphore */
708
709 msg_init(&ewq.msg); 714 msg_init(&ewq.msg);
710 715
711 ewq.msg.event = UFFD_EVENT_REMOVE; 716 ewq.msg.event = UFFD_EVENT_REMOVE;
@@ -714,7 +719,7 @@ void userfaultfd_remove(struct vm_area_struct *vma,
714 719
715 userfaultfd_event_wait_completion(ctx, &ewq); 720 userfaultfd_event_wait_completion(ctx, &ewq);
716 721
717 down_read(&mm->mmap_sem); 722 return false;
718} 723}
719 724
720static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, 725static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
@@ -775,34 +780,6 @@ void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
775 } 780 }
776} 781}
777 782
778void userfaultfd_exit(struct mm_struct *mm)
779{
780 struct vm_area_struct *vma = mm->mmap;
781
782 /*
783 * We can do the vma walk without locking because the caller
784 * (exit_mm) knows it now has exclusive access
785 */
786 while (vma) {
787 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
788
789 if (ctx && (ctx->features & UFFD_FEATURE_EVENT_EXIT)) {
790 struct userfaultfd_wait_queue ewq;
791
792 userfaultfd_ctx_get(ctx);
793
794 msg_init(&ewq.msg);
795 ewq.msg.event = UFFD_EVENT_EXIT;
796
797 userfaultfd_event_wait_completion(ctx, &ewq);
798
799 ctx->features &= ~UFFD_FEATURE_EVENT_EXIT;
800 }
801
802 vma = vma->vm_next;
803 }
804}
805
806static int userfaultfd_release(struct inode *inode, struct file *file) 783static int userfaultfd_release(struct inode *inode, struct file *file)
807{ 784{
808 struct userfaultfd_ctx *ctx = file->private_data; 785 struct userfaultfd_ctx *ctx = file->private_data;
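The userfaultfd changes tighten the event-delivery contract: event waiters no longer return a status (a fork waiter now releases the new context itself if the fd was torn down), the exit-time event (userfaultfd_exit) is removed outright, and userfaultfd_remove() returns bool instead of clearing *prev behind the caller's back. True means no event was queued and mmap_sem is still held; false means the function dropped mmap_sem to wait for the ACK. A sketch of the resulting caller-side convention (madvise-style caller, names assumed, not verbatim kernel code):

down_read(&mm->mmap_sem);
vma = find_vma(mm, start);
if (!userfaultfd_remove(vma, start, end)) {
        /* mmap_sem was dropped while waiting for the uffd ACK;
         * re-acquire it and look the vma up again before going on. */
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);
}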
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 2dfdc62f795e..70a5b55e0870 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -25,24 +25,6 @@
25#include "kmem.h" 25#include "kmem.h"
26#include "xfs_message.h" 26#include "xfs_message.h"
27 27
28/*
29 * Greedy allocation. May fail and may return vmalloced memory.
30 */
31void *
32kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
33{
34 void *ptr;
35 size_t kmsize = maxsize;
36
37 while (!(ptr = vzalloc(kmsize))) {
38 if ((kmsize >>= 1) <= minsize)
39 kmsize = minsize;
40 }
41 if (ptr)
42 *size = kmsize;
43 return ptr;
44}
45
46void * 28void *
47kmem_alloc(size_t size, xfs_km_flags_t flags) 29kmem_alloc(size_t size, xfs_km_flags_t flags)
48{ 30{
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 689f746224e7..f0fc84fcaac2 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -69,8 +69,6 @@ static inline void kmem_free(const void *ptr)
69} 69}
70 70
71 71
72extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
73
74static inline void * 72static inline void *
75kmem_zalloc(size_t size, xfs_km_flags_t flags) 73kmem_zalloc(size_t size, xfs_km_flags_t flags)
76{ 74{
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index a9c66d47757a..9bd104f32908 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -763,8 +763,8 @@ xfs_bmap_extents_to_btree(
763 args.type = XFS_ALLOCTYPE_START_BNO; 763 args.type = XFS_ALLOCTYPE_START_BNO;
764 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); 764 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
765 } else if (dfops->dop_low) { 765 } else if (dfops->dop_low) {
766try_another_ag:
767 args.type = XFS_ALLOCTYPE_START_BNO; 766 args.type = XFS_ALLOCTYPE_START_BNO;
767try_another_ag:
768 args.fsbno = *firstblock; 768 args.fsbno = *firstblock;
769 } else { 769 } else {
770 args.type = XFS_ALLOCTYPE_NEAR_BNO; 770 args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -790,13 +790,17 @@ try_another_ag:
790 if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) && 790 if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
791 args.fsbno == NULLFSBLOCK && 791 args.fsbno == NULLFSBLOCK &&
792 args.type == XFS_ALLOCTYPE_NEAR_BNO) { 792 args.type == XFS_ALLOCTYPE_NEAR_BNO) {
793 dfops->dop_low = true; 793 args.type = XFS_ALLOCTYPE_FIRST_AG;
794 goto try_another_ag; 794 goto try_another_ag;
795 } 795 }
796 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
797 xfs_iroot_realloc(ip, -1, whichfork);
798 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
799 return -ENOSPC;
800 }
796 /* 801 /*
797 * Allocation can't fail, the space was reserved. 802 * Allocation can't fail, the space was reserved.
798 */ 803 */
799 ASSERT(args.fsbno != NULLFSBLOCK);
800 ASSERT(*firstblock == NULLFSBLOCK || 804 ASSERT(*firstblock == NULLFSBLOCK ||
801 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock)); 805 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
802 *firstblock = cur->bc_private.b.firstblock = args.fsbno; 806 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
@@ -4150,6 +4154,19 @@ xfs_bmapi_read(
4150 return 0; 4154 return 0;
4151} 4155}
4152 4156
4157/*
4158 * Add a delayed allocation extent to an inode. Blocks are reserved from the
4159 * global pool and the extent inserted into the inode in-core extent tree.
4160 *
4161 * On entry, got refers to the first extent beyond the offset of the extent to
4162 * allocate or eof is specified if no such extent exists. On return, got refers
4163 * to the extent record that was inserted to the inode fork.
4164 *
4165 * Note that the allocated extent may have been merged with contiguous extents
4166 * during insertion into the inode fork. Thus, got does not reflect the current
4167 * state of the inode fork on return. If necessary, the caller can use lastx to
4168 * look up the updated record in the inode fork.
4169 */
4153int 4170int
4154xfs_bmapi_reserve_delalloc( 4171xfs_bmapi_reserve_delalloc(
4155 struct xfs_inode *ip, 4172 struct xfs_inode *ip,
@@ -4236,13 +4253,8 @@ xfs_bmapi_reserve_delalloc(
4236 got->br_startblock = nullstartblock(indlen); 4253 got->br_startblock = nullstartblock(indlen);
4237 got->br_blockcount = alen; 4254 got->br_blockcount = alen;
4238 got->br_state = XFS_EXT_NORM; 4255 got->br_state = XFS_EXT_NORM;
4239 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
4240 4256
4241 /* 4257 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
4242 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
4243 * might have merged it into one of the neighbouring ones.
4244 */
4245 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
4246 4258
4247 /* 4259 /*
4248 * Tag the inode if blocks were preallocated. Note that COW fork 4260 * Tag the inode if blocks were preallocated. Note that COW fork
@@ -4254,10 +4266,6 @@ xfs_bmapi_reserve_delalloc(
4254 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len)) 4266 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4255 xfs_inode_set_cowblocks_tag(ip); 4267 xfs_inode_set_cowblocks_tag(ip);
4256 4268
4257 ASSERT(got->br_startoff <= aoff);
4258 ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
4259 ASSERT(isnullstartblock(got->br_startblock));
4260 ASSERT(got->br_state == XFS_EXT_NORM);
4261 return 0; 4269 return 0;
4262 4270
4263out_unreserve_blocks: 4271out_unreserve_blocks:
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index f93072b58a58..fd55db479385 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -447,8 +447,8 @@ xfs_bmbt_alloc_block(
447 447
448 if (args.fsbno == NULLFSBLOCK) { 448 if (args.fsbno == NULLFSBLOCK) {
449 args.fsbno = be64_to_cpu(start->l); 449 args.fsbno = be64_to_cpu(start->l);
450try_another_ag:
451 args.type = XFS_ALLOCTYPE_START_BNO; 450 args.type = XFS_ALLOCTYPE_START_BNO;
451try_another_ag:
452 /* 452 /*
453 * Make sure there is sufficient room left in the AG to 453 * Make sure there is sufficient room left in the AG to
454 * complete a full tree split for an extent insert. If 454 * complete a full tree split for an extent insert. If
@@ -488,8 +488,8 @@ try_another_ag:
488 if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) && 488 if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
489 args.fsbno == NULLFSBLOCK && 489 args.fsbno == NULLFSBLOCK &&
490 args.type == XFS_ALLOCTYPE_NEAR_BNO) { 490 args.type == XFS_ALLOCTYPE_NEAR_BNO) {
491 cur->bc_private.b.dfops->dop_low = true;
492 args.fsbno = cur->bc_private.b.firstblock; 491 args.fsbno = cur->bc_private.b.firstblock;
492 args.type = XFS_ALLOCTYPE_FIRST_AG;
493 goto try_another_ag; 493 goto try_another_ag;
494 } 494 }
495 495
@@ -506,7 +506,7 @@ try_another_ag:
506 goto error0; 506 goto error0;
507 cur->bc_private.b.dfops->dop_low = true; 507 cur->bc_private.b.dfops->dop_low = true;
508 } 508 }
509 if (args.fsbno == NULLFSBLOCK) { 509 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
510 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); 510 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
511 *stat = 0; 511 *stat = 0;
512 return 0; 512 return 0;
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index d04547fcf274..eb00bc133bca 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -125,6 +125,8 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
125extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); 125extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
126extern int xfs_dir2_sf_removename(struct xfs_da_args *args); 126extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
127extern int xfs_dir2_sf_replace(struct xfs_da_args *args); 127extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
128extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
129 int size);
128 130
129/* xfs_dir2_readdir.c */ 131/* xfs_dir2_readdir.c */
130extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, 132extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index c6809ff41197..96b45cd6c63f 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -629,6 +629,93 @@ xfs_dir2_sf_check(
629} 629}
630#endif /* DEBUG */ 630#endif /* DEBUG */
631 631
632/* Verify the consistency of an inline directory. */
633int
634xfs_dir2_sf_verify(
635 struct xfs_mount *mp,
636 struct xfs_dir2_sf_hdr *sfp,
637 int size)
638{
639 struct xfs_dir2_sf_entry *sfep;
640 struct xfs_dir2_sf_entry *next_sfep;
641 char *endp;
642 const struct xfs_dir_ops *dops;
643 xfs_ino_t ino;
644 int i;
645 int i8count;
646 int offset;
647 __uint8_t filetype;
648
649 dops = xfs_dir_get_ops(mp, NULL);
650
651 /*
652 * Give up if the directory is way too short.
653 */
654 XFS_WANT_CORRUPTED_RETURN(mp, size >
655 offsetof(struct xfs_dir2_sf_hdr, parent));
656 XFS_WANT_CORRUPTED_RETURN(mp, size >=
657 xfs_dir2_sf_hdr_size(sfp->i8count));
658
659 endp = (char *)sfp + size;
660
661 /* Check .. entry */
662 ino = dops->sf_get_parent_ino(sfp);
663 i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
664 XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
665 offset = dops->data_first_offset;
666
667 /* Check all reported entries */
668 sfep = xfs_dir2_sf_firstentry(sfp);
669 for (i = 0; i < sfp->count; i++) {
670 /*
671 * struct xfs_dir2_sf_entry has a variable length.
672 * Check the fixed-offset parts of the structure are
673 * within the data buffer.
674 */
675 XFS_WANT_CORRUPTED_RETURN(mp,
676 ((char *)sfep + sizeof(*sfep)) < endp);
677
678 /* Don't allow names with known bad length. */
679 XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
680 XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
681
682 /*
683 * Check that the variable-length part of the structure is
684 * within the data buffer. The next entry starts after the
685 * name component, so nextentry is an acceptable test.
686 */
687 next_sfep = dops->sf_nextentry(sfp, sfep);
688 XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
689
690 /* Check that the offsets always increase. */
691 XFS_WANT_CORRUPTED_RETURN(mp,
692 xfs_dir2_sf_get_offset(sfep) >= offset);
693
694 /* Check the inode number. */
695 ino = dops->sf_get_ino(sfp, sfep);
696 i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
697 XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
698
699 /* Check the file type. */
700 filetype = dops->sf_get_ftype(sfep);
701 XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
702
703 offset = xfs_dir2_sf_get_offset(sfep) +
704 dops->data_entsize(sfep->namelen);
705
706 sfep = next_sfep;
707 }
708 XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
709 XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
710
711 /* Make sure this whole thing ought to be in local format. */
712 XFS_WANT_CORRUPTED_RETURN(mp, offset +
713 (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
714 (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
715
716 return 0;
717}
718
632/* 719/*
633 * Create a new (shortform) directory. 720 * Create a new (shortform) directory.
634 */ 721 */
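The closing check in xfs_dir2_sf_verify() confirms the directory truly belongs in local format: converted to a single directory block, it would need one xfs_dir2_leaf_entry_t per entry plus two more (for . and ..) and a block tail, and all of that must still fit in one block. As a worked example with assumed on-disk sizes (8-byte leaf entry, 8-byte block tail, 4096-byte directory block): a shortform directory with count = 3 whose last entry ends at data offset 2000 passes because 2000 + (3 + 2) * 8 + 8 = 2048 <= 4096.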
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 25c1e078aef6..9653e964eda4 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -33,6 +33,8 @@
33#include "xfs_trace.h" 33#include "xfs_trace.h"
34#include "xfs_attr_sf.h" 34#include "xfs_attr_sf.h"
35#include "xfs_da_format.h" 35#include "xfs_da_format.h"
36#include "xfs_da_btree.h"
37#include "xfs_dir2_priv.h"
36 38
37kmem_zone_t *xfs_ifork_zone; 39kmem_zone_t *xfs_ifork_zone;
38 40
@@ -320,6 +322,7 @@ xfs_iformat_local(
320 int whichfork, 322 int whichfork,
321 int size) 323 int size)
322{ 324{
325 int error;
323 326
324 /* 327 /*
325 * If the size is unreasonable, then something 328 * If the size is unreasonable, then something
@@ -336,6 +339,14 @@ xfs_iformat_local(
336 return -EFSCORRUPTED; 339 return -EFSCORRUPTED;
337 } 340 }
338 341
342 if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
343 error = xfs_dir2_sf_verify(ip->i_mount,
344 (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
345 size);
346 if (error)
347 return error;
348 }
349
339 xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); 350 xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
340 return 0; 351 return 0;
341} 352}
@@ -856,7 +867,7 @@ xfs_iextents_copy(
856 * In these cases, the format always takes precedence, because the 867 * In these cases, the format always takes precedence, because the
857 * format indicates the current state of the fork. 868 * format indicates the current state of the fork.
858 */ 869 */
859void 870int
860xfs_iflush_fork( 871xfs_iflush_fork(
861 xfs_inode_t *ip, 872 xfs_inode_t *ip,
862 xfs_dinode_t *dip, 873 xfs_dinode_t *dip,
@@ -866,6 +877,7 @@ xfs_iflush_fork(
866 char *cp; 877 char *cp;
867 xfs_ifork_t *ifp; 878 xfs_ifork_t *ifp;
868 xfs_mount_t *mp; 879 xfs_mount_t *mp;
880 int error;
869 static const short brootflag[2] = 881 static const short brootflag[2] =
870 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 882 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
871 static const short dataflag[2] = 883 static const short dataflag[2] =
@@ -874,7 +886,7 @@ xfs_iflush_fork(
874 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 886 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
875 887
876 if (!iip) 888 if (!iip)
877 return; 889 return 0;
878 ifp = XFS_IFORK_PTR(ip, whichfork); 890 ifp = XFS_IFORK_PTR(ip, whichfork);
879 /* 891 /*
880 * This can happen if we gave up in iformat in an error path, 892 * This can happen if we gave up in iformat in an error path,
@@ -882,12 +894,19 @@ xfs_iflush_fork(
882 */ 894 */
883 if (!ifp) { 895 if (!ifp) {
884 ASSERT(whichfork == XFS_ATTR_FORK); 896 ASSERT(whichfork == XFS_ATTR_FORK);
885 return; 897 return 0;
886 } 898 }
887 cp = XFS_DFORK_PTR(dip, whichfork); 899 cp = XFS_DFORK_PTR(dip, whichfork);
888 mp = ip->i_mount; 900 mp = ip->i_mount;
889 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 901 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
890 case XFS_DINODE_FMT_LOCAL: 902 case XFS_DINODE_FMT_LOCAL:
903 if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
904 error = xfs_dir2_sf_verify(mp,
905 (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
906 ifp->if_bytes);
907 if (error)
908 return error;
909 }
891 if ((iip->ili_fields & dataflag[whichfork]) && 910 if ((iip->ili_fields & dataflag[whichfork]) &&
892 (ifp->if_bytes > 0)) { 911 (ifp->if_bytes > 0)) {
893 ASSERT(ifp->if_u1.if_data != NULL); 912 ASSERT(ifp->if_u1.if_data != NULL);
@@ -940,6 +959,7 @@ xfs_iflush_fork(
940 ASSERT(0); 959 ASSERT(0);
941 break; 960 break;
942 } 961 }
962 return 0;
943} 963}
944 964
945/* 965/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 7fb8365326d1..132dc59fdde6 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
140struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); 140struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
141 141
142int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); 142int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
143void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, 143int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
144 struct xfs_inode_log_item *, int); 144 struct xfs_inode_log_item *, int);
145void xfs_idestroy_fork(struct xfs_inode *, int); 145void xfs_idestroy_fork(struct xfs_inode *, int);
146void xfs_idata_realloc(struct xfs_inode *, int, int); 146void xfs_idata_realloc(struct xfs_inode *, int, int);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index bf65a9ea8642..61494295d92f 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -274,54 +274,49 @@ xfs_end_io(
274 struct xfs_ioend *ioend = 274 struct xfs_ioend *ioend =
275 container_of(work, struct xfs_ioend, io_work); 275 container_of(work, struct xfs_ioend, io_work);
276 struct xfs_inode *ip = XFS_I(ioend->io_inode); 276 struct xfs_inode *ip = XFS_I(ioend->io_inode);
277 xfs_off_t offset = ioend->io_offset;
278 size_t size = ioend->io_size;
277 int error = ioend->io_bio->bi_error; 279 int error = ioend->io_bio->bi_error;
278 280
279 /* 281 /*
 280 * Set an error if the mount has shut down and proceed with end I/O 282 * Just clean up the in-memory structures if the fs has been shut down.
281 * processing so it can perform whatever cleanups are necessary.
282 */ 283 */
283 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 284 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
284 error = -EIO; 285 error = -EIO;
286 goto done;
287 }
285 288
286 /* 289 /*
287 * For a CoW extent, we need to move the mapping from the CoW fork 290 * Clean up any COW blocks on an I/O error.
288 * to the data fork. If instead an error happened, just dump the
289 * new blocks.
290 */ 291 */
291 if (ioend->io_type == XFS_IO_COW) { 292 if (unlikely(error)) {
292 if (error) 293 switch (ioend->io_type) {
293 goto done; 294 case XFS_IO_COW:
294 if (ioend->io_bio->bi_error) { 295 xfs_reflink_cancel_cow_range(ip, offset, size, true);
295 error = xfs_reflink_cancel_cow_range(ip, 296 break;
296 ioend->io_offset, ioend->io_size);
297 goto done;
298 } 297 }
299 error = xfs_reflink_end_cow(ip, ioend->io_offset, 298
300 ioend->io_size); 299 goto done;
301 if (error)
302 goto done;
303 } 300 }
304 301
305 /* 302 /*
306 * For unwritten extents we need to issue transactions to convert a 303 * Success: commit the COW or unwritten blocks if needed.
307 * range to normal written extens after the data I/O has finished.
308 * Detecting and handling completion IO errors is done individually
309 * for each case as different cleanup operations need to be performed
310 * on error.
311 */ 304 */
312 if (ioend->io_type == XFS_IO_UNWRITTEN) { 305 switch (ioend->io_type) {
313 if (error) 306 case XFS_IO_COW:
314 goto done; 307 error = xfs_reflink_end_cow(ip, offset, size);
315 error = xfs_iomap_write_unwritten(ip, ioend->io_offset, 308 break;
316 ioend->io_size); 309 case XFS_IO_UNWRITTEN:
317 } else if (ioend->io_append_trans) { 310 error = xfs_iomap_write_unwritten(ip, offset, size);
318 error = xfs_setfilesize_ioend(ioend, error); 311 break;
319 } else { 312 default:
320 ASSERT(!xfs_ioend_is_append(ioend) || 313 ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
321 ioend->io_type == XFS_IO_COW); 314 break;
322 } 315 }
323 316
324done: 317done:
318 if (ioend->io_append_trans)
319 error = xfs_setfilesize_ioend(ioend, error);
325 xfs_destroy_ioend(ioend, error); 320 xfs_destroy_ioend(ioend, error);
326} 321}
327 322
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 003a99b83bd8..ad9396e516f6 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -71,22 +71,11 @@ xfs_dir2_sf_getdents(
71 struct xfs_da_geometry *geo = args->geo; 71 struct xfs_da_geometry *geo = args->geo;
72 72
73 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 73 ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
74 /*
75 * Give up if the directory is way too short.
76 */
77 if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
78 ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
79 return -EIO;
80 }
81
82 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 74 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
83 ASSERT(dp->i_df.if_u1.if_data != NULL); 75 ASSERT(dp->i_df.if_u1.if_data != NULL);
84 76
85 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 77 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
86 78
87 if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
88 return -EFSCORRUPTED;
89
90 /* 79 /*
91 * If the block number in the offset is out of range, we're done. 80 * If the block number in the offset is out of range, we're done.
92 */ 81 */
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 7234b9748c36..3531f8f72fa5 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1608,7 +1608,7 @@ xfs_inode_free_cowblocks(
1608 xfs_ilock(ip, XFS_IOLOCK_EXCL); 1608 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1609 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); 1609 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1610 1610
1611 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF); 1611 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1612 1612
1613 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); 1613 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1614 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1614 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index edfa6a55b064..c7fe2c2123ab 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1615,7 +1615,7 @@ xfs_itruncate_extents(
1615 1615
1616 /* Remove all pending CoW reservations. */ 1616 /* Remove all pending CoW reservations. */
1617 error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block, 1617 error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
1618 last_block); 1618 last_block, true);
1619 if (error) 1619 if (error)
1620 goto out; 1620 goto out;
1621 1621
@@ -3475,6 +3475,7 @@ xfs_iflush_int(
3475 struct xfs_inode_log_item *iip = ip->i_itemp; 3475 struct xfs_inode_log_item *iip = ip->i_itemp;
3476 struct xfs_dinode *dip; 3476 struct xfs_dinode *dip;
3477 struct xfs_mount *mp = ip->i_mount; 3477 struct xfs_mount *mp = ip->i_mount;
3478 int error;
3478 3479
3479 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3480 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3480 ASSERT(xfs_isiflocked(ip)); 3481 ASSERT(xfs_isiflocked(ip));
@@ -3557,9 +3558,14 @@ xfs_iflush_int(
3557 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3558 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3558 ip->i_d.di_flushiter = 0; 3559 ip->i_d.di_flushiter = 0;
3559 3560
3560 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3561 error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3561 if (XFS_IFORK_Q(ip)) 3562 if (error)
3562 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); 3563 return error;
3564 if (XFS_IFORK_Q(ip)) {
3565 error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3566 if (error)
3567 return error;
3568 }
3563 xfs_inobp_check(mp, bp); 3569 xfs_inobp_check(mp, bp);
3564 3570
3565 /* 3571 /*
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 41662fb14e87..288ee5b840d7 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -630,6 +630,11 @@ retry:
630 goto out_unlock; 630 goto out_unlock;
631 } 631 }
632 632
633 /*
634 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
635 * them out if the write happens to fail.
636 */
637 iomap->flags = IOMAP_F_NEW;
633 trace_xfs_iomap_alloc(ip, offset, count, 0, &got); 638 trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
634done: 639done:
635 if (isnullstartblock(got.br_startblock)) 640 if (isnullstartblock(got.br_startblock))
@@ -1071,16 +1076,22 @@ xfs_file_iomap_end_delalloc(
1071 struct xfs_inode *ip, 1076 struct xfs_inode *ip,
1072 loff_t offset, 1077 loff_t offset,
1073 loff_t length, 1078 loff_t length,
1074 ssize_t written) 1079 ssize_t written,
1080 struct iomap *iomap)
1075{ 1081{
1076 struct xfs_mount *mp = ip->i_mount; 1082 struct xfs_mount *mp = ip->i_mount;
1077 xfs_fileoff_t start_fsb; 1083 xfs_fileoff_t start_fsb;
1078 xfs_fileoff_t end_fsb; 1084 xfs_fileoff_t end_fsb;
1079 int error = 0; 1085 int error = 0;
1080 1086
1081 /* behave as if the write failed if drop writes is enabled */ 1087 /*
1082 if (xfs_mp_drop_writes(mp)) 1088 * Behave as if the write failed if drop writes is enabled. Set the NEW
1089 * flag to force delalloc cleanup.
1090 */
1091 if (xfs_mp_drop_writes(mp)) {
1092 iomap->flags |= IOMAP_F_NEW;
1083 written = 0; 1093 written = 0;
1094 }
1084 1095
1085 /* 1096 /*
1086 * start_fsb refers to the first unused block after a short write. If 1097 * start_fsb refers to the first unused block after a short write. If
@@ -1094,14 +1105,14 @@ xfs_file_iomap_end_delalloc(
1094 end_fsb = XFS_B_TO_FSB(mp, offset + length); 1105 end_fsb = XFS_B_TO_FSB(mp, offset + length);
1095 1106
1096 /* 1107 /*
1097 * Trim back delalloc blocks if we didn't manage to write the whole 1108 * Trim delalloc blocks if they were allocated by this write and we
1098 * range reserved. 1109 * didn't manage to write the whole range.
1099 * 1110 *
1100 * We don't need to care about racing delalloc as we hold i_mutex 1111 * We don't need to care about racing delalloc as we hold i_mutex
1101 * across the reserve/allocate/unreserve calls. If there are delalloc 1112 * across the reserve/allocate/unreserve calls. If there are delalloc
1102 * blocks in the range, they are ours. 1113 * blocks in the range, they are ours.
1103 */ 1114 */
1104 if (start_fsb < end_fsb) { 1115 if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
1105 truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb), 1116 truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
1106 XFS_FSB_TO_B(mp, end_fsb) - 1); 1117 XFS_FSB_TO_B(mp, end_fsb) - 1);
1107 1118
@@ -1131,7 +1142,7 @@ xfs_file_iomap_end(
1131{ 1142{
1132 if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC) 1143 if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
1133 return xfs_file_iomap_end_delalloc(XFS_I(inode), offset, 1144 return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
1134 length, written); 1145 length, written, iomap);
1135 return 0; 1146 return 0;
1136} 1147}
1137 1148
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 66e881790c17..2a6d9b1558e0 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -361,7 +361,6 @@ xfs_bulkstat(
361 xfs_agino_t agino; /* inode # in allocation group */ 361 xfs_agino_t agino; /* inode # in allocation group */
362 xfs_agnumber_t agno; /* allocation group number */ 362 xfs_agnumber_t agno; /* allocation group number */
363 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ 363 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
364 size_t irbsize; /* size of irec buffer in bytes */
365 xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ 364 xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
366 int nirbuf; /* size of irbuf */ 365 int nirbuf; /* size of irbuf */
367 int ubcount; /* size of user's buffer */ 366 int ubcount; /* size of user's buffer */
@@ -388,11 +387,10 @@ xfs_bulkstat(
388 *ubcountp = 0; 387 *ubcountp = 0;
389 *done = 0; 388 *done = 0;
390 389
391 irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); 390 irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
392 if (!irbuf) 391 if (!irbuf)
393 return -ENOMEM; 392 return -ENOMEM;
394 393 nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);
395 nirbuf = irbsize / sizeof(*irbuf);
396 394
397 /* 395 /*
398 * Loop over the allocation groups, starting from the last 396 * Loop over the allocation groups, starting from the last
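This is the lone caller of the just-removed kmem_zalloc_greedy() (see the fs/xfs/kmem.c hunk above). The removed helper retried vzalloc() forever once it clamped to minsize, so it could never actually return NULL and its trailing if (ptr) test was dead code; the fixed-size replacement restores a real failure path. The shape of the new call, annotated (a sketch, assuming kmem_zalloc_large() means a zeroed, possibly vmalloc-backed allocation that may sleep):

irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
if (!irbuf)
        return -ENOMEM;                      /* allocation can now fail */
nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);   /* records that fit in 4 pages */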
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 450bde68bb75..688ebff1f663 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -513,8 +513,7 @@ STATIC void
513xfs_set_inoalignment(xfs_mount_t *mp) 513xfs_set_inoalignment(xfs_mount_t *mp)
514{ 514{
515 if (xfs_sb_version_hasalign(&mp->m_sb) && 515 if (xfs_sb_version_hasalign(&mp->m_sb) &&
516 mp->m_sb.sb_inoalignmt >= 516 mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
517 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
518 mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; 517 mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
519 else 518 else
520 mp->m_inoalign_mask = 0; 519 mp->m_inoalign_mask = 0;
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index da6d08fb359c..4a84c5ea266d 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -548,14 +548,18 @@ xfs_reflink_trim_irec_to_next_cow(
548} 548}
549 549
550/* 550/*
551 * Cancel all pending CoW reservations for some block range of an inode. 551 * Cancel CoW reservations for some block range of an inode.
552 *
553 * If cancel_real is true this function cancels all COW fork extents for the
554 * inode; if cancel_real is false, real extents are not cleared.
552 */ 555 */
553int 556int
554xfs_reflink_cancel_cow_blocks( 557xfs_reflink_cancel_cow_blocks(
555 struct xfs_inode *ip, 558 struct xfs_inode *ip,
556 struct xfs_trans **tpp, 559 struct xfs_trans **tpp,
557 xfs_fileoff_t offset_fsb, 560 xfs_fileoff_t offset_fsb,
558 xfs_fileoff_t end_fsb) 561 xfs_fileoff_t end_fsb,
562 bool cancel_real)
559{ 563{
560 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); 564 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
561 struct xfs_bmbt_irec got, del; 565 struct xfs_bmbt_irec got, del;
@@ -579,7 +583,7 @@ xfs_reflink_cancel_cow_blocks(
579 &idx, &got, &del); 583 &idx, &got, &del);
580 if (error) 584 if (error)
581 break; 585 break;
582 } else { 586 } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
583 xfs_trans_ijoin(*tpp, ip, 0); 587 xfs_trans_ijoin(*tpp, ip, 0);
584 xfs_defer_init(&dfops, &firstfsb); 588 xfs_defer_init(&dfops, &firstfsb);
585 589
@@ -621,13 +625,17 @@ xfs_reflink_cancel_cow_blocks(
621} 625}
622 626
623/* 627/*
624 * Cancel all pending CoW reservations for some byte range of an inode. 628 * Cancel CoW reservations for some byte range of an inode.
629 *
630 * If cancel_real is true this function cancels all COW fork extents for the
631 * inode; if cancel_real is false, real extents are not cleared.
625 */ 632 */
626int 633int
627xfs_reflink_cancel_cow_range( 634xfs_reflink_cancel_cow_range(
628 struct xfs_inode *ip, 635 struct xfs_inode *ip,
629 xfs_off_t offset, 636 xfs_off_t offset,
630 xfs_off_t count) 637 xfs_off_t count,
638 bool cancel_real)
631{ 639{
632 struct xfs_trans *tp; 640 struct xfs_trans *tp;
633 xfs_fileoff_t offset_fsb; 641 xfs_fileoff_t offset_fsb;
@@ -653,7 +661,8 @@ xfs_reflink_cancel_cow_range(
653 xfs_trans_ijoin(tp, ip, 0); 661 xfs_trans_ijoin(tp, ip, 0);
654 662
655 /* Scrape out the old CoW reservations */ 663 /* Scrape out the old CoW reservations */
656 error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb); 664 error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
665 cancel_real);
657 if (error) 666 if (error)
658 goto out_cancel; 667 goto out_cancel;
659 668
@@ -1450,7 +1459,7 @@ next:
1450 * We didn't find any shared blocks so turn off the reflink flag. 1459 * We didn't find any shared blocks so turn off the reflink flag.
1451 * First, get rid of any leftover CoW mappings. 1460 * First, get rid of any leftover CoW mappings.
1452 */ 1461 */
1453 error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF); 1462 error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
1454 if (error) 1463 if (error)
1455 return error; 1464 return error;
1456 1465
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 33ac9b8db683..d29a7967f029 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -39,9 +39,9 @@ extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
39 39
40extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip, 40extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
41 struct xfs_trans **tpp, xfs_fileoff_t offset_fsb, 41 struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
42 xfs_fileoff_t end_fsb); 42 xfs_fileoff_t end_fsb, bool cancel_real);
43extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset, 43extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
44 xfs_off_t count); 44 xfs_off_t count, bool cancel_real);
45extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, 45extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
46 xfs_off_t count); 46 xfs_off_t count);
47extern int xfs_reflink_recover_cow(struct xfs_mount *mp); 47extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 890862f2447c..685c042a120f 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -953,7 +953,7 @@ xfs_fs_destroy_inode(
953 XFS_STATS_INC(ip->i_mount, vn_remove); 953 XFS_STATS_INC(ip->i_mount, vn_remove);
954 954
955 if (xfs_is_reflink_inode(ip)) { 955 if (xfs_is_reflink_inode(ip)) {
956 error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF); 956 error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
957 if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) 957 if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
958 xfs_warn(ip->i_mount, 958 xfs_warn(ip->i_mount,
959"Error %d while evicting CoW blocks for inode %llu.", 959"Error %d while evicting CoW blocks for inode %llu.",
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 5bdab6bffd23..928fd66b1271 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -15,7 +15,6 @@
15 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ 15 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
16 NULL: pmd_offset(pud, address)) 16 NULL: pmd_offset(pud, address))
17 17
18#define pud_alloc(mm, pgd, address) (pgd)
19#define pud_offset(pgd, start) (pgd) 18#define pud_offset(pgd, start) (pgd)
20#define pud_none(pud) 0 19#define pud_none(pud) 0
21#define pud_bad(pud) 0 20#define pud_bad(pud) 0
@@ -35,4 +34,6 @@
35#undef pud_addr_end 34#undef pud_addr_end
36#define pud_addr_end(addr, end) (end) 35#define pud_addr_end(addr, end) (end)
37 36
37#include <asm-generic/5level-fixup.h>
38
38#endif 39#endif
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
new file mode 100644
index 000000000000..b5ca82dc4175
--- /dev/null
+++ b/include/asm-generic/5level-fixup.h
@@ -0,0 +1,41 @@
1#ifndef _5LEVEL_FIXUP_H
2#define _5LEVEL_FIXUP_H
3
4#define __ARCH_HAS_5LEVEL_HACK
5#define __PAGETABLE_P4D_FOLDED
6
7#define P4D_SHIFT PGDIR_SHIFT
8#define P4D_SIZE PGDIR_SIZE
9#define P4D_MASK PGDIR_MASK
10#define PTRS_PER_P4D 1
11
12#define p4d_t pgd_t
13
14#define pud_alloc(mm, p4d, address) \
15 ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
16 NULL : pud_offset(p4d, address))
17
18#define p4d_alloc(mm, pgd, address) (pgd)
19#define p4d_offset(pgd, start) (pgd)
20#define p4d_none(p4d) 0
21#define p4d_bad(p4d) 0
22#define p4d_present(p4d) 1
23#define p4d_ERROR(p4d) do { } while (0)
24#define p4d_clear(p4d) pgd_clear(p4d)
25#define p4d_val(p4d) pgd_val(p4d)
26#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
27#define p4d_page(p4d) pgd_page(p4d)
28#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
29
30#define __p4d(x) __pgd(x)
31#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d)
32
33#undef p4d_free_tlb
34#define p4d_free_tlb(tlb, x, addr) do { } while (0)
35#define p4d_free(mm, x) do { } while (0)
36#define __p4d_free_tlb(tlb, x, addr) do { } while (0)
37
38#undef p4d_addr_end
39#define p4d_addr_end(addr, end) (end)
40
41#endif
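Architectures that still use folded page tables do not include this header directly; judging from the pgtable-nopud.h hunk below, they define __ARCH_USE_5LEVEL_HACK and the fixup is pulled in for them (4level-fixup.h likewise now chains into it). A sketch of the opt-in as it would presumably appear in an architecture header (path and placement assumed):

/* arch/<arch>/include/asm/pgtable.h -- hypothetical placement */
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>  /* selects pgtable-nop4d-hack.h,
                                           which includes 5level-fixup.h */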
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
new file mode 100644
index 000000000000..752fb7511750
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -0,0 +1,62 @@
1#ifndef _PGTABLE_NOP4D_HACK_H
2#define _PGTABLE_NOP4D_HACK_H
3
4#ifndef __ASSEMBLY__
5#include <asm-generic/5level-fixup.h>
6
7#define __PAGETABLE_PUD_FOLDED
8
9/*
10 * Having the pud type consist of a pgd gets the size right, and allows
11 * us to conceptually access the pgd entry that this pud is folded into
12 * without casting.
13 */
14typedef struct { pgd_t pgd; } pud_t;
15
16#define PUD_SHIFT PGDIR_SHIFT
17#define PTRS_PER_PUD 1
18#define PUD_SIZE (1UL << PUD_SHIFT)
19#define PUD_MASK (~(PUD_SIZE-1))
20
21/*
22 * The "pgd_xxx()" functions here are trivial for a folded two-level
23 * setup: the pud is never bad, and a pud always exists (as it's folded
24 * into the pgd entry)
25 */
26static inline int pgd_none(pgd_t pgd) { return 0; }
27static inline int pgd_bad(pgd_t pgd) { return 0; }
28static inline int pgd_present(pgd_t pgd) { return 1; }
29static inline void pgd_clear(pgd_t *pgd) { }
30#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
31
32#define pgd_populate(mm, pgd, pud) do { } while (0)
33/*
34 * (puds are folded into pgds so this doesn't get actually called,
35 * but the define is needed for a generic inline function.)
36 */
37#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
38
39static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
40{
41 return (pud_t *)pgd;
42}
43
44#define pud_val(x) (pgd_val((x).pgd))
45#define __pud(x) ((pud_t) { __pgd(x) })
46
47#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
48#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
49
50/*
51 * allocating and freeing a pud is trivial: the 1-entry pud is
52 * inside the pgd, so has no extra memory associated with it.
53 */
54#define pud_alloc_one(mm, address) NULL
55#define pud_free(mm, x) do { } while (0)
56#define __pud_free_tlb(tlb, x, a) do { } while (0)
57
58#undef pud_addr_end
59#define pud_addr_end(addr, end) (end)
60
61#endif /* __ASSEMBLY__ */
62#endif /* _PGTABLE_NOP4D_HACK_H */
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
new file mode 100644
index 000000000000..de364ecb8df6
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -0,0 +1,56 @@
1#ifndef _PGTABLE_NOP4D_H
2#define _PGTABLE_NOP4D_H
3
4#ifndef __ASSEMBLY__
5
6#define __PAGETABLE_P4D_FOLDED
7
8typedef struct { pgd_t pgd; } p4d_t;
9
10#define P4D_SHIFT PGDIR_SHIFT
11#define PTRS_PER_P4D 1
12#define P4D_SIZE (1UL << P4D_SHIFT)
13#define P4D_MASK (~(P4D_SIZE-1))
14
15/*
16 * The "pgd_xxx()" functions here are trivial for a folded two-level
17 * setup: the p4d is never bad, and a p4d always exists (as it's folded
18 * into the pgd entry)
19 */
20static inline int pgd_none(pgd_t pgd) { return 0; }
21static inline int pgd_bad(pgd_t pgd) { return 0; }
22static inline int pgd_present(pgd_t pgd) { return 1; }
23static inline void pgd_clear(pgd_t *pgd) { }
24#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
25
26#define pgd_populate(mm, pgd, p4d) do { } while (0)
27/*
28 * (p4ds are folded into pgds so this doesn't get actually called,
29 * but the define is needed for a generic inline function.)
30 */
31#define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval })
32
33static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
34{
35 return (p4d_t *)pgd;
36}
37
38#define p4d_val(x) (pgd_val((x).pgd))
39#define __p4d(x) ((p4d_t) { __pgd(x) })
40
41#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
42#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd }))
43
44/*
45 * allocating and freeing a p4d is trivial: the 1-entry p4d is
46 * inside the pgd, so has no extra memory associated with it.
47 */
48#define p4d_alloc_one(mm, address) NULL
49#define p4d_free(mm, x) do { } while (0)
50#define __p4d_free_tlb(tlb, x, a) do { } while (0)
51
52#undef p4d_addr_end
53#define p4d_addr_end(addr, end) (end)
54
55#endif /* __ASSEMBLY__ */
56#endif /* _PGTABLE_NOP4D_H */
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index 810431d8351b..c2b9b96d6268 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -3,52 +3,57 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6#ifdef __ARCH_USE_5LEVEL_HACK
7#include <asm-generic/pgtable-nop4d-hack.h>
8#else
9#include <asm-generic/pgtable-nop4d.h>
10
6#define __PAGETABLE_PUD_FOLDED 11#define __PAGETABLE_PUD_FOLDED
7 12
8/* 13/*
9 * Having the pud type consist of a pgd gets the size right, and allows 14 * Having the pud type consist of a p4d gets the size right, and allows
10 * us to conceptually access the pgd entry that this pud is folded into 15 * us to conceptually access the p4d entry that this pud is folded into
11 * without casting. 16 * without casting.
12 */ 17 */
13typedef struct { pgd_t pgd; } pud_t; 18typedef struct { p4d_t p4d; } pud_t;
14 19
15#define PUD_SHIFT PGDIR_SHIFT 20#define PUD_SHIFT P4D_SHIFT
16#define PTRS_PER_PUD 1 21#define PTRS_PER_PUD 1
17#define PUD_SIZE (1UL << PUD_SHIFT) 22#define PUD_SIZE (1UL << PUD_SHIFT)
18#define PUD_MASK (~(PUD_SIZE-1)) 23#define PUD_MASK (~(PUD_SIZE-1))
19 24
20/* 25/*
21 * The "pgd_xxx()" functions here are trivial for a folded two-level 26 * The "p4d_xxx()" functions here are trivial for a folded two-level
22 * setup: the pud is never bad, and a pud always exists (as it's folded 27 * setup: the pud is never bad, and a pud always exists (as it's folded
23 * into the pgd entry) 28 * into the p4d entry)
24 */ 29 */
25static inline int pgd_none(pgd_t pgd) { return 0; } 30static inline int p4d_none(p4d_t p4d) { return 0; }
26static inline int pgd_bad(pgd_t pgd) { return 0; } 31static inline int p4d_bad(p4d_t p4d) { return 0; }
27static inline int pgd_present(pgd_t pgd) { return 1; } 32static inline int p4d_present(p4d_t p4d) { return 1; }
28static inline void pgd_clear(pgd_t *pgd) { } 33static inline void p4d_clear(p4d_t *p4d) { }
29#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) 34#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
30 35
31#define pgd_populate(mm, pgd, pud) do { } while (0) 36#define p4d_populate(mm, p4d, pud) do { } while (0)
32/* 37/*
33 * (puds are folded into pgds so this doesn't get actually called, 38 * (puds are folded into p4ds so this doesn't get actually called,
34 * but the define is needed for a generic inline function.) 39 * but the define is needed for a generic inline function.)
35 */ 40 */
36#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) 41#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval })
37 42
38static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) 43static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
39{ 44{
40 return (pud_t *)pgd; 45 return (pud_t *)p4d;
41} 46}
42 47
43#define pud_val(x) (pgd_val((x).pgd)) 48#define pud_val(x) (p4d_val((x).p4d))
44#define __pud(x) ((pud_t) { __pgd(x) } ) 49#define __pud(x) ((pud_t) { __p4d(x) })
45 50
46#define pgd_page(pgd) (pud_page((pud_t){ pgd })) 51#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
47#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) 52#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
48 53
49/* 54/*
50 * allocating and freeing a pud is trivial: the 1-entry pud is 55 * allocating and freeing a pud is trivial: the 1-entry pud is
51 * inside the pgd, so has no extra memory associated with it. 56 * inside the p4d, so has no extra memory associated with it.
52 */ 57 */
53#define pud_alloc_one(mm, address) NULL 58#define pud_alloc_one(mm, address) NULL
54#define pud_free(mm, x) do { } while (0) 59#define pud_free(mm, x) do { } while (0)
@@ -58,4 +63,5 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
58#define pud_addr_end(addr, end) (end) 63#define pud_addr_end(addr, end) (end)
59 64
60#endif /* __ASSEMBLY__ */ 65#endif /* __ASSEMBLY__ */
66#endif /* !__ARCH_USE_5LEVEL_HACK */
61#endif /* _PGTABLE_NOPUD_H */ 67#endif /* _PGTABLE_NOPUD_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f4ca23b158b3..1fad160f35de 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -10,9 +10,9 @@
10#include <linux/bug.h> 10#include <linux/bug.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12 12
13#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \ 13#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
14 CONFIG_PGTABLE_LEVELS 14 defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
15#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED 15#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
16#endif 16#endif
17 17
18/* 18/*
@@ -424,6 +424,13 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
424 (__boundary - 1 < (end) - 1)? __boundary: (end); \ 424 (__boundary - 1 < (end) - 1)? __boundary: (end); \
425}) 425})
426 426
427#ifndef p4d_addr_end
428#define p4d_addr_end(addr, end) \
429({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
430 (__boundary - 1 < (end) - 1)? __boundary: (end); \
431})
432#endif
433
427#ifndef pud_addr_end 434#ifndef pud_addr_end
428#define pud_addr_end(addr, end) \ 435#define pud_addr_end(addr, end) \
429({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ 436({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
@@ -444,6 +451,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
444 * Do the tests inline, but report and clear the bad entry in mm/memory.c. 451 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
445 */ 452 */
446void pgd_clear_bad(pgd_t *); 453void pgd_clear_bad(pgd_t *);
454void p4d_clear_bad(p4d_t *);
447void pud_clear_bad(pud_t *); 455void pud_clear_bad(pud_t *);
448void pmd_clear_bad(pmd_t *); 456void pmd_clear_bad(pmd_t *);
449 457
@@ -458,6 +466,17 @@ static inline int pgd_none_or_clear_bad(pgd_t *pgd)
458 return 0; 466 return 0;
459} 467}
460 468
469static inline int p4d_none_or_clear_bad(p4d_t *p4d)
470{
471 if (p4d_none(*p4d))
472 return 1;
473 if (unlikely(p4d_bad(*p4d))) {
474 p4d_clear_bad(p4d);
475 return 1;
476 }
477 return 0;
478}
479
461static inline int pud_none_or_clear_bad(pud_t *pud) 480static inline int pud_none_or_clear_bad(pud_t *pud)
462{ 481{
463 if (pud_none(*pud)) 482 if (pud_none(*pud))
@@ -844,11 +863,30 @@ static inline int pmd_protnone(pmd_t pmd)
844#endif /* CONFIG_MMU */ 863#endif /* CONFIG_MMU */
845 864
846#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP 865#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
866
867#ifndef __PAGETABLE_P4D_FOLDED
868int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
869int p4d_clear_huge(p4d_t *p4d);
870#else
871static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
872{
873 return 0;
874}
875static inline int p4d_clear_huge(p4d_t *p4d)
876{
877 return 0;
878}
879#endif /* !__PAGETABLE_P4D_FOLDED */
880
847int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); 881int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
848int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); 882int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
849int pud_clear_huge(pud_t *pud); 883int pud_clear_huge(pud_t *pud);
850int pmd_clear_huge(pmd_t *pmd); 884int pmd_clear_huge(pmd_t *pmd);
851#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ 885#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
886static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
887{
888 return 0;
889}
852static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) 890static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
853{ 891{
854 return 0; 892 return 0;
@@ -857,6 +895,10 @@ static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
857{ 895{
858 return 0; 896 return 0;
859} 897}
898static inline int p4d_clear_huge(p4d_t *p4d)
899{
900 return 0;
901}
860static inline int pud_clear_huge(pud_t *pud) 902static inline int pud_clear_huge(pud_t *pud)
861{ 903{
862 return 0; 904 return 0;
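The generic helpers above are what make the extra level mechanical for page-table walkers. As a hedged illustration (walk_pud_range() is a made-up callback, not a kernel API), a per-pgd-entry walk over the new level would look like:

	/* Minimal sketch, assuming the generic p4d helpers added above. */
	static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
	{
		p4d_t *p4d = p4d_offset(pgd, addr);
		unsigned long next;

		do {
			next = p4d_addr_end(addr, end);		/* clamp to a P4D_SIZE boundary */
			if (p4d_none_or_clear_bad(p4d))		/* skip empty, report corrupt */
				continue;
			walk_pud_range(p4d, addr, next);	/* hypothetical next level down */
		} while (p4d++, addr = next, addr != end);

		return 0;
	}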
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 4329bc6ef04b..8afa4335e5b2 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -270,6 +270,12 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
270 __pte_free_tlb(tlb, ptep, address); \ 270 __pte_free_tlb(tlb, ptep, address); \
271 } while (0) 271 } while (0)
272 272
273#define pmd_free_tlb(tlb, pmdp, address) \
274 do { \
275 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
276 __pmd_free_tlb(tlb, pmdp, address); \
277 } while (0)
278
273#ifndef __ARCH_HAS_4LEVEL_HACK 279#ifndef __ARCH_HAS_4LEVEL_HACK
274#define pud_free_tlb(tlb, pudp, address) \ 280#define pud_free_tlb(tlb, pudp, address) \
275 do { \ 281 do { \
@@ -278,11 +284,13 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
278 } while (0) 284 } while (0)
279#endif 285#endif
280 286
281#define pmd_free_tlb(tlb, pmdp, address) \ 287#ifndef __ARCH_HAS_5LEVEL_HACK
288#define p4d_free_tlb(tlb, p4dp, address) \
282 do { \ 289 do { \
283 __tlb_adjust_range(tlb, address, PAGE_SIZE); \ 290 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
 284 __pmd_free_tlb(tlb, pmdp, address); \ 291 __p4d_free_tlb(tlb, p4dp, address); \
285 } while (0) 292 } while (0)
293#endif
286 294
287#define tlb_migrate_finish(mm) do {} while (0) 295#define tlb_migrate_finish(mm) do {} while (0)
288 296
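For context, a hedged sketch of the call-site shape p4d_free_tlb() is meant for (compare free_pgd_range() in mm/memory.c; the floor/ceiling checks of the real code are omitted here):

	static void sketch_free_p4d(struct mmu_gather *tlb, pgd_t *pgd,
				    unsigned long addr)
	{
		p4d_t *p4d = p4d_offset(pgd, addr);

		/* ... pud/pmd/pte levels below this entry already torn down ... */
		pgd_clear(pgd);			/* detach the table from the pgd */
		p4d_free_tlb(tlb, p4d, addr);	/* queue the page; freed after TLB flush */
	}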
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index a2bfd7843f18..e2b9c6fe2714 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -73,7 +73,7 @@ int af_alg_unregister_type(const struct af_alg_type *type);
73 73
74int af_alg_release(struct socket *sock); 74int af_alg_release(struct socket *sock);
75void af_alg_release_parent(struct sock *sk); 75void af_alg_release_parent(struct sock *sk);
76int af_alg_accept(struct sock *sk, struct socket *newsock); 76int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
77 77
78int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); 78int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
79void af_alg_free_sg(struct af_alg_sgl *sgl); 79void af_alg_free_sg(struct af_alg_sgl *sgl);
diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h
index 399a123aed58..db69d84ed7d1 100644
--- a/include/dt-bindings/sound/cs42l42.h
+++ b/include/dt-bindings/sound/cs42l42.h
@@ -20,7 +20,7 @@
20#define CS42L42_HPOUT_LOAD_1NF 0 20#define CS42L42_HPOUT_LOAD_1NF 0
21#define CS42L42_HPOUT_LOAD_10NF 1 21#define CS42L42_HPOUT_LOAD_10NF 1
22 22
23/* HPOUT Clamp to GND Overide */ 23/* HPOUT Clamp to GND Override */
24#define CS42L42_HPOUT_CLAMP_EN 0 24#define CS42L42_HPOUT_CLAMP_EN 0
25#define CS42L42_HPOUT_CLAMP_DIS 1 25#define CS42L42_HPOUT_CLAMP_DIS 1
26 26
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 796016e63c1d..5a7da607ca04 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -435,7 +435,6 @@ struct request_queue {
435 struct delayed_work delay_work; 435 struct delayed_work delay_work;
436 436
437 struct backing_dev_info *backing_dev_info; 437 struct backing_dev_info *backing_dev_info;
438 struct disk_devt *disk_devt;
439 438
440 /* 439 /*
441 * The queue owner gets to use this for whatever they like. 440 * The queue owner gets to use this for whatever they like.
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 1816c5e26581..88cd5dc8e238 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -48,6 +48,7 @@ struct ceph_options {
48 unsigned long mount_timeout; /* jiffies */ 48 unsigned long mount_timeout; /* jiffies */
49 unsigned long osd_idle_ttl; /* jiffies */ 49 unsigned long osd_idle_ttl; /* jiffies */
50 unsigned long osd_keepalive_timeout; /* jiffies */ 50 unsigned long osd_keepalive_timeout; /* jiffies */
51 unsigned long osd_request_timeout; /* jiffies */
51 52
52 /* 53 /*
 53 * any type that can't be simply compared or doesn't need 54 * any type that can't be simply compared or doesn't need
@@ -68,6 +69,7 @@ struct ceph_options {
68#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) 69#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000)
69#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) 70#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
70#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) 71#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
72#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */
71 73
72#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) 74#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
73#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) 75#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 2ea0c282f3dc..c125b5d9e13c 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -189,6 +189,7 @@ struct ceph_osd_request {
189 189
190 /* internal */ 190 /* internal */
191 unsigned long r_stamp; /* jiffies, send or check time */ 191 unsigned long r_stamp; /* jiffies, send or check time */
192 unsigned long r_start_stamp; /* jiffies */
192 int r_attempts; 193 int r_attempts;
193 struct ceph_eversion r_replay_version; /* aka reassert_version */ 194 struct ceph_eversion r_replay_version; /* aka reassert_version */
194 u32 r_last_force_resend; 195 u32 r_last_force_resend;
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 61d042bbbf60..68449293c4b6 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -163,6 +163,7 @@ struct dccp_request_sock {
163 __u64 dreq_isr; 163 __u64 dreq_isr;
164 __u64 dreq_gsr; 164 __u64 dreq_gsr;
165 __be32 dreq_service; 165 __be32 dreq_service;
166 spinlock_t dreq_lock;
166 struct list_head dreq_featneg; 167 struct list_head dreq_featneg;
167 __u32 dreq_timestamp_echo; 168 __u32 dreq_timestamp_echo;
168 __u32 dreq_timestamp_time; 169 __u32 dreq_timestamp_time;
diff --git a/include/linux/device.h b/include/linux/device.h
index 30c4570e928d..9ef518af5515 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev)
1140extern void lock_device_hotplug(void); 1140extern void lock_device_hotplug(void);
1141extern void unlock_device_hotplug(void); 1141extern void unlock_device_hotplug(void);
1142extern int lock_device_hotplug_sysfs(void); 1142extern int lock_device_hotplug_sysfs(void);
1143void assert_held_device_hotplug(void);
1144extern int device_offline(struct device *dev); 1143extern int device_offline(struct device *dev);
1145extern int device_online(struct device *dev); 1144extern int device_online(struct device *dev);
1146extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1145extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0c167fdee5f7..fbf7b39e8103 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -409,6 +409,7 @@ struct bpf_prog {
409 u16 pages; /* Number of allocated pages */ 409 u16 pages; /* Number of allocated pages */
410 kmemcheck_bitfield_begin(meta); 410 kmemcheck_bitfield_begin(meta);
411 u16 jited:1, /* Is our filter JIT'ed? */ 411 u16 jited:1, /* Is our filter JIT'ed? */
412 locked:1, /* Program image locked? */
412 gpl_compatible:1, /* Is filter GPL compatible? */ 413 gpl_compatible:1, /* Is filter GPL compatible? */
413 cb_access:1, /* Is control block accessed? */ 414 cb_access:1, /* Is control block accessed? */
414 dst_needed:1, /* Do we need dst entry? */ 415 dst_needed:1, /* Do we need dst entry? */
@@ -554,22 +555,29 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
554#ifdef CONFIG_ARCH_HAS_SET_MEMORY 555#ifdef CONFIG_ARCH_HAS_SET_MEMORY
555static inline void bpf_prog_lock_ro(struct bpf_prog *fp) 556static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
556{ 557{
557 set_memory_ro((unsigned long)fp, fp->pages); 558 fp->locked = 1;
559 WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
558} 560}
559 561
560static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) 562static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
561{ 563{
562 set_memory_rw((unsigned long)fp, fp->pages); 564 if (fp->locked) {
565 WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
566 /* In case set_memory_rw() fails, we want to be the first
567 * to crash here instead of some random place later on.
568 */
569 fp->locked = 0;
570 }
563} 571}
564 572
565static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) 573static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
566{ 574{
567 set_memory_ro((unsigned long)hdr, hdr->pages); 575 WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
568} 576}
569 577
570static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) 578static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
571{ 579{
572 set_memory_rw((unsigned long)hdr, hdr->pages); 580 WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
573} 581}
574#else 582#else
575static inline void bpf_prog_lock_ro(struct bpf_prog *fp) 583static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
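The locked bit makes the lock/unlock pair idempotent: unlock only flips the pages back if lock actually ran, and WARN_ON_ONCE() surfaces a failing set_memory_*() call immediately rather than as a stray fault later. A hedged lifecycle sketch (compare bpf_prog_unlock_free() in the sources):

	static void sketch_prog_lifecycle(struct bpf_prog *fp)
	{
		bpf_prog_lock_ro(fp);		/* image final: map it read-only */
		/* ... the program is attached and runs ... */
		bpf_prog_unlock_ro(fp);		/* no-op unless fp->locked was set */
		__bpf_prog_free(fp);		/* pages are writable again here */
	}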
diff --git a/include/linux/fs.h b/include/linux/fs.h
index aad3fd0ff5f8..7251f7bb45e8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2678,7 +2678,7 @@ static const char * const kernel_read_file_str[] = {
2678 2678
2679static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) 2679static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
2680{ 2680{
2681 if (id < 0 || id >= READING_MAX_ID) 2681 if ((unsigned)id >= READING_MAX_ID)
2682 return kernel_read_file_str[READING_UNKNOWN]; 2682 return kernel_read_file_str[READING_UNKNOWN];
2683 2683
2684 return kernel_read_file_str[id]; 2684 return kernel_read_file_str[id];
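The fs.h change above relies on a common C idiom: casting a possibly-negative index to unsigned folds the "< 0" and ">= MAX" tests into one comparison, because negative values wrap to very large unsigned ones. A small standalone illustration (all names here are made up):

	#include <stdio.h>

	#define MAX_ID 3

	static const char *lookup(int id)
	{
		static const char * const names[MAX_ID] = { "unknown", "firmware", "module" };

		if ((unsigned int)id >= MAX_ID)	/* also rejects id < 0 */
			return names[0];
		return names[id];
	}

	int main(void)
	{
		printf("%s %s\n", lookup(-1), lookup(2));	/* prints: unknown module */
		return 0;
	}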
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index a999d281a2f1..76f39754e7b0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -167,13 +167,6 @@ struct blk_integrity {
167}; 167};
168 168
169#endif /* CONFIG_BLK_DEV_INTEGRITY */ 169#endif /* CONFIG_BLK_DEV_INTEGRITY */
170struct disk_devt {
171 atomic_t count;
172 void (*release)(struct disk_devt *disk_devt);
173};
174
175void put_disk_devt(struct disk_devt *disk_devt);
176void get_disk_devt(struct disk_devt *disk_devt);
177 170
178struct gendisk { 171struct gendisk {
179 /* major, first_minor and minors are input parameters only, 172 /* major, first_minor and minors are input parameters only,
@@ -183,7 +176,6 @@ struct gendisk {
183 int first_minor; 176 int first_minor;
184 int minors; /* maximum number of minors, =1 for 177 int minors; /* maximum number of minors, =1 for
185 * disks that can't be partitioned. */ 178 * disks that can't be partitioned. */
186 struct disk_devt *disk_devt;
187 179
188 char disk_name[DISK_NAME_LEN]; /* name of major driver */ 180 char disk_name[DISK_NAME_LEN]; /* name of major driver */
189 char *(*devnode)(struct gendisk *gd, umode_t *mode); 181 char *(*devnode)(struct gendisk *gd, umode_t *mode);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 503099d8aada..b857fc8cc2ec 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -122,7 +122,7 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
122struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, 122struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
123 pud_t *pud, int flags); 123 pud_t *pud, int flags);
124int pmd_huge(pmd_t pmd); 124int pmd_huge(pmd_t pmd);
125int pud_huge(pud_t pmd); 125int pud_huge(pud_t pud);
126unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 126unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
127 unsigned long address, unsigned long end, pgprot_t newprot); 127 unsigned long address, unsigned long end, pgprot_t newprot);
128 128
@@ -197,6 +197,9 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
197#ifndef pgd_huge 197#ifndef pgd_huge
198#define pgd_huge(x) 0 198#define pgd_huge(x) 0
199#endif 199#endif
200#ifndef p4d_huge
201#define p4d_huge(x) 0
202#endif
200 203
201#ifndef pgd_write 204#ifndef pgd_write
202static inline int pgd_write(pgd_t pgd) 205static inline int pgd_write(pgd_t pgd)
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 672cfef72fc8..97cbca19430d 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -373,6 +373,8 @@
373#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) 373#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT)
374#define ICC_IGRPEN1_EL1_SHIFT 0 374#define ICC_IGRPEN1_EL1_SHIFT 0
375#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) 375#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT)
376#define ICC_SRE_EL1_DIB (1U << 2)
377#define ICC_SRE_EL1_DFB (1U << 1)
376#define ICC_SRE_EL1_SRE (1U << 0) 378#define ICC_SRE_EL1_SRE (1U << 0)
377 379
378/* 380/*
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 188eced6813e..9f3616085423 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -524,6 +524,10 @@ static inline struct irq_domain *irq_find_matching_fwnode(
524{ 524{
525 return NULL; 525 return NULL;
526} 526}
527static inline bool irq_domain_check_msi_remap(void)
528{
529 return false;
530}
527#endif /* !CONFIG_IRQ_DOMAIN */ 531#endif /* !CONFIG_IRQ_DOMAIN */
528 532
529#endif /* _LINUX_IRQDOMAIN_H */ 533#endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 8e06d758ee48..2afd74b9d844 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -90,6 +90,13 @@ extern bool static_key_initialized;
90struct static_key { 90struct static_key {
91 atomic_t enabled; 91 atomic_t enabled;
92/* 92/*
93 * Note:
94 * To make anonymous unions work with old compilers, the static
95 * initialization of them requires brackets. This creates a dependency
96 * on the order of the struct with the initializers. If any fields
97 * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
98 * to be modified.
99 *
93 * bit 0 => 1 if key is initially true 100 * bit 0 => 1 if key is initially true
94 * 0 if initially false 101 * 0 if initially false
95 * bit 1 => 1 if points to struct static_key_mod 102 * bit 1 => 1 if points to struct static_key_mod
@@ -166,10 +173,10 @@ extern void static_key_disable(struct static_key *key);
166 */ 173 */
167#define STATIC_KEY_INIT_TRUE \ 174#define STATIC_KEY_INIT_TRUE \
168 { .enabled = { 1 }, \ 175 { .enabled = { 1 }, \
169 .entries = (void *)JUMP_TYPE_TRUE } 176 { .entries = (void *)JUMP_TYPE_TRUE } }
170#define STATIC_KEY_INIT_FALSE \ 177#define STATIC_KEY_INIT_FALSE \
171 { .enabled = { 0 }, \ 178 { .enabled = { 0 }, \
172 .entries = (void *)JUMP_TYPE_FALSE } 179 { .entries = (void *)JUMP_TYPE_FALSE } }
173 180
174#else /* !HAVE_JUMP_LABEL */ 181#else /* !HAVE_JUMP_LABEL */
175 182
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index ceb3fe78a0d3..5734480c9590 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -6,6 +6,7 @@
6struct kmem_cache; 6struct kmem_cache;
7struct page; 7struct page;
8struct vm_struct; 8struct vm_struct;
9struct task_struct;
9 10
10#ifdef CONFIG_KASAN 11#ifdef CONFIG_KASAN
11 12
@@ -18,6 +19,7 @@ extern unsigned char kasan_zero_page[PAGE_SIZE];
18extern pte_t kasan_zero_pte[PTRS_PER_PTE]; 19extern pte_t kasan_zero_pte[PTRS_PER_PTE];
19extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; 20extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
20extern pud_t kasan_zero_pud[PTRS_PER_PUD]; 21extern pud_t kasan_zero_pud[PTRS_PER_PUD];
22extern p4d_t kasan_zero_p4d[PTRS_PER_P4D];
21 23
22void kasan_populate_zero_shadow(const void *shadow_start, 24void kasan_populate_zero_shadow(const void *shadow_start,
23 const void *shadow_end); 25 const void *shadow_end);
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index b01fe1009084..87ff4f58a2f0 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -29,6 +29,11 @@ struct hlist_nulls_node {
29 ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls)) 29 ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
30 30
31#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) 31#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
32
33#define hlist_nulls_entry_safe(ptr, type, member) \
34 ({ typeof(ptr) ____ptr = (ptr); \
35 !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
36 })
32/** 37/**
33 * ptr_is_a_nulls - Test if a ptr is a nulls 38 * ptr_is_a_nulls - Test if a ptr is a nulls
34 * @ptr: ptr to be tested 39 * @ptr: ptr to be tested
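hlist_nulls_entry_safe() is the nulls-list counterpart of hlist_entry_safe(): when the pointer is a nulls marker it yields NULL instead of a bogus container. A hedged sketch, with a made-up struct item:

	struct item {
		struct hlist_nulls_node node;
		int key;
	};

	/* First entry of a bucket, or NULL if only the nulls marker remains. */
	static struct item *first_item(struct hlist_nulls_head *head)
	{
		return hlist_nulls_entry_safe(head->first, struct item, node);
	}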
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0d65dd72c0f4..5f01c88f0800 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1560,14 +1560,24 @@ static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1560 return ptep; 1560 return ptep;
1561} 1561}
1562 1562
1563#ifdef __PAGETABLE_P4D_FOLDED
1564static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1565 unsigned long address)
1566{
1567 return 0;
1568}
1569#else
1570int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1571#endif
1572
1563#ifdef __PAGETABLE_PUD_FOLDED 1573#ifdef __PAGETABLE_PUD_FOLDED
1564static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, 1574static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1565 unsigned long address) 1575 unsigned long address)
1566{ 1576{
1567 return 0; 1577 return 0;
1568} 1578}
1569#else 1579#else
1570int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); 1580int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1571#endif 1581#endif
1572 1582
1573#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) 1583#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
@@ -1619,11 +1629,22 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1619 * Remove it when 4level-fixup.h has been removed. 1629 * Remove it when 4level-fixup.h has been removed.
1620 */ 1630 */
1621#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) 1631#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1622static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 1632
1633#ifndef __ARCH_HAS_5LEVEL_HACK
1634static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1635 unsigned long address)
1636{
1637 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
1638 NULL : p4d_offset(pgd, address);
1639}
1640
1641static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1642 unsigned long address)
1623{ 1643{
1624 return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))? 1644 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
1625 NULL: pud_offset(pgd, address); 1645 NULL : pud_offset(p4d, address);
1626} 1646}
1647#endif /* !__ARCH_HAS_5LEVEL_HACK */
1627 1648
1628static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 1649static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1629{ 1650{
@@ -2385,7 +2406,8 @@ void sparse_mem_maps_populate_node(struct page **map_map,
2385 2406
2386struct page *sparse_mem_map_populate(unsigned long pnum, int nid); 2407struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2387pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); 2408pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2388pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); 2409p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
2410pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
2389pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); 2411pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2390pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); 2412pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2391void *vmemmap_alloc_block(unsigned long size, int node); 2413void *vmemmap_alloc_block(unsigned long size, int node);
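With the extra level, the fault-path allocation chain gains one link; each *_alloc() either finds the lower table already present or allocates it, returning NULL only on allocation failure. A hedged sketch of the full descent (compare __handle_mm_fault()):

	static pte_t *sketch_walk_alloc(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d = p4d_alloc(mm, pgd, addr);	/* new intermediate level */
		pud_t *pud;
		pmd_t *pmd;

		if (!p4d)
			return NULL;
		pud = pud_alloc(mm, p4d, addr);		/* note: takes a p4d now */
		if (!pud)
			return NULL;
		pmd = pmd_alloc(mm, pud, addr);
		if (!pmd)
			return NULL;
		return pte_alloc_map(mm, pmd, addr);
	}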
diff --git a/include/linux/net.h b/include/linux/net.h
index cd0c8bd0a1de..0620f5e18c96 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -146,7 +146,7 @@ struct proto_ops {
146 int (*socketpair)(struct socket *sock1, 146 int (*socketpair)(struct socket *sock1,
147 struct socket *sock2); 147 struct socket *sock2);
148 int (*accept) (struct socket *sock, 148 int (*accept) (struct socket *sock,
149 struct socket *newsock, int flags); 149 struct socket *newsock, int flags, bool kern);
150 int (*getname) (struct socket *sock, 150 int (*getname) (struct socket *sock,
151 struct sockaddr *addr, 151 struct sockaddr *addr,
152 int *sockaddr_len, int peer); 152 int *sockaddr_len, int peer);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 772476028a65..43a774873aa9 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -837,6 +837,10 @@ int genphy_read_status(struct phy_device *phydev);
837int genphy_suspend(struct phy_device *phydev); 837int genphy_suspend(struct phy_device *phydev);
838int genphy_resume(struct phy_device *phydev); 838int genphy_resume(struct phy_device *phydev);
839int genphy_soft_reset(struct phy_device *phydev); 839int genphy_soft_reset(struct phy_device *phydev);
840static inline int genphy_no_soft_reset(struct phy_device *phydev)
841{
842 return 0;
843}
840void phy_driver_unregister(struct phy_driver *drv); 844void phy_driver_unregister(struct phy_driver *drv);
841void phy_drivers_unregister(struct phy_driver *drv, int n); 845void phy_drivers_unregister(struct phy_driver *drv, int n);
842int phy_driver_register(struct phy_driver *new_driver, struct module *owner); 846int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h
new file mode 100644
index 000000000000..d60d4e278609
--- /dev/null
+++ b/include/linux/purgatory.h
@@ -0,0 +1,23 @@
1#ifndef _LINUX_PURGATORY_H
2#define _LINUX_PURGATORY_H
3
4#include <linux/types.h>
5#include <crypto/sha.h>
6#include <uapi/linux/kexec.h>
7
8struct kexec_sha_region {
9 unsigned long start;
10 unsigned long len;
11};
12
13/*
14 * These forward declarations serve two purposes:
15 *
16 * 1) Make sparse happy when checking arch/purgatory
17 * 2) Document that these are required to be global so the symbol
18 * lookup in kexec works
19 */
20extern struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
21extern u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
22
23#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index 7bd2403e4fef..ed5c3838780d 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -37,14 +37,26 @@ extern void get_random_bytes(void *buf, int nbytes);
37extern int add_random_ready_callback(struct random_ready_callback *rdy); 37extern int add_random_ready_callback(struct random_ready_callback *rdy);
38extern void del_random_ready_callback(struct random_ready_callback *rdy); 38extern void del_random_ready_callback(struct random_ready_callback *rdy);
39extern void get_random_bytes_arch(void *buf, int nbytes); 39extern void get_random_bytes_arch(void *buf, int nbytes);
40extern int random_int_secret_init(void);
41 40
42#ifndef MODULE 41#ifndef MODULE
43extern const struct file_operations random_fops, urandom_fops; 42extern const struct file_operations random_fops, urandom_fops;
44#endif 43#endif
45 44
46unsigned int get_random_int(void); 45u32 get_random_u32(void);
47unsigned long get_random_long(void); 46u64 get_random_u64(void);
47static inline unsigned int get_random_int(void)
48{
49 return get_random_u32();
50}
51static inline unsigned long get_random_long(void)
52{
53#if BITS_PER_LONG == 64
54 return get_random_u64();
55#else
56 return get_random_u32();
57#endif
58}
59
48unsigned long randomize_page(unsigned long start, unsigned long range); 60unsigned long randomize_page(unsigned long start, unsigned long range);
49 61
50u32 prandom_u32(void); 62u32 prandom_u32(void);
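Existing get_random_int()/get_random_long() callers keep working through the inline wrappers, while new code can request the exact width it needs. A hedged usage sketch (the combination at the end is illustrative only):

	static u64 sketch_pick_token(void)
	{
		u32 low  = get_random_u32();	/* 32-bit value */
		u64 wide = get_random_u64();	/* full 64-bit value */

		return wide ^ low;
	}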
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 4ae95f7e8597..a23a33153180 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -156,5 +156,19 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
156 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ 156 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
157 pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) 157 pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
158 158
159/**
160 * hlist_nulls_for_each_entry_safe -
161 * iterate over list of given type safe against removal of list entry
162 * @tpos: the type * to use as a loop cursor.
163 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
164 * @head: the head for your list.
165 * @member: the name of the hlist_nulls_node within the struct.
166 */
167#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \
168 for (({barrier();}), \
169 pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
170 (!is_a_nulls(pos)) && \
171 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \
172 pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
159#endif 173#endif
160#endif 174#endif
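The _safe variant advances the cursor before the loop body runs, so the body may unlink the current entry. A hedged deletion sketch (struct item is made up and assumes an rcu_head member named rcu):

	struct item {
		struct hlist_nulls_node node;
		struct rcu_head rcu;
	};

	static void flush_bucket(struct hlist_nulls_head *head)
	{
		struct hlist_nulls_node *n;
		struct item *it;

		hlist_nulls_for_each_entry_safe(it, n, head, node) {
			hlist_nulls_del_rcu(&it->node);	/* safe: n already points past it */
			kfree_rcu(it, rcu);
		}
	}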
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index ad3e5158e586..c9f795e9a2ee 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -65,7 +65,7 @@ struct regulator_state {
65 int uV; /* suspend voltage */ 65 int uV; /* suspend voltage */
66 unsigned int mode; /* suspend regulator operating mode */ 66 unsigned int mode; /* suspend regulator operating mode */
67 int enabled; /* is regulator enabled in this suspend state */ 67 int enabled; /* is regulator enabled in this suspend state */
68 int disabled; /* is the regulator disbled in this suspend state */ 68 int disabled; /* is the regulator disabled in this suspend state */
69}; 69};
70 70
71/** 71/**
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index be765234c0a2..32354b4b4b2b 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -72,7 +72,7 @@ struct ucounts {
72 struct hlist_node node; 72 struct hlist_node node;
73 struct user_namespace *ns; 73 struct user_namespace *ns;
74 kuid_t uid; 74 kuid_t uid;
75 atomic_t count; 75 int count;
76 atomic_t ucount[UCOUNT_COUNTS]; 76 atomic_t ucount[UCOUNT_COUNTS];
77}; 77};
78 78
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 0468548acebf..48a3483dccb1 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -61,8 +61,7 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
61 unsigned long from, unsigned long to, 61 unsigned long from, unsigned long to,
62 unsigned long len); 62 unsigned long len);
63 63
64extern void userfaultfd_remove(struct vm_area_struct *vma, 64extern bool userfaultfd_remove(struct vm_area_struct *vma,
65 struct vm_area_struct **prev,
66 unsigned long start, 65 unsigned long start,
67 unsigned long end); 66 unsigned long end);
68 67
@@ -72,8 +71,6 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
72extern void userfaultfd_unmap_complete(struct mm_struct *mm, 71extern void userfaultfd_unmap_complete(struct mm_struct *mm,
73 struct list_head *uf); 72 struct list_head *uf);
74 73
75extern void userfaultfd_exit(struct mm_struct *mm);
76
77#else /* CONFIG_USERFAULTFD */ 74#else /* CONFIG_USERFAULTFD */
78 75
79/* mm helpers */ 76/* mm helpers */
@@ -120,11 +117,11 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
120{ 117{
121} 118}
122 119
123static inline void userfaultfd_remove(struct vm_area_struct *vma, 120static inline bool userfaultfd_remove(struct vm_area_struct *vma,
124 struct vm_area_struct **prev,
125 unsigned long start, 121 unsigned long start,
126 unsigned long end) 122 unsigned long end)
127{ 123{
124 return true;
128} 125}
129 126
130static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, 127static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
@@ -139,10 +136,6 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
139{ 136{
140} 137}
141 138
142static inline void userfaultfd_exit(struct mm_struct *mm)
143{
144}
145
146#endif /* CONFIG_USERFAULTFD */ 139#endif /* CONFIG_USERFAULTFD */
147 140
148#endif /* _LINUX_USERFAULTFD_K_H */ 141#endif /* _LINUX_USERFAULTFD_K_H */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 6aa1b6cb5828..a80b7b59cf33 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -79,6 +79,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
79 THP_SPLIT_PAGE_FAILED, 79 THP_SPLIT_PAGE_FAILED,
80 THP_DEFERRED_SPLIT_PAGE, 80 THP_DEFERRED_SPLIT_PAGE,
81 THP_SPLIT_PMD, 81 THP_SPLIT_PMD,
82#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
83 THP_SPLIT_PUD,
84#endif
82 THP_ZERO_PAGE_ALLOC, 85 THP_ZERO_PAGE_ALLOC,
83 THP_ZERO_PAGE_ALLOC_FAILED, 86 THP_ZERO_PAGE_ALLOC_FAILED,
84#endif 87#endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index aacb1282d19a..db076ca7f11d 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -620,30 +620,19 @@ do { \
620 __ret; \ 620 __ret; \
621}) 621})
622 622
623extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *);
624extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
623 625
624#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ 626#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
625({ \ 627({ \
626 int __ret = 0; \ 628 int __ret; \
627 DEFINE_WAIT(__wait); \ 629 DEFINE_WAIT(__wait); \
628 if (exclusive) \ 630 if (exclusive) \
629 __wait.flags |= WQ_FLAG_EXCLUSIVE; \ 631 __wait.flags |= WQ_FLAG_EXCLUSIVE; \
630 do { \ 632 do { \
631 if (likely(list_empty(&__wait.task_list))) \ 633 __ret = fn(&(wq), &__wait); \
632 __add_wait_queue_tail(&(wq), &__wait); \ 634 if (__ret) \
633 set_current_state(TASK_INTERRUPTIBLE); \
634 if (signal_pending(current)) { \
635 __ret = -ERESTARTSYS; \
636 break; \ 635 break; \
637 } \
638 if (irq) \
639 spin_unlock_irq(&(wq).lock); \
640 else \
641 spin_unlock(&(wq).lock); \
642 schedule(); \
643 if (irq) \
644 spin_lock_irq(&(wq).lock); \
645 else \
646 spin_lock(&(wq).lock); \
647 } while (!(condition)); \ 636 } while (!(condition)); \
648 __remove_wait_queue(&(wq), &__wait); \ 637 __remove_wait_queue(&(wq), &__wait); \
649 __set_current_state(TASK_RUNNING); \ 638 __set_current_state(TASK_RUNNING); \
@@ -676,7 +665,7 @@ do { \
676 */ 665 */
677#define wait_event_interruptible_locked(wq, condition) \ 666#define wait_event_interruptible_locked(wq, condition) \
678 ((condition) \ 667 ((condition) \
679 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0)) 668 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
680 669
681/** 670/**
682 * wait_event_interruptible_locked_irq - sleep until a condition gets true 671 * wait_event_interruptible_locked_irq - sleep until a condition gets true
@@ -703,7 +692,7 @@ do { \
703 */ 692 */
704#define wait_event_interruptible_locked_irq(wq, condition) \ 693#define wait_event_interruptible_locked_irq(wq, condition) \
705 ((condition) \ 694 ((condition) \
706 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1)) 695 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
707 696
708/** 697/**
709 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true 698 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
@@ -734,7 +723,7 @@ do { \
734 */ 723 */
735#define wait_event_interruptible_exclusive_locked(wq, condition) \ 724#define wait_event_interruptible_exclusive_locked(wq, condition) \
736 ((condition) \ 725 ((condition) \
737 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0)) 726 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
738 727
739/** 728/**
740 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true 729 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
@@ -765,7 +754,7 @@ do { \
765 */ 754 */
766#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ 755#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
767 ((condition) \ 756 ((condition) \
768 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) 757 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
769 758
770 759
771#define __wait_event_killable(wq, condition) \ 760#define __wait_event_killable(wq, condition) \
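The open-coded prepare/unlock/schedule/lock sequence moves into do_wait_intr() and do_wait_intr_irq(), which the macro now calls once per iteration; a nonzero return (-ERESTARTSYS on a pending signal) breaks out of the wait. A hedged reconstruction of the non-irq helper (the real one lives in kernel/sched/wait.c):

	int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
	{
		if (likely(list_empty(&wait->task_list)))
			__add_wait_queue_tail(wq, wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			return -ERESTARTSYS;	/* caller breaks out of the loop */

		spin_unlock(&wq->lock);		/* sleep with the queue lock dropped */
		schedule();
		spin_lock(&wq->lock);
		return 0;
	}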
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 458b400373d4..38aac554dbba 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -20,8 +20,17 @@ struct device;
20 20
21int vsp1_du_init(struct device *dev); 21int vsp1_du_init(struct device *dev);
22 22
23int vsp1_du_setup_lif(struct device *dev, unsigned int width, 23/**
24 unsigned int height); 24 * struct vsp1_du_lif_config - VSP LIF configuration
25 * @width: output frame width
26 * @height: output frame height
27 */
28struct vsp1_du_lif_config {
29 unsigned int width;
30 unsigned int height;
31};
32
33int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg);
25 34
26struct vsp1_du_atomic_config { 35struct vsp1_du_atomic_config {
27 u32 pixelformat; 36 u32 pixelformat;
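Callers now describe the LIF through a structure rather than bare width/height arguments, which leaves room for future fields. A hedged usage sketch (the NULL-to-disable convention noted in the comment is an assumption here):

	static int sketch_enable_lif(struct device *vsp,
				     unsigned int width, unsigned int height)
	{
		struct vsp1_du_lif_config cfg = {
			.width  = width,
			.height = height,
		};

		return vsp1_du_setup_lif(vsp, &cfg);	/* pass NULL to tear the pipeline down */
	}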
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b7952d55b9c0..f39ae697347f 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -20,7 +20,8 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
20 int addr_len, int flags, int is_sendmsg); 20 int addr_len, int flags, int is_sendmsg);
21int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, 21int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
22 int addr_len, int flags); 22 int addr_len, int flags);
23int inet_accept(struct socket *sock, struct socket *newsock, int flags); 23int inet_accept(struct socket *sock, struct socket *newsock, int flags,
24 bool kern);
24int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); 25int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
25ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, 26ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
26 size_t size, int flags); 27 size_t size, int flags);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 826f198374f8..c7a577976bec 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -258,7 +258,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
258 return (unsigned long)min_t(u64, when, max_when); 258 return (unsigned long)min_t(u64, when, max_when);
259} 259}
260 260
261struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); 261struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
262 262
263int inet_csk_get_port(struct sock *sk, unsigned short snum); 263int inet_csk_get_port(struct sock *sk, unsigned short snum);
264 264
diff --git a/include/net/irda/timer.h b/include/net/irda/timer.h
index cb2615ccf761..d784f242cf7b 100644
--- a/include/net/irda/timer.h
+++ b/include/net/irda/timer.h
@@ -59,7 +59,7 @@ struct lap_cb;
59 * Slot timer must never exceed 85 ms, and must always be at least 25 ms, 59 * Slot timer must never exceed 85 ms, and must always be at least 25 ms,
 60 * suggested to be 75-85 msec by IrDA lite. This doesn't work with a lot of 60 * suggested to be 75-85 msec by IrDA lite. This doesn't work with a lot of
 61 * devices, and other stacks use a lot more, so it's best we do it as well 61 * devices, and other stacks use a lot more, so it's best we do it as well
62 * (Note : this is the default value and sysctl overides it - Jean II) 62 * (Note : this is the default value and sysctl overrides it - Jean II)
63 */ 63 */
64#define SLOT_TIMEOUT (90*HZ/1000) 64#define SLOT_TIMEOUT (90*HZ/1000)
65 65
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a244db5e5ff7..07a0b128625a 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -476,7 +476,8 @@ struct sctp_pf {
476 int (*send_verify) (struct sctp_sock *, union sctp_addr *); 476 int (*send_verify) (struct sctp_sock *, union sctp_addr *);
477 int (*supported_addrs)(const struct sctp_sock *, __be16 *); 477 int (*supported_addrs)(const struct sctp_sock *, __be16 *);
478 struct sock *(*create_accept_sk) (struct sock *sk, 478 struct sock *(*create_accept_sk) (struct sock *sk,
479 struct sctp_association *asoc); 479 struct sctp_association *asoc,
480 bool kern);
480 int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr); 481 int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
481 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk); 482 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
482 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk); 483 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
diff --git a/include/net/sock.h b/include/net/sock.h
index 5e5997654db6..03252d53975d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -236,6 +236,7 @@ struct sock_common {
236 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN 236 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
237 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings 237 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
238 * @sk_lock: synchronizer 238 * @sk_lock: synchronizer
239 * @sk_kern_sock: True if sock is using kernel lock classes
239 * @sk_rcvbuf: size of receive buffer in bytes 240 * @sk_rcvbuf: size of receive buffer in bytes
240 * @sk_wq: sock wait queue and async head 241 * @sk_wq: sock wait queue and async head
241 * @sk_rx_dst: receive input route used by early demux 242 * @sk_rx_dst: receive input route used by early demux
@@ -430,7 +431,8 @@ struct sock {
430#endif 431#endif
431 432
432 kmemcheck_bitfield_begin(flags); 433 kmemcheck_bitfield_begin(flags);
433 unsigned int sk_padding : 2, 434 unsigned int sk_padding : 1,
435 sk_kern_sock : 1,
434 sk_no_check_tx : 1, 436 sk_no_check_tx : 1,
435 sk_no_check_rx : 1, 437 sk_no_check_rx : 1,
436 sk_userlocks : 4, 438 sk_userlocks : 4,
@@ -1015,7 +1017,8 @@ struct proto {
1015 int addr_len); 1017 int addr_len);
1016 int (*disconnect)(struct sock *sk, int flags); 1018 int (*disconnect)(struct sock *sk, int flags);
1017 1019
1018 struct sock * (*accept)(struct sock *sk, int flags, int *err); 1020 struct sock * (*accept)(struct sock *sk, int flags, int *err,
1021 bool kern);
1019 1022
1020 int (*ioctl)(struct sock *sk, int cmd, 1023 int (*ioctl)(struct sock *sk, int cmd,
1021 unsigned long arg); 1024 unsigned long arg);
@@ -1573,7 +1576,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1573int sock_no_bind(struct socket *, struct sockaddr *, int); 1576int sock_no_bind(struct socket *, struct sockaddr *, int);
1574int sock_no_connect(struct socket *, struct sockaddr *, int, int); 1577int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1575int sock_no_socketpair(struct socket *, struct socket *); 1578int sock_no_socketpair(struct socket *, struct socket *);
1576int sock_no_accept(struct socket *, struct socket *, int); 1579int sock_no_accept(struct socket *, struct socket *, int, bool);
1577int sock_no_getname(struct socket *, struct sockaddr *, int *, int); 1580int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
1578unsigned int sock_no_poll(struct file *, struct socket *, 1581unsigned int sock_no_poll(struct file *, struct socket *,
1579 struct poll_table_struct *); 1582 struct poll_table_struct *);
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index b0e275de6dec..583875ea136a 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@ struct iscsi_conn {
196 struct iscsi_task *task; /* xmit task in progress */ 196 struct iscsi_task *task; /* xmit task in progress */
197 197
198 /* xmit */ 198 /* xmit */
199 spinlock_t taskqueuelock; /* protects the next three lists */
199 struct list_head mgmtqueue; /* mgmt (control) xmit queue */ 200 struct list_head mgmtqueue; /* mgmt (control) xmit queue */
200 struct list_head cmdqueue; /* data-path cmd queue */ 201 struct list_head cmdqueue; /* data-path cmd queue */
201 struct list_head requeue; /* tasks needing another run */ 202 struct list_head requeue; /* tasks needing another run */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 6f22b39f1b0c..080c7ce9bae8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -472,6 +472,10 @@ static inline int scsi_device_created(struct scsi_device *sdev)
472 sdev->sdev_state == SDEV_CREATED_BLOCK; 472 sdev->sdev_state == SDEV_CREATED_BLOCK;
473} 473}
474 474
475int scsi_internal_device_block(struct scsi_device *sdev, bool wait);
476int scsi_internal_device_unblock(struct scsi_device *sdev,
477 enum scsi_device_state new_state);
478
475/* accessor functions for the SCSI parameters */ 479/* accessor functions for the SCSI parameters */
476static inline int scsi_device_sync(struct scsi_device *sdev) 480static inline int scsi_device_sync(struct scsi_device *sdev)
477{ 481{
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 14e49c798135..b35533b94277 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
1#undef TRACE_SYSTEM 1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM raw_syscalls 2#define TRACE_SYSTEM raw_syscalls
3#undef TRACE_INCLUDE_FILE
3#define TRACE_INCLUDE_FILE syscalls 4#define TRACE_INCLUDE_FILE syscalls
4 5
5#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) 6#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 407cb55df6ac..7fb97863c945 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -33,8 +33,8 @@ extern "C" {
33#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */ 33#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
34 34
35struct drm_omap_param { 35struct drm_omap_param {
36 uint64_t param; /* in */ 36 __u64 param; /* in */
37 uint64_t value; /* in (set_param), out (get_param) */ 37 __u64 value; /* in (set_param), out (get_param) */
38}; 38};
39 39
40#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ 40#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
@@ -53,18 +53,18 @@ struct drm_omap_param {
53#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) 53#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
54 54
55union omap_gem_size { 55union omap_gem_size {
56 uint32_t bytes; /* (for non-tiled formats) */ 56 __u32 bytes; /* (for non-tiled formats) */
57 struct { 57 struct {
58 uint16_t width; 58 __u16 width;
59 uint16_t height; 59 __u16 height;
60 } tiled; /* (for tiled formats) */ 60 } tiled; /* (for tiled formats) */
61}; 61};
62 62
63struct drm_omap_gem_new { 63struct drm_omap_gem_new {
64 union omap_gem_size size; /* in */ 64 union omap_gem_size size; /* in */
65 uint32_t flags; /* in */ 65 __u32 flags; /* in */
66 uint32_t handle; /* out */ 66 __u32 handle; /* out */
67 uint32_t __pad; 67 __u32 __pad;
68}; 68};
69 69
70/* mask of operations: */ 70/* mask of operations: */
@@ -74,33 +74,33 @@ enum omap_gem_op {
74}; 74};
75 75
76struct drm_omap_gem_cpu_prep { 76struct drm_omap_gem_cpu_prep {
77 uint32_t handle; /* buffer handle (in) */ 77 __u32 handle; /* buffer handle (in) */
78 uint32_t op; /* mask of omap_gem_op (in) */ 78 __u32 op; /* mask of omap_gem_op (in) */
79}; 79};
80 80
81struct drm_omap_gem_cpu_fini { 81struct drm_omap_gem_cpu_fini {
82 uint32_t handle; /* buffer handle (in) */ 82 __u32 handle; /* buffer handle (in) */
83 uint32_t op; /* mask of omap_gem_op (in) */ 83 __u32 op; /* mask of omap_gem_op (in) */
84 /* TODO maybe here we pass down info about what regions are touched 84 /* TODO maybe here we pass down info about what regions are touched
85 * by sw so we can be clever about cache ops? For now a placeholder, 85 * by sw so we can be clever about cache ops? For now a placeholder,
86 * set to zero and we just do full buffer flush.. 86 * set to zero and we just do full buffer flush..
87 */ 87 */
88 uint32_t nregions; 88 __u32 nregions;
89 uint32_t __pad; 89 __u32 __pad;
90}; 90};
91 91
92struct drm_omap_gem_info { 92struct drm_omap_gem_info {
93 uint32_t handle; /* buffer handle (in) */ 93 __u32 handle; /* buffer handle (in) */
94 uint32_t pad; 94 __u32 pad;
95 uint64_t offset; /* mmap offset (out) */ 95 __u64 offset; /* mmap offset (out) */
96 /* note: in case of tiled buffers, the user virtual size can be 96 /* note: in case of tiled buffers, the user virtual size can be
97 * different from the physical size (ie. how many pages are needed 97 * different from the physical size (ie. how many pages are needed
98 * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. 98 * to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
99 * This size here is the one that should be used if you want to 99 * This size here is the one that should be used if you want to
100 * mmap() the buffer: 100 * mmap() the buffer:
101 */ 101 */
102 uint32_t size; /* virtual size for mmap'ing (out) */ 102 __u32 size; /* virtual size for mmap'ing (out) */
103 uint32_t __pad; 103 __u32 __pad;
104}; 104};
105 105
106#define DRM_OMAP_GET_PARAM 0x00 106#define DRM_OMAP_GET_PARAM 0x00
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index d08c63f3dd6f..0c5d5dd61b6a 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -64,7 +64,7 @@ struct packet_diag_mclist {
64 __u32 pdmc_count; 64 __u32 pdmc_count;
65 __u16 pdmc_type; 65 __u16 pdmc_type;
66 __u16 pdmc_alen; 66 __u16 pdmc_alen;
67 __u8 pdmc_addr[MAX_ADDR_LEN]; 67 __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */
68}; 68};
69 69
70struct packet_diag_ring { 70struct packet_diag_ring {
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index c055947c5c98..3b059530dac9 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -18,8 +18,7 @@
18 * means the userland is reading). 18 * means the userland is reading).
19 */ 19 */
20#define UFFD_API ((__u64)0xAA) 20#define UFFD_API ((__u64)0xAA)
21#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_EXIT | \ 21#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \
22 UFFD_FEATURE_EVENT_FORK | \
23 UFFD_FEATURE_EVENT_REMAP | \ 22 UFFD_FEATURE_EVENT_REMAP | \
24 UFFD_FEATURE_EVENT_REMOVE | \ 23 UFFD_FEATURE_EVENT_REMOVE | \
25 UFFD_FEATURE_EVENT_UNMAP | \ 24 UFFD_FEATURE_EVENT_UNMAP | \
@@ -113,7 +112,6 @@ struct uffd_msg {
113#define UFFD_EVENT_REMAP 0x14 112#define UFFD_EVENT_REMAP 0x14
114#define UFFD_EVENT_REMOVE 0x15 113#define UFFD_EVENT_REMOVE 0x15
115#define UFFD_EVENT_UNMAP 0x16 114#define UFFD_EVENT_UNMAP 0x16
116#define UFFD_EVENT_EXIT 0x17
117 115
118/* flags for UFFD_EVENT_PAGEFAULT */ 116/* flags for UFFD_EVENT_PAGEFAULT */
119#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ 117#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
@@ -163,7 +161,6 @@ struct uffdio_api {
163#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4) 161#define UFFD_FEATURE_MISSING_HUGETLBFS (1<<4)
164#define UFFD_FEATURE_MISSING_SHMEM (1<<5) 162#define UFFD_FEATURE_MISSING_SHMEM (1<<5)
165#define UFFD_FEATURE_EVENT_UNMAP (1<<6) 163#define UFFD_FEATURE_EVENT_UNMAP (1<<6)
166#define UFFD_FEATURE_EVENT_EXIT (1<<7)
167 __u64 features; 164 __u64 features;
168 165
169 __u64 ioctls; 166 __u64 ioctls;
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index a0083be5d529..1f6d78f044b6 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -2,6 +2,7 @@
2#define __LINUX_SWIOTLB_XEN_H 2#define __LINUX_SWIOTLB_XEN_H
3 3
4#include <linux/dma-direction.h> 4#include <linux/dma-direction.h>
5#include <linux/scatterlist.h>
5#include <linux/swiotlb.h> 6#include <linux/swiotlb.h>
6 7
7extern int xen_swiotlb_init(int verbose, bool early); 8extern int xen_swiotlb_init(int verbose, bool early);
@@ -55,4 +56,14 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
55 56
56extern int 57extern int
57xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask); 58xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
59
60extern int
61xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
62 void *cpu_addr, dma_addr_t dma_addr, size_t size,
63 unsigned long attrs);
64
65extern int
66xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
67 void *cpu_addr, dma_addr_t handle, size_t size,
68 unsigned long attrs);
58#endif /* __LINUX_SWIOTLB_XEN_H */ 69#endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/init/main.c b/init/main.c
index eae2f15657c6..f9c9d9948203 100644
--- a/init/main.c
+++ b/init/main.c
@@ -882,7 +882,6 @@ static void __init do_basic_setup(void)
882 do_ctors(); 882 do_ctors();
883 usermodehelper_enable(); 883 usermodehelper_enable();
884 do_initcalls(); 884 do_initcalls();
885 random_int_secret_init();
886} 885}
887 886
888static void __init do_pre_smp_initcalls(void) 887static void __init do_pre_smp_initcalls(void)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ea87fb19a94..afe5bab376c9 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,11 +13,12 @@
13#include <linux/bpf.h> 13#include <linux/bpf.h>
14#include <linux/jhash.h> 14#include <linux/jhash.h>
15#include <linux/filter.h> 15#include <linux/filter.h>
16#include <linux/rculist_nulls.h>
16#include "percpu_freelist.h" 17#include "percpu_freelist.h"
17#include "bpf_lru_list.h" 18#include "bpf_lru_list.h"
18 19
19struct bucket { 20struct bucket {
20 struct hlist_head head; 21 struct hlist_nulls_head head;
21 raw_spinlock_t lock; 22 raw_spinlock_t lock;
22}; 23};
23 24
@@ -44,9 +45,14 @@ enum extra_elem_state {
44/* each htab element is struct htab_elem + key + value */ 45/* each htab element is struct htab_elem + key + value */
45struct htab_elem { 46struct htab_elem {
46 union { 47 union {
47 struct hlist_node hash_node; 48 struct hlist_nulls_node hash_node;
48 struct bpf_htab *htab; 49 struct {
49 struct pcpu_freelist_node fnode; 50 void *padding;
51 union {
52 struct bpf_htab *htab;
53 struct pcpu_freelist_node fnode;
54 };
55 };
50 }; 56 };
51 union { 57 union {
52 struct rcu_head rcu; 58 struct rcu_head rcu;
@@ -162,7 +168,8 @@ skip_percpu_elems:
162 offsetof(struct htab_elem, lru_node), 168 offsetof(struct htab_elem, lru_node),
163 htab->elem_size, htab->map.max_entries); 169 htab->elem_size, htab->map.max_entries);
164 else 170 else
165 pcpu_freelist_populate(&htab->freelist, htab->elems, 171 pcpu_freelist_populate(&htab->freelist,
172 htab->elems + offsetof(struct htab_elem, fnode),
166 htab->elem_size, htab->map.max_entries); 173 htab->elem_size, htab->map.max_entries);
167 174
168 return 0; 175 return 0;
@@ -217,6 +224,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
217 int err, i; 224 int err, i;
218 u64 cost; 225 u64 cost;
219 226
227 BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
228 offsetof(struct htab_elem, hash_node.pprev));
229 BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
230 offsetof(struct htab_elem, hash_node.pprev));
231
220 if (lru && !capable(CAP_SYS_ADMIN)) 232 if (lru && !capable(CAP_SYS_ADMIN))
 221 /* LRU implementation is much more complicated than other 233 /* LRU implementation is much more complicated than other
222 * maps. Hence, limit to CAP_SYS_ADMIN for now. 234 * maps. Hence, limit to CAP_SYS_ADMIN for now.
@@ -326,7 +338,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
326 goto free_htab; 338 goto free_htab;
327 339
328 for (i = 0; i < htab->n_buckets; i++) { 340 for (i = 0; i < htab->n_buckets; i++) {
329 INIT_HLIST_HEAD(&htab->buckets[i].head); 341 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
330 raw_spin_lock_init(&htab->buckets[i].lock); 342 raw_spin_lock_init(&htab->buckets[i].lock);
331 } 343 }
332 344
@@ -366,20 +378,44 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
366 return &htab->buckets[hash & (htab->n_buckets - 1)]; 378 return &htab->buckets[hash & (htab->n_buckets - 1)];
367} 379}
368 380
369static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) 381static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
370{ 382{
371 return &__select_bucket(htab, hash)->head; 383 return &__select_bucket(htab, hash)->head;
372} 384}
373 385
374static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, 386/* this lookup function can only be called with bucket lock taken */
387static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
375 void *key, u32 key_size) 388 void *key, u32 key_size)
376{ 389{
390 struct hlist_nulls_node *n;
391 struct htab_elem *l;
392
393 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
394 if (l->hash == hash && !memcmp(&l->key, key, key_size))
395 return l;
396
397 return NULL;
398}
399
400/* can be called without the bucket lock. it will repeat the loop in
401 * the unlikely event that elements moved from one bucket into another
402 * while the link list is being walked
403 */
404static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
405 u32 hash, void *key,
406 u32 key_size, u32 n_buckets)
407{
408 struct hlist_nulls_node *n;
377 struct htab_elem *l; 409 struct htab_elem *l;
378 410
379 hlist_for_each_entry_rcu(l, head, hash_node) 411again:
412 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
380 if (l->hash == hash && !memcmp(&l->key, key, key_size)) 413 if (l->hash == hash && !memcmp(&l->key, key, key_size))
381 return l; 414 return l;
382 415
416 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
417 goto again;
418
383 return NULL; 419 return NULL;
384} 420}
385 421
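The retry works because each bucket's nulls marker was initialized with the bucket index (see the INIT_HLIST_NULLS_HEAD(..., i) hunk above): a lockless walk that ends on the wrong marker must have chased an element that was freed and reused in another bucket, so it restarts. A hedged generic sketch of the pattern, with a made-up struct item:

	struct item {
		struct hlist_nulls_node node;
		u32 hash;
	};

	static struct item *nulls_lookup(struct hlist_nulls_head *buckets,
					 u32 n_buckets, u32 hash)
	{
		struct hlist_nulls_node *n;
		struct item *it;
		u32 b = hash & (n_buckets - 1);

	again:
		hlist_nulls_for_each_entry_rcu(it, n, &buckets[b], node)
			if (it->hash == hash)
				return it;
		if (get_nulls_value(n) != b)	/* walk strayed into another bucket */
			goto again;
		return NULL;
	}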
@@ -387,7 +423,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
387static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) 423static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
388{ 424{
389 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 425 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
390 struct hlist_head *head; 426 struct hlist_nulls_head *head;
391 struct htab_elem *l; 427 struct htab_elem *l;
392 u32 hash, key_size; 428 u32 hash, key_size;
393 429
@@ -400,7 +436,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
400 436
401 head = select_bucket(htab, hash); 437 head = select_bucket(htab, hash);
402 438
403 l = lookup_elem_raw(head, hash, key, key_size); 439 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
404 440
405 return l; 441 return l;
406} 442}
@@ -433,8 +469,9 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
433static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) 469static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
434{ 470{
435 struct bpf_htab *htab = (struct bpf_htab *)arg; 471 struct bpf_htab *htab = (struct bpf_htab *)arg;
436 struct htab_elem *l, *tgt_l; 472 struct htab_elem *l = NULL, *tgt_l;
437 struct hlist_head *head; 473 struct hlist_nulls_head *head;
474 struct hlist_nulls_node *n;
438 unsigned long flags; 475 unsigned long flags;
439 struct bucket *b; 476 struct bucket *b;
440 477
@@ -444,9 +481,9 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
444 481
445 raw_spin_lock_irqsave(&b->lock, flags); 482 raw_spin_lock_irqsave(&b->lock, flags);
446 483
447 hlist_for_each_entry_rcu(l, head, hash_node) 484 hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
448 if (l == tgt_l) { 485 if (l == tgt_l) {
449 hlist_del_rcu(&l->hash_node); 486 hlist_nulls_del_rcu(&l->hash_node);
450 break; 487 break;
451 } 488 }
452 489
@@ -459,7 +496,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
459static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 496static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
460{ 497{
461 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 498 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
462 struct hlist_head *head; 499 struct hlist_nulls_head *head;
463 struct htab_elem *l, *next_l; 500 struct htab_elem *l, *next_l;
464 u32 hash, key_size; 501 u32 hash, key_size;
465 int i; 502 int i;
@@ -473,7 +510,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
473 head = select_bucket(htab, hash); 510 head = select_bucket(htab, hash);
474 511
475 /* lookup the key */ 512 /* lookup the key */
476 l = lookup_elem_raw(head, hash, key, key_size); 513 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
477 514
478 if (!l) { 515 if (!l) {
479 i = 0; 516 i = 0;
@@ -481,7 +518,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
481 } 518 }
482 519
483 /* key was found, get next key in the same bucket */ 520 /* key was found, get next key in the same bucket */
484 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)), 521 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
485 struct htab_elem, hash_node); 522 struct htab_elem, hash_node);
486 523
487 if (next_l) { 524 if (next_l) {
@@ -500,7 +537,7 @@ find_first_elem:
500 head = select_bucket(htab, i); 537 head = select_bucket(htab, i);
501 538
502 /* pick first element in the bucket */ 539 /* pick first element in the bucket */
503 next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), 540 next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
504 struct htab_elem, hash_node); 541 struct htab_elem, hash_node);
505 if (next_l) { 542 if (next_l) {
506 /* if it's not empty, just return it */ 543 /* if it's not empty, just return it */
@@ -582,9 +619,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
582 int err = 0; 619 int err = 0;
583 620
584 if (prealloc) { 621 if (prealloc) {
585 l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist); 622 struct pcpu_freelist_node *l;
586 if (!l_new) 623
624 l = pcpu_freelist_pop(&htab->freelist);
625 if (!l)
587 err = -E2BIG; 626 err = -E2BIG;
627 else
628 l_new = container_of(l, struct htab_elem, fnode);
588 } else { 629 } else {
589 if (atomic_inc_return(&htab->count) > htab->map.max_entries) { 630 if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
590 atomic_dec(&htab->count); 631 atomic_dec(&htab->count);
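
In the prealloc branch above, the freelist now hands back a struct pcpu_freelist_node and the enclosing element is recovered with container_of() instead of a cast, which stays correct even when the node is not the first member of struct htab_elem. A standalone sketch of the idiom, with invented types:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fnode { struct fnode *next; };

    struct elem {                   /* invented stand-in for htab_elem */
            unsigned int hash;
            struct fnode fnode;     /* deliberately not the first member */
    };

    int main(void)
    {
            struct elem e = { .hash = 42 };
            struct fnode *f = &e.fnode;     /* what a freelist hands back */
            struct elem *back = container_of(f, struct elem, fnode);

            printf("hash recovered via container_of: %u\n", back->hash);
            return 0;
    }
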
@@ -661,7 +702,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
661{ 702{
662 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 703 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
663 struct htab_elem *l_new = NULL, *l_old; 704 struct htab_elem *l_new = NULL, *l_old;
664 struct hlist_head *head; 705 struct hlist_nulls_head *head;
665 unsigned long flags; 706 unsigned long flags;
666 struct bucket *b; 707 struct bucket *b;
667 u32 key_size, hash; 708 u32 key_size, hash;
@@ -700,9 +741,9 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
700 /* add new element to the head of the list, so that 741 /* add new element to the head of the list, so that
701 * concurrent search will find it before old elem 742 * concurrent search will find it before old elem
702 */ 743 */
703 hlist_add_head_rcu(&l_new->hash_node, head); 744 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
704 if (l_old) { 745 if (l_old) {
705 hlist_del_rcu(&l_old->hash_node); 746 hlist_nulls_del_rcu(&l_old->hash_node);
706 free_htab_elem(htab, l_old); 747 free_htab_elem(htab, l_old);
707 } 748 }
708 ret = 0; 749 ret = 0;
@@ -716,7 +757,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
716{ 757{
717 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 758 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
718 struct htab_elem *l_new, *l_old = NULL; 759 struct htab_elem *l_new, *l_old = NULL;
719 struct hlist_head *head; 760 struct hlist_nulls_head *head;
720 unsigned long flags; 761 unsigned long flags;
721 struct bucket *b; 762 struct bucket *b;
722 u32 key_size, hash; 763 u32 key_size, hash;
@@ -757,10 +798,10 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
757 /* add new element to the head of the list, so that 798 /* add new element to the head of the list, so that
758 * concurrent search will find it before old elem 799 * concurrent search will find it before old elem
759 */ 800 */
760 hlist_add_head_rcu(&l_new->hash_node, head); 801 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
761 if (l_old) { 802 if (l_old) {
762 bpf_lru_node_set_ref(&l_new->lru_node); 803 bpf_lru_node_set_ref(&l_new->lru_node);
763 hlist_del_rcu(&l_old->hash_node); 804 hlist_nulls_del_rcu(&l_old->hash_node);
764 } 805 }
765 ret = 0; 806 ret = 0;
766 807
@@ -781,7 +822,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
781{ 822{
782 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 823 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
783 struct htab_elem *l_new = NULL, *l_old; 824 struct htab_elem *l_new = NULL, *l_old;
784 struct hlist_head *head; 825 struct hlist_nulls_head *head;
785 unsigned long flags; 826 unsigned long flags;
786 struct bucket *b; 827 struct bucket *b;
787 u32 key_size, hash; 828 u32 key_size, hash;
@@ -820,7 +861,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
820 ret = PTR_ERR(l_new); 861 ret = PTR_ERR(l_new);
821 goto err; 862 goto err;
822 } 863 }
823 hlist_add_head_rcu(&l_new->hash_node, head); 864 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
824 } 865 }
825 ret = 0; 866 ret = 0;
826err: 867err:
@@ -834,7 +875,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
834{ 875{
835 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 876 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
836 struct htab_elem *l_new = NULL, *l_old; 877 struct htab_elem *l_new = NULL, *l_old;
837 struct hlist_head *head; 878 struct hlist_nulls_head *head;
838 unsigned long flags; 879 unsigned long flags;
839 struct bucket *b; 880 struct bucket *b;
840 u32 key_size, hash; 881 u32 key_size, hash;
@@ -882,7 +923,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
882 } else { 923 } else {
883 pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size), 924 pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
884 value, onallcpus); 925 value, onallcpus);
885 hlist_add_head_rcu(&l_new->hash_node, head); 926 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
886 l_new = NULL; 927 l_new = NULL;
887 } 928 }
888 ret = 0; 929 ret = 0;
@@ -910,7 +951,7 @@ static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
910static int htab_map_delete_elem(struct bpf_map *map, void *key) 951static int htab_map_delete_elem(struct bpf_map *map, void *key)
911{ 952{
912 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 953 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
913 struct hlist_head *head; 954 struct hlist_nulls_head *head;
914 struct bucket *b; 955 struct bucket *b;
915 struct htab_elem *l; 956 struct htab_elem *l;
916 unsigned long flags; 957 unsigned long flags;
@@ -930,7 +971,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
930 l = lookup_elem_raw(head, hash, key, key_size); 971 l = lookup_elem_raw(head, hash, key, key_size);
931 972
932 if (l) { 973 if (l) {
933 hlist_del_rcu(&l->hash_node); 974 hlist_nulls_del_rcu(&l->hash_node);
934 free_htab_elem(htab, l); 975 free_htab_elem(htab, l);
935 ret = 0; 976 ret = 0;
936 } 977 }
@@ -942,7 +983,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
942static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) 983static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
943{ 984{
944 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 985 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
945 struct hlist_head *head; 986 struct hlist_nulls_head *head;
946 struct bucket *b; 987 struct bucket *b;
947 struct htab_elem *l; 988 struct htab_elem *l;
948 unsigned long flags; 989 unsigned long flags;
@@ -962,7 +1003,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
962 l = lookup_elem_raw(head, hash, key, key_size); 1003 l = lookup_elem_raw(head, hash, key, key_size);
963 1004
964 if (l) { 1005 if (l) {
965 hlist_del_rcu(&l->hash_node); 1006 hlist_nulls_del_rcu(&l->hash_node);
966 ret = 0; 1007 ret = 0;
967 } 1008 }
968 1009
@@ -977,12 +1018,12 @@ static void delete_all_elements(struct bpf_htab *htab)
977 int i; 1018 int i;
978 1019
979 for (i = 0; i < htab->n_buckets; i++) { 1020 for (i = 0; i < htab->n_buckets; i++) {
980 struct hlist_head *head = select_bucket(htab, i); 1021 struct hlist_nulls_head *head = select_bucket(htab, i);
981 struct hlist_node *n; 1022 struct hlist_nulls_node *n;
982 struct htab_elem *l; 1023 struct htab_elem *l;
983 1024
984 hlist_for_each_entry_safe(l, n, head, hash_node) { 1025 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
985 hlist_del_rcu(&l->hash_node); 1026 hlist_nulls_del_rcu(&l->hash_node);
986 if (l->state != HTAB_EXTRA_ELEM_USED) 1027 if (l->state != HTAB_EXTRA_ELEM_USED)
987 htab_elem_free(htab, l); 1028 htab_elem_free(htab, l);
988 } 1029 }
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 8bfe0afaee10..b37bd9ab7f57 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -500,9 +500,15 @@ unlock:
500 raw_spin_unlock(&trie->lock); 500 raw_spin_unlock(&trie->lock);
501} 501}
502 502
503static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
504{
505 return -ENOTSUPP;
506}
507
503static const struct bpf_map_ops trie_ops = { 508static const struct bpf_map_ops trie_ops = {
504 .map_alloc = trie_alloc, 509 .map_alloc = trie_alloc,
505 .map_free = trie_free, 510 .map_free = trie_free,
511 .map_get_next_key = trie_get_next_key,
506 .map_lookup_elem = trie_lookup_elem, 512 .map_lookup_elem = trie_lookup_elem,
507 .map_update_elem = trie_update_elem, 513 .map_update_elem = trie_update_elem,
508 .map_delete_elem = trie_delete_elem, 514 .map_delete_elem = trie_delete_elem,
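
The trie_get_next_key() stub exists so that generic map code, which can invoke ->map_get_next_key unconditionally from the bpf syscall path, returns a clean -ENOTSUPP for LPM tries instead of jumping through a NULL ops pointer. A small model of stubbing a mandatory ops slot; names are invented and ENOTSUPP is a kernel-internal errno redefined here for the demo:

    #include <stdio.h>

    #define ENOTSUPP 524    /* kernel-internal errno, redefined for the demo */

    struct map_ops {                /* invented, modelled on bpf_map_ops */
            int (*get_next_key)(void *map, void *key, void *next_key);
    };

    static int nosupport_stub(void *map, void *key, void *next_key)
    {
            return -ENOTSUPP;       /* an explicit stub beats a NULL pointer */
    }

    static const struct map_ops trie_ops = {
            .get_next_key = nosupport_stub,
    };

    int main(void)
    {
            printf("get_next_key: %d\n", trie_ops.get_next_key(0, 0, 0));
            return 0;
    }
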
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 56eba9caa632..1dc22f6b49f5 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1329,7 +1329,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
1329 struct task_struct *task; 1329 struct task_struct *task;
1330 int count = 0; 1330 int count = 0;
1331 1331
1332 seq_printf(seq, "css_set %p\n", cset); 1332 seq_printf(seq, "css_set %pK\n", cset);
1333 1333
1334 list_for_each_entry(task, &cset->tasks, cg_list) { 1334 list_for_each_entry(task, &cset->tasks, cg_list) {
1335 if (count++ > MAX_TASKS_SHOWN_PER_CSS) 1335 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 0125589c7428..48851327a15e 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2669,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css)
2669 * 2669 *
2670 * Returns 0 on success, -errno on failure. On failure, csses which have 2670 * Returns 0 on success, -errno on failure. On failure, csses which have
2671 * been processed already aren't cleaned up. The caller is responsible for 2671 * been processed already aren't cleaned up. The caller is responsible for
2672 * cleaning up with cgroup_apply_control_disble(). 2672 * cleaning up with cgroup_apply_control_disable().
2673 */ 2673 */
2674static int cgroup_apply_control_enable(struct cgroup *cgrp) 2674static int cgroup_apply_control_enable(struct cgroup *cgrp)
2675{ 2675{
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index e756dae49300..2237201d66d5 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
229 /* Only log the first time events_limit is incremented. */ 229 /* Only log the first time events_limit is incremented. */
230 if (atomic64_inc_return(&pids->events_limit) == 1) { 230 if (atomic64_inc_return(&pids->events_limit) == 1) {
231 pr_info("cgroup: fork rejected by pids controller in "); 231 pr_info("cgroup: fork rejected by pids controller in ");
232 pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id)); 232 pr_cont_cgroup_path(css->cgroup);
233 pr_cont("\n"); 233 pr_cont("\n");
234 } 234 }
235 cgroup_file_notify(&pids->events_file); 235 cgroup_file_notify(&pids->events_file);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6f41548f2e32..a17ed56c8ce1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -998,7 +998,7 @@ list_update_cgroup_event(struct perf_event *event,
998 */ 998 */
999#define PERF_CPU_HRTIMER (1000 / HZ) 999#define PERF_CPU_HRTIMER (1000 / HZ)
1000/* 1000/*
1001 * function must be called with interrupts disbled 1001 * function must be called with interrupts disabled
1002 */ 1002 */
1003static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) 1003static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
1004{ 1004{
diff --git a/kernel/exit.c b/kernel/exit.c
index e126ebf2400c..516acdb0e0ec 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -554,7 +554,6 @@ static void exit_mm(void)
554 enter_lazy_tlb(mm, current); 554 enter_lazy_tlb(mm, current);
555 task_unlock(current); 555 task_unlock(current);
556 mm_update_next_owner(mm); 556 mm_update_next_owner(mm);
557 userfaultfd_exit(mm);
558 mmput(mm); 557 mmput(mm);
559 if (test_thread_flag(TIF_MEMDIE)) 558 if (test_thread_flag(TIF_MEMDIE))
560 exit_oom_victim(); 559 exit_oom_victim();
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index b56a558e406d..b118735fea9d 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -614,13 +614,13 @@ static int kexec_calculate_store_digests(struct kimage *image)
614 ret = crypto_shash_final(desc, digest); 614 ret = crypto_shash_final(desc, digest);
615 if (ret) 615 if (ret)
616 goto out_free_digest; 616 goto out_free_digest;
617 ret = kexec_purgatory_get_set_symbol(image, "sha_regions", 617 ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
618 sha_regions, sha_region_sz, 0); 618 sha_regions, sha_region_sz, 0);
619 if (ret) 619 if (ret)
620 goto out_free_digest; 620 goto out_free_digest;
621 621
622 ret = kexec_purgatory_get_set_symbol(image, "sha256_digest", 622 ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
623 digest, SHA256_DIGEST_SIZE, 0); 623 digest, SHA256_DIGEST_SIZE, 0);
624 if (ret) 624 if (ret)
625 goto out_free_digest; 625 goto out_free_digest;
626 } 626 }
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 4cef7e4706b0..799a8a452187 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -15,11 +15,7 @@ int kimage_is_destination_range(struct kimage *image,
15extern struct mutex kexec_mutex; 15extern struct mutex kexec_mutex;
16 16
17#ifdef CONFIG_KEXEC_FILE 17#ifdef CONFIG_KEXEC_FILE
18struct kexec_sha_region { 18#include <linux/purgatory.h>
19 unsigned long start;
20 unsigned long len;
21};
22
23void kimage_file_post_load_cleanup(struct kimage *image); 19void kimage_file_post_load_cleanup(struct kimage *image);
24#else /* CONFIG_KEXEC_FILE */ 20#else /* CONFIG_KEXEC_FILE */
25static inline void kimage_file_post_load_cleanup(struct kimage *image) { } 21static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 12e38c213b70..a95e5d1f4a9c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3262,10 +3262,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3262 if (depth) { 3262 if (depth) {
3263 hlock = curr->held_locks + depth - 1; 3263 hlock = curr->held_locks + depth - 1;
3264 if (hlock->class_idx == class_idx && nest_lock) { 3264 if (hlock->class_idx == class_idx && nest_lock) {
3265 if (hlock->references) 3265 if (hlock->references) {
3266 /*
3267 * Check: unsigned int references:12, overflow.
3268 */
3269 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3270 return 0;
3271
3266 hlock->references++; 3272 hlock->references++;
3267 else 3273 } else {
3268 hlock->references = 2; 3274 hlock->references = 2;
3275 }
3269 3276
3270 return 1; 3277 return 1;
3271 } 3278 }
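
hlock->references is a 12-bit bitfield, so re-acquiring the same nested lock more than 4095 times would silently wrap it to zero and corrupt the held-lock accounting; the added DEBUG_LOCKS_WARN_ON refuses the acquisition at the saturation point instead. (The stress(4096 -> 4095) change in test-ww_mutex.c below keeps that stress test just under the new limit.) A standalone demonstration of guarding the wraparound:

    #include <stdio.h>

    struct held_lock {
            unsigned int references:12;     /* wraps silently at 4096 */
    };

    static int take_ref(struct held_lock *h)
    {
            if (h->references == (1 << 12) - 1)
                    return 0;               /* would overflow: refuse */
            h->references++;
            return 1;
    }

    int main(void)
    {
            struct held_lock h = { .references = 4094 };

            printf("take=%d refs=%u\n", take_ref(&h), (unsigned)h.references);
            printf("take=%d refs=%u\n", take_ref(&h), (unsigned)h.references);
            return 0;
    }
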
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index da6c9a34f62f..6b7abb334ca6 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -50,7 +50,7 @@ static void test_mutex_work(struct work_struct *work)
50 50
51 if (mtx->flags & TEST_MTX_TRY) { 51 if (mtx->flags & TEST_MTX_TRY) {
52 while (!ww_mutex_trylock(&mtx->mutex)) 52 while (!ww_mutex_trylock(&mtx->mutex))
53 cpu_relax(); 53 cond_resched();
54 } else { 54 } else {
55 ww_mutex_lock(&mtx->mutex, NULL); 55 ww_mutex_lock(&mtx->mutex, NULL);
56 } 56 }
@@ -88,7 +88,7 @@ static int __test_mutex(unsigned int flags)
88 ret = -EINVAL; 88 ret = -EINVAL;
89 break; 89 break;
90 } 90 }
91 cpu_relax(); 91 cond_resched();
92 } while (time_before(jiffies, timeout)); 92 } while (time_before(jiffies, timeout));
93 } else { 93 } else {
94 ret = wait_for_completion_timeout(&mtx.done, TIMEOUT); 94 ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
@@ -627,7 +627,7 @@ static int __init test_ww_mutex_init(void)
627 if (ret) 627 if (ret)
628 return ret; 628 return ret;
629 629
630 ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL); 630 ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
631 if (ret) 631 if (ret)
632 return ret; 632 return ret;
633 633
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 06123234f118..07e85e5229da 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -247,11 +247,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
247 align_start = res->start & ~(SECTION_SIZE - 1); 247 align_start = res->start & ~(SECTION_SIZE - 1);
248 align_size = ALIGN(resource_size(res), SECTION_SIZE); 248 align_size = ALIGN(resource_size(res), SECTION_SIZE);
249 249
250 lock_device_hotplug();
251 mem_hotplug_begin(); 250 mem_hotplug_begin();
252 arch_remove_memory(align_start, align_size); 251 arch_remove_memory(align_start, align_size);
253 mem_hotplug_done(); 252 mem_hotplug_done();
254 unlock_device_hotplug();
255 253
256 untrack_pfn(NULL, PHYS_PFN(align_start), align_size); 254 untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
257 pgmap_radix_release(res); 255 pgmap_radix_release(res);
@@ -364,11 +362,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
364 if (error) 362 if (error)
365 goto err_pfn_remap; 363 goto err_pfn_remap;
366 364
367 lock_device_hotplug();
368 mem_hotplug_begin(); 365 mem_hotplug_begin();
369 error = arch_add_memory(nid, align_start, align_size, true); 366 error = arch_add_memory(nid, align_start, align_size, true);
370 mem_hotplug_done(); 367 mem_hotplug_done();
371 unlock_device_hotplug();
372 if (error) 368 if (error)
373 goto err_add_memory; 369 goto err_add_memory;
374 370
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 956383844116..3b31fc05a0f1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
3287 struct task_struct *p; 3287 struct task_struct *p;
3288 3288
3289 /* 3289 /*
3290 * Optimization: we know that if all tasks are in 3290 * Optimization: we know that if all tasks are in the fair class we can
3291 * the fair class we can call that function directly: 3291 * call that function directly, but only if the @prev task wasn't of a
3292 * higher scheduling class, because otherwise those loose the
3293 * opportunity to pull in more work from other CPUs.
3292 */ 3294 */
3293 if (likely(rq->nr_running == rq->cfs.h_nr_running)) { 3295 if (likely((prev->sched_class == &idle_sched_class ||
3296 prev->sched_class == &fair_sched_class) &&
3297 rq->nr_running == rq->cfs.h_nr_running)) {
3298
3294 p = fair_sched_class.pick_next_task(rq, prev, rf); 3299 p = fair_sched_class.pick_next_task(rq, prev, rf);
3295 if (unlikely(p == RETRY_TASK)) 3300 if (unlikely(p == RETRY_TASK))
3296 goto again; 3301 goto again;
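
The reworked fast path only short-circuits to fair_sched_class.pick_next_task() when prev itself came from the idle or fair class: if a higher class (RT, deadline) was running, the full class loop must run so those classes get their chance to pull work from other CPUs, even though every currently queued task is fair. A toy model of the gating condition, all names invented:

    #include <stdio.h>
    #include <stdbool.h>

    enum sched_class { IDLE_CLASS, FAIR_CLASS, RT_CLASS, DL_CLASS };

    struct rq {
            unsigned int nr_running;        /* all runnable tasks */
            unsigned int cfs_h_nr_running;  /* runnable fair tasks */
    };

    /* fast path is safe only if no higher class could want to pull work */
    static bool fair_fast_path_ok(enum sched_class prev_class, struct rq *rq)
    {
            return (prev_class == IDLE_CLASS || prev_class == FAIR_CLASS) &&
                   rq->nr_running == rq->cfs_h_nr_running;
    }

    int main(void)
    {
            struct rq rq = { .nr_running = 3, .cfs_h_nr_running = 3 };

            printf("prev was fair: %d\n", fair_fast_path_ok(FAIR_CLASS, &rq));
            printf("prev was rt:   %d\n", fair_fast_path_ok(RT_CLASS, &rq));
            return 0;
    }
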
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 8f8de3d4d6b7..cd7cd489f739 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -36,6 +36,7 @@ struct sugov_policy {
36 u64 last_freq_update_time; 36 u64 last_freq_update_time;
37 s64 freq_update_delay_ns; 37 s64 freq_update_delay_ns;
38 unsigned int next_freq; 38 unsigned int next_freq;
39 unsigned int cached_raw_freq;
39 40
40 /* The next fields are only needed if fast switch cannot be used. */ 41 /* The next fields are only needed if fast switch cannot be used. */
41 struct irq_work irq_work; 42 struct irq_work irq_work;
@@ -52,7 +53,6 @@ struct sugov_cpu {
52 struct update_util_data update_util; 53 struct update_util_data update_util;
53 struct sugov_policy *sg_policy; 54 struct sugov_policy *sg_policy;
54 55
55 unsigned int cached_raw_freq;
56 unsigned long iowait_boost; 56 unsigned long iowait_boost;
57 unsigned long iowait_boost_max; 57 unsigned long iowait_boost_max;
58 u64 last_update; 58 u64 last_update;
@@ -116,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
116 116
117/** 117/**
118 * get_next_freq - Compute a new frequency for a given cpufreq policy. 118 * get_next_freq - Compute a new frequency for a given cpufreq policy.
119 * @sg_cpu: schedutil cpu object to compute the new frequency for. 119 * @sg_policy: schedutil policy object to compute the new frequency for.
120 * @util: Current CPU utilization. 120 * @util: Current CPU utilization.
121 * @max: CPU capacity. 121 * @max: CPU capacity.
122 * 122 *
@@ -136,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
136 * next_freq (as calculated above) is returned, subject to policy min/max and 136 * next_freq (as calculated above) is returned, subject to policy min/max and
137 * cpufreq driver limitations. 137 * cpufreq driver limitations.
138 */ 138 */
139static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util, 139static unsigned int get_next_freq(struct sugov_policy *sg_policy,
140 unsigned long max) 140 unsigned long util, unsigned long max)
141{ 141{
142 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
143 struct cpufreq_policy *policy = sg_policy->policy; 142 struct cpufreq_policy *policy = sg_policy->policy;
144 unsigned int freq = arch_scale_freq_invariant() ? 143 unsigned int freq = arch_scale_freq_invariant() ?
145 policy->cpuinfo.max_freq : policy->cur; 144 policy->cpuinfo.max_freq : policy->cur;
146 145
147 freq = (freq + (freq >> 2)) * util / max; 146 freq = (freq + (freq >> 2)) * util / max;
148 147
149 if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX) 148 if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
150 return sg_policy->next_freq; 149 return sg_policy->next_freq;
151 sg_cpu->cached_raw_freq = freq; 150 sg_policy->cached_raw_freq = freq;
152 return cpufreq_driver_resolve_freq(policy, freq); 151 return cpufreq_driver_resolve_freq(policy, freq);
153} 152}
154 153
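
With cached_raw_freq moved into sugov_policy, where it belongs as policy-wide shared state, get_next_freq() becomes a pure function of policy inputs: the raw frequency is 1.25 * base * util / max, the 1.25 written as freq + (freq >> 2). Worked integer arithmetic, assuming a 2 GHz cap purely for illustration:

    #include <stdio.h>

    /* raw next frequency: 1.25 * base * util / max, integer arithmetic */
    static unsigned long raw_next_freq(unsigned long base_khz,
                                       unsigned long util, unsigned long max)
    {
            unsigned long freq = base_khz;

            return (freq + (freq >> 2)) * util / max;
    }

    int main(void)
    {
            /* 2 GHz cap, half utilization: 1.25 * 2000000 * 0.5 */
            printf("%lu kHz\n", raw_next_freq(2000000, 512, 1024));  /* 1250000 */
            /* full utilization overshoots the cap; clamped later */
            printf("%lu kHz\n", raw_next_freq(2000000, 1024, 1024)); /* 2500000 */
            return 0;
    }

The raw value can exceed the cap, as the second case shows; the policy min/max clamp is applied later, as the kernel-doc context above notes.
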
@@ -213,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
213 } else { 212 } else {
214 sugov_get_util(&util, &max); 213 sugov_get_util(&util, &max);
215 sugov_iowait_boost(sg_cpu, &util, &max); 214 sugov_iowait_boost(sg_cpu, &util, &max);
216 next_f = get_next_freq(sg_cpu, util, max); 215 next_f = get_next_freq(sg_policy, util, max);
217 } 216 }
218 sugov_update_commit(sg_policy, time, next_f); 217 sugov_update_commit(sg_policy, time, next_f);
219} 218}
@@ -267,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
267 sugov_iowait_boost(j_sg_cpu, &util, &max); 266 sugov_iowait_boost(j_sg_cpu, &util, &max);
268 } 267 }
269 268
270 return get_next_freq(sg_cpu, util, max); 269 return get_next_freq(sg_policy, util, max);
271} 270}
272 271
273static void sugov_update_shared(struct update_util_data *hook, u64 time, 272static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -580,6 +579,7 @@ static int sugov_start(struct cpufreq_policy *policy)
580 sg_policy->next_freq = UINT_MAX; 579 sg_policy->next_freq = UINT_MAX;
581 sg_policy->work_in_progress = false; 580 sg_policy->work_in_progress = false;
582 sg_policy->need_freq_update = false; 581 sg_policy->need_freq_update = false;
582 sg_policy->cached_raw_freq = 0;
583 583
584 for_each_cpu(cpu, policy->cpus) { 584 for_each_cpu(cpu, policy->cpus) {
585 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); 585 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
@@ -590,7 +590,6 @@ static int sugov_start(struct cpufreq_policy *policy)
590 sg_cpu->max = 0; 590 sg_cpu->max = 0;
591 sg_cpu->flags = SCHED_CPUFREQ_RT; 591 sg_cpu->flags = SCHED_CPUFREQ_RT;
592 sg_cpu->last_update = 0; 592 sg_cpu->last_update = 0;
593 sg_cpu->cached_raw_freq = 0;
594 sg_cpu->iowait_boost = 0; 593 sg_cpu->iowait_boost = 0;
595 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; 594 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
596 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, 595 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e88b35ac157..dea138964b91 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
5799 * Due to large variance we need a large fuzz factor; hackbench in 5799 * Due to large variance we need a large fuzz factor; hackbench in
5800 * particularly is sensitive here. 5800 * particularly is sensitive here.
5801 */ 5801 */
5802 if ((avg_idle / 512) < avg_cost) 5802 if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
5803 return -1; 5803 return -1;
5804 5804
5805 time = local_clock(); 5805 time = local_clock();
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 69631fa46c2f..1b3c8189b286 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
51 */ 51 */
52SCHED_FEAT(TTWU_QUEUE, true) 52SCHED_FEAT(TTWU_QUEUE, true)
53 53
54/*
55 * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
56 */
57SCHED_FEAT(SIS_AVG_CPU, false)
58
54#ifdef HAVE_RT_PUSH_IPI 59#ifdef HAVE_RT_PUSH_IPI
55/* 60/*
56 * In order to avoid a thundering herd attack of CPUs that are 61 * In order to avoid a thundering herd attack of CPUs that are
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4d2ea6f25568..b8c84c6dee64 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -242,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
242} 242}
243EXPORT_SYMBOL(prepare_to_wait_event); 243EXPORT_SYMBOL(prepare_to_wait_event);
244 244
245/*
246 * Note! These two wait functions are entered with the
247 * wait-queue lock held (and interrupts off in the _irq
248 * case), so there is no race with testing the wakeup
249 * condition in the caller before they add the wait
250 * entry to the wake queue.
251 */
252int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
253{
254 if (likely(list_empty(&wait->task_list)))
255 __add_wait_queue_tail(wq, wait);
256
257 set_current_state(TASK_INTERRUPTIBLE);
258 if (signal_pending(current))
259 return -ERESTARTSYS;
260
261 spin_unlock(&wq->lock);
262 schedule();
263 spin_lock(&wq->lock);
264 return 0;
265}
266EXPORT_SYMBOL(do_wait_intr);
267
268int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
269{
270 if (likely(list_empty(&wait->task_list)))
271 __add_wait_queue_tail(wq, wait);
272
273 set_current_state(TASK_INTERRUPTIBLE);
274 if (signal_pending(current))
275 return -ERESTARTSYS;
276
277 spin_unlock_irq(&wq->lock);
278 schedule();
279 spin_lock_irq(&wq->lock);
280 return 0;
281}
282EXPORT_SYMBOL(do_wait_intr_irq);
283
245/** 284/**
246 * finish_wait - clean up after waiting in a queue 285 * finish_wait - clean up after waiting in a queue
247 * @q: waitqueue waited on 286 * @q: waitqueue waited on
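
do_wait_intr() and do_wait_intr_irq() differ from prepare_to_wait() in that the caller already holds wq->lock (with interrupts off in the _irq flavour), so testing the wakeup condition and queueing the wait entry happen in one critical section with no race window; the lock is dropped only around schedule(). A hedged sketch of one plausible calling pattern; the condition, the cleanup and the naming are illustrative, not taken from an in-tree user:

    DEFINE_WAIT(wait);
    int err = 0;

    spin_lock(&wq.lock);                        /* caller takes the lock */
    while (!condition && !err)
            err = do_wait_intr(&wq, &wait);     /* returns with lock re-held */
    if (!list_empty(&wait.task_list))
            list_del_init(&wait.task_list);     /* dequeue under the lock */
    __set_current_state(TASK_RUNNING);
    spin_unlock(&wq.lock);
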
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7906b3f0c41a..497719127bf9 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second)
125 shift_hz += cycles_per_tick/2; 125 shift_hz += cycles_per_tick/2;
126 do_div(shift_hz, cycles_per_tick); 126 do_div(shift_hz, cycles_per_tick);
127 /* Calculate nsec_per_tick using shift_hz */ 127 /* Calculate nsec_per_tick using shift_hz */
128 nsec_per_tick = (u64)TICK_NSEC << 8; 128 nsec_per_tick = (u64)NSEC_PER_SEC << 8;
129 nsec_per_tick += (u32)shift_hz/2; 129 nsec_per_tick += (u32)shift_hz/2;
130 do_div(nsec_per_tick, (u32)shift_hz); 130 do_div(nsec_per_tick, (u32)shift_hz);
131 131
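
The jiffies change is a units fix: shift_hz already holds ticks per second (hz << 8), so dividing TICK_NSEC, which is itself the length of one tick, by it yielded nonsense; one tick must be derived from a full second, NSEC_PER_SEC. Worked numbers for a clock measured at exactly 250 Hz:

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            /* clock measured at exactly 250 Hz, in <<8 fixed point */
            uint64_t shift_hz = 250ULL << 8;

            /* fixed code: one tick is one HZ'th of a whole second */
            uint64_t nsec_per_tick =
                    ((NSEC_PER_SEC << 8) + shift_hz / 2) / shift_hz;

            printf("nsec_per_tick = %llu\n",            /* 4000000: 4 ms */
                   (unsigned long long)nsec_per_tick);
            return 0;
    }

With the old TICK_NSEC numerator the same computation returns 16000, off by a factor of HZ.
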
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d5038005eb5d..d4a06e714645 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -429,7 +429,7 @@ config BLK_DEV_IO_TRACE
429 429
430 If unsure, say N. 430 If unsure, say N.
431 431
432config KPROBE_EVENT 432config KPROBE_EVENTS
433 depends on KPROBES 433 depends on KPROBES
434 depends on HAVE_REGS_AND_STACK_ACCESS_API 434 depends on HAVE_REGS_AND_STACK_ACCESS_API
435 bool "Enable kprobes-based dynamic events" 435 bool "Enable kprobes-based dynamic events"
@@ -447,7 +447,7 @@ config KPROBE_EVENT
447 This option is also required by perf-probe subcommand of perf tools. 447 This option is also required by perf-probe subcommand of perf tools.
448 If you want to use perf tools, this option is strongly recommended. 448 If you want to use perf tools, this option is strongly recommended.
449 449
450config UPROBE_EVENT 450config UPROBE_EVENTS
451 bool "Enable uprobes-based dynamic events" 451 bool "Enable uprobes-based dynamic events"
452 depends on ARCH_SUPPORTS_UPROBES 452 depends on ARCH_SUPPORTS_UPROBES
453 depends on MMU 453 depends on MMU
@@ -466,7 +466,7 @@ config UPROBE_EVENT
466 466
467config BPF_EVENTS 467config BPF_EVENTS
468 depends on BPF_SYSCALL 468 depends on BPF_SYSCALL
469 depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS 469 depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
470 bool 470 bool
471 default y 471 default y
472 help 472 help
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index e57980845549..90f2701d92a7 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -57,7 +57,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
57obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o 57obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
58obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o 58obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
59obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o 59obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
60obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o 60obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
61obj-$(CONFIG_TRACEPOINTS) += power-traces.o 61obj-$(CONFIG_TRACEPOINTS) += power-traces.o
62ifeq ($(CONFIG_PM),y) 62ifeq ($(CONFIG_PM),y)
63obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o 63obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
@@ -66,7 +66,7 @@ ifeq ($(CONFIG_TRACING),y)
66obj-$(CONFIG_KGDB_KDB) += trace_kdb.o 66obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
67endif 67endif
68obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o 68obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
69obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o 69obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
70 70
71obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o 71obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
72 72
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d1597c9ee30..b9691ee8f6c1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4416,16 +4416,24 @@ static int __init set_graph_notrace_function(char *str)
4416} 4416}
4417__setup("ftrace_graph_notrace=", set_graph_notrace_function); 4417__setup("ftrace_graph_notrace=", set_graph_notrace_function);
4418 4418
4419static int __init set_graph_max_depth_function(char *str)
4420{
4421 if (!str)
4422 return 0;
4423 fgraph_max_depth = simple_strtoul(str, NULL, 0);
4424 return 1;
4425}
4426__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
4427
4419static void __init set_ftrace_early_graph(char *buf, int enable) 4428static void __init set_ftrace_early_graph(char *buf, int enable)
4420{ 4429{
4421 int ret; 4430 int ret;
4422 char *func; 4431 char *func;
4423 struct ftrace_hash *hash; 4432 struct ftrace_hash *hash;
4424 4433
4425 if (enable) 4434 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4426 hash = ftrace_graph_hash; 4435 if (WARN_ON(!hash))
4427 else 4436 return;
4428 hash = ftrace_graph_notrace_hash;
4429 4437
4430 while (buf) { 4438 while (buf) {
4431 func = strsep(&buf, ","); 4439 func = strsep(&buf, ",");
@@ -4435,6 +4443,11 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
4435 printk(KERN_DEBUG "ftrace: function %s not " 4443 printk(KERN_DEBUG "ftrace: function %s not "
4436 "traceable\n", func); 4444 "traceable\n", func);
4437 } 4445 }
4446
4447 if (enable)
4448 ftrace_graph_hash = hash;
4449 else
4450 ftrace_graph_notrace_hash = hash;
4438} 4451}
4439#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4452#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4440 4453
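
The new __setup() hook makes the graph tracer's depth limit settable from the boot command line; the handler is just simple_strtoul() on whatever follows "ftrace_graph_max_depth=". A userspace model of the parsing, with strtoul() standing in for the kernel helper:

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long fgraph_max_depth;

    /* models the handler for "ftrace_graph_max_depth=<n>" */
    static int set_max_depth(const char *str)
    {
            if (!str)
                    return 0;
            fgraph_max_depth = strtoul(str, NULL, 0);  /* base 0: dec/oct/hex */
            return 1;
    }

    int main(void)
    {
            set_max_depth("7");
            printf("fgraph_max_depth = %lu\n", fgraph_max_depth);
            set_max_depth("0x10");
            printf("fgraph_max_depth = %lu\n", fgraph_max_depth);
            return 0;
    }
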
@@ -5488,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
5488 * Normally the mcount trampoline will call the ops->func, but there 5501 * Normally the mcount trampoline will call the ops->func, but there
5489 * are times that it should not. For example, if the ops does not 5502 * are times that it should not. For example, if the ops does not
5490 * have its own recursion protection, then it should call the 5503 * have its own recursion protection, then it should call the
5491 * ftrace_ops_recurs_func() instead. 5504 * ftrace_ops_assist_func() instead.
5492 * 5505 *
5493 * Returns the function that the trampoline should call for @ops. 5506 * Returns the function that the trampoline should call for @ops.
5494 */ 5507 */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 707445ceb7ef..f35109514a01 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4341,22 +4341,22 @@ static const char readme_msg[] =
4341 "\t\t\t traces\n" 4341 "\t\t\t traces\n"
4342#endif 4342#endif
4343#endif /* CONFIG_STACK_TRACER */ 4343#endif /* CONFIG_STACK_TRACER */
4344#ifdef CONFIG_KPROBE_EVENT 4344#ifdef CONFIG_KPROBE_EVENTS
4345 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" 4345 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4346 "\t\t\t Write into this file to define/undefine new trace events.\n" 4346 "\t\t\t Write into this file to define/undefine new trace events.\n"
4347#endif 4347#endif
4348#ifdef CONFIG_UPROBE_EVENT 4348#ifdef CONFIG_UPROBE_EVENTS
4349 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" 4349 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4350 "\t\t\t Write into this file to define/undefine new trace events.\n" 4350 "\t\t\t Write into this file to define/undefine new trace events.\n"
4351#endif 4351#endif
4352#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT) 4352#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4353 "\t accepts: event-definitions (one definition per line)\n" 4353 "\t accepts: event-definitions (one definition per line)\n"
4354 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n" 4354 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4355 "\t -:[<group>/]<event>\n" 4355 "\t -:[<group>/]<event>\n"
4356#ifdef CONFIG_KPROBE_EVENT 4356#ifdef CONFIG_KPROBE_EVENTS
4357 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" 4357 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4358#endif 4358#endif
4359#ifdef CONFIG_UPROBE_EVENT 4359#ifdef CONFIG_UPROBE_EVENTS
4360 "\t place: <path>:<offset>\n" 4360 "\t place: <path>:<offset>\n"
4361#endif 4361#endif
4362 "\t args: <name>=fetcharg[:type]\n" 4362 "\t args: <name>=fetcharg[:type]\n"
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 0c0ae54d44c6..903273c93e61 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -248,7 +248,7 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \
248#define FETCH_TYPE_STRING 0 248#define FETCH_TYPE_STRING 0
249#define FETCH_TYPE_STRSIZE 1 249#define FETCH_TYPE_STRSIZE 1
250 250
251#ifdef CONFIG_KPROBE_EVENT 251#ifdef CONFIG_KPROBE_EVENTS
252struct symbol_cache; 252struct symbol_cache;
253unsigned long update_symbol_cache(struct symbol_cache *sc); 253unsigned long update_symbol_cache(struct symbol_cache *sc);
254void free_symbol_cache(struct symbol_cache *sc); 254void free_symbol_cache(struct symbol_cache *sc);
@@ -278,7 +278,7 @@ alloc_symbol_cache(const char *sym, long offset)
278{ 278{
279 return NULL; 279 return NULL;
280} 280}
281#endif /* CONFIG_KPROBE_EVENT */ 281#endif /* CONFIG_KPROBE_EVENTS */
282 282
283struct probe_arg { 283struct probe_arg {
284 struct fetch_param fetch; 284 struct fetch_param fetch;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 1d68b5b7ad41..5fb1f2c87e6b 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -65,7 +65,7 @@ void stack_trace_print(void)
65} 65}
66 66
67/* 67/*
68 * When arch-specific code overides this function, the following 68 * When arch-specific code overrides this function, the following
69 * data should be filled up, assuming stack_trace_max_lock is held to 69 * data should be filled up, assuming stack_trace_max_lock is held to
70 * prevent concurrent updates. 70 * prevent concurrent updates.
71 * stack_trace_index[] 71 * stack_trace_index[]
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 62630a40ab3a..b4eeee03934f 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
144 144
145 new->ns = ns; 145 new->ns = ns;
146 new->uid = uid; 146 new->uid = uid;
147 atomic_set(&new->count, 0); 147 new->count = 0;
148 148
149 spin_lock_irq(&ucounts_lock); 149 spin_lock_irq(&ucounts_lock);
150 ucounts = find_ucounts(ns, uid, hashent); 150 ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
155 ucounts = new; 155 ucounts = new;
156 } 156 }
157 } 157 }
158 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) 158 if (ucounts->count == INT_MAX)
159 ucounts = NULL; 159 ucounts = NULL;
160 else
161 ucounts->count += 1;
160 spin_unlock_irq(&ucounts_lock); 162 spin_unlock_irq(&ucounts_lock);
161 return ucounts; 163 return ucounts;
162} 164}
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
165{ 167{
166 unsigned long flags; 168 unsigned long flags;
167 169
168 if (atomic_dec_and_test(&ucounts->count)) { 170 spin_lock_irqsave(&ucounts_lock, flags);
169 spin_lock_irqsave(&ucounts_lock, flags); 171 ucounts->count -= 1;
172 if (!ucounts->count)
170 hlist_del_init(&ucounts->node); 173 hlist_del_init(&ucounts->node);
171 spin_unlock_irqrestore(&ucounts_lock, flags); 174 else
175 ucounts = NULL;
176 spin_unlock_irqrestore(&ucounts_lock, flags);
172 177
173 kfree(ucounts); 178 kfree(ucounts);
174 }
175} 179}
176 180
177static inline bool atomic_inc_below(atomic_t *v, int u) 181static inline bool atomic_inc_below(atomic_t *v, int u)
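
The ucounts conversion goes the opposite way from most scalability work: every path that touches ucounts->count already takes ucounts_lock, so the atomic_t bought nothing, and a plain int lets the check-then-increment be written directly under the lock. It also means the final decrement and the hash-table removal in put_ucounts() happen atomically with respect to get_ucounts(), closing the window where a lookup could revive a ucounts whose count had already reached zero. A userspace sketch of the same bounded get/put under a mutex, names invented:

    #include <stdio.h>
    #include <stdbool.h>
    #include <limits.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int count;               /* plain int: the lock is the guarantee */

    static bool get_ref(void)
    {
            bool ok;

            pthread_mutex_lock(&lock);
            ok = count != INT_MAX;  /* refuse the increment at the cap */
            if (ok)
                    count += 1;
            pthread_mutex_unlock(&lock);
            return ok;
    }

    static bool put_ref(void)       /* true when the last reference dropped */
    {
            bool last;

            pthread_mutex_lock(&lock);
            count -= 1;
            last = count == 0;      /* removal would happen here, atomically */
            pthread_mutex_unlock(&lock);
            return last;
    }

    int main(void)
    {
            get_ref();
            get_ref();
            printf("last? %d\n", put_ref());    /* 0 */
            printf("last? %d\n", put_ref());    /* 1 */
            return 0;
    }
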
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 072cbc9b175d..c0168b7da1ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1507,6 +1507,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1507 struct timer_list *timer = &dwork->timer; 1507 struct timer_list *timer = &dwork->timer;
1508 struct work_struct *work = &dwork->work; 1508 struct work_struct *work = &dwork->work;
1509 1509
1510 WARN_ON_ONCE(!wq);
1510 WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 1511 WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1511 timer->data != (unsigned long)dwork); 1512 timer->data != (unsigned long)dwork);
1512 WARN_ON_ONCE(timer_pending(timer)); 1513 WARN_ON_ONCE(timer_pending(timer));
diff --git a/lib/ioremap.c b/lib/ioremap.c
index a3e14ce92a56..4bb30206b942 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -14,6 +14,7 @@
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15 15
16#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP 16#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
17static int __read_mostly ioremap_p4d_capable;
17static int __read_mostly ioremap_pud_capable; 18static int __read_mostly ioremap_pud_capable;
18static int __read_mostly ioremap_pmd_capable; 19static int __read_mostly ioremap_pmd_capable;
19static int __read_mostly ioremap_huge_disabled; 20static int __read_mostly ioremap_huge_disabled;
@@ -35,6 +36,11 @@ void __init ioremap_huge_init(void)
35 } 36 }
36} 37}
37 38
39static inline int ioremap_p4d_enabled(void)
40{
41 return ioremap_p4d_capable;
42}
43
38static inline int ioremap_pud_enabled(void) 44static inline int ioremap_pud_enabled(void)
39{ 45{
40 return ioremap_pud_capable; 46 return ioremap_pud_capable;
@@ -46,6 +52,7 @@ static inline int ioremap_pmd_enabled(void)
46} 52}
47 53
48#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ 54#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
55static inline int ioremap_p4d_enabled(void) { return 0; }
49static inline int ioremap_pud_enabled(void) { return 0; } 56static inline int ioremap_pud_enabled(void) { return 0; }
50static inline int ioremap_pmd_enabled(void) { return 0; } 57static inline int ioremap_pmd_enabled(void) { return 0; }
51#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 58#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -94,14 +101,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94 return 0; 101 return 0;
95} 102}
96 103
97static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, 104static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
98 unsigned long end, phys_addr_t phys_addr, pgprot_t prot) 105 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
99{ 106{
100 pud_t *pud; 107 pud_t *pud;
101 unsigned long next; 108 unsigned long next;
102 109
103 phys_addr -= addr; 110 phys_addr -= addr;
104 pud = pud_alloc(&init_mm, pgd, addr); 111 pud = pud_alloc(&init_mm, p4d, addr);
105 if (!pud) 112 if (!pud)
106 return -ENOMEM; 113 return -ENOMEM;
107 do { 114 do {
@@ -120,6 +127,32 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
120 return 0; 127 return 0;
121} 128}
122 129
130static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
131 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
132{
133 p4d_t *p4d;
134 unsigned long next;
135
136 phys_addr -= addr;
137 p4d = p4d_alloc(&init_mm, pgd, addr);
138 if (!p4d)
139 return -ENOMEM;
140 do {
141 next = p4d_addr_end(addr, end);
142
143 if (ioremap_p4d_enabled() &&
144 ((next - addr) == P4D_SIZE) &&
145 IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
146 if (p4d_set_huge(p4d, phys_addr + addr, prot))
147 continue;
148 }
149
150 if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
151 return -ENOMEM;
152 } while (p4d++, addr = next, addr != end);
153 return 0;
154}
155
123int ioremap_page_range(unsigned long addr, 156int ioremap_page_range(unsigned long addr,
124 unsigned long end, phys_addr_t phys_addr, pgprot_t prot) 157 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
125{ 158{
@@ -135,7 +168,7 @@ int ioremap_page_range(unsigned long addr,
135 pgd = pgd_offset_k(addr); 168 pgd = pgd_offset_k(addr);
136 do { 169 do {
137 next = pgd_addr_end(addr, end); 170 next = pgd_addr_end(addr, end);
138 err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot); 171 err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
139 if (err) 172 if (err)
140 break; 173 break;
141 } while (pgd++, addr = next, addr != end); 174 } while (pgd++, addr = next, addr != end);
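
ioremap_p4d_range() wires the new p4d level between pgd and pud, and every level has the same shape: clamp the sub-range with *_addr_end(), try a huge mapping if the chunk covers exactly one aligned entry, otherwise allocate and recurse one level down. A userspace model of the clamped two-level range walk, with toy sizes and no real page tables:

    #include <stdio.h>

    #define P4D_SIZE 0x1000UL       /* toy sizes, nothing like the kernel's */
    #define PUD_SIZE 0x100UL

    /* end of the current entry's coverage, clamped to the overall end */
    static unsigned long addr_end(unsigned long addr, unsigned long end,
                                  unsigned long size)
    {
            unsigned long boundary = (addr + size) & ~(size - 1);
            return boundary < end ? boundary : end;
    }

    static void pud_range(unsigned long addr, unsigned long end)
    {
            do {
                    unsigned long next = addr_end(addr, end, PUD_SIZE);
                    printf("  pud [%#lx, %#lx)\n", addr, next);
                    addr = next;
            } while (addr != end);
    }

    static void p4d_range(unsigned long addr, unsigned long end)
    {
            do {
                    unsigned long next = addr_end(addr, end, P4D_SIZE);
                    printf("p4d   [%#lx, %#lx)\n", addr, next);
                    pud_range(addr, next);      /* recurse one level down */
                    addr = next;
            } while (addr != end);
    }

    int main(void)
    {
            p4d_range(0xf80, 0x1100);           /* crosses one p4d boundary */
            return 0;
    }

The same shape appears again in the mm/gup.c, mm/huge_memory.c and mm/hugetlb.c hunks below, where a p4d step is slotted into existing pgd-to-pud walks.
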
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5ed506d648c4..691a9ad48497 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
2129 struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); 2129 struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
2130 if (!bitmap) 2130 if (!bitmap)
2131 return 0; 2131 return 0;
2132 bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap); 2132 if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
2133 kfree(bitmap); 2133 kfree(bitmap);
2134 } 2134 }
2135 2135
2136 return 1; 2136 return 1;
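
The ida_pre_get() fix is about cmpxchg's return value: this_cpu_cmpxchg() returns the old contents of the slot, so the previous code assigned that old value (usually NULL) back to bitmap and then freed it, throwing away the allocation it had just installed. The corrected form frees the new buffer only when the slot was already occupied. The same install-or-free idiom in C11 atomics:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    static _Atomic(void *) slot;

    /* install buf into an empty slot, or free it if someone beat us */
    static void preload(void *buf)
    {
            void *expected = NULL;

            if (!atomic_compare_exchange_strong(&slot, &expected, buf))
                    free(buf);      /* slot already full: buf is surplus */
    }

    int main(void)
    {
            preload(malloc(64));    /* installed */
            preload(malloc(64));    /* freed, slot already occupied */
            printf("slot = %p\n", atomic_load(&slot));
            free(atomic_load(&slot));
            return 0;
    }
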
diff --git a/lib/refcount.c b/lib/refcount.c
index 1d33366189d1..aa09ad3c30b0 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -58,7 +58,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
58 val = old; 58 val = old;
59 } 59 }
60 60
61 WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); 61 WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
62 62
63 return true; 63 return true;
64} 64}
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(refcount_add_not_zero);
66 66
67void refcount_add(unsigned int i, refcount_t *r) 67void refcount_add(unsigned int i, refcount_t *r)
68{ 68{
69 WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n"); 69 WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
70} 70}
71EXPORT_SYMBOL_GPL(refcount_add); 71EXPORT_SYMBOL_GPL(refcount_add);
72 72
@@ -97,7 +97,7 @@ bool refcount_inc_not_zero(refcount_t *r)
97 val = old; 97 val = old;
98 } 98 }
99 99
100 WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n"); 100 WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
101 101
102 return true; 102 return true;
103} 103}
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
111 */ 111 */
112void refcount_inc(refcount_t *r) 112void refcount_inc(refcount_t *r)
113{ 113{
114 WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n"); 114 WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
115} 115}
116EXPORT_SYMBOL_GPL(refcount_inc); 116EXPORT_SYMBOL_GPL(refcount_inc);
117 117
@@ -125,7 +125,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
125 125
126 new = val - i; 126 new = val - i;
127 if (new > val) { 127 if (new > val) {
128 WARN(new > val, "refcount_t: underflow; use-after-free.\n"); 128 WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
129 return false; 129 return false;
130 } 130 }
131 131
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(refcount_dec_and_test);
164 164
165void refcount_dec(refcount_t *r) 165void refcount_dec(refcount_t *r)
166{ 166{
167 WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); 167 WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
168} 168}
169EXPORT_SYMBOL_GPL(refcount_dec); 169EXPORT_SYMBOL_GPL(refcount_dec);
170 170
@@ -204,7 +204,7 @@ bool refcount_dec_not_one(refcount_t *r)
204 204
205 new = val - 1; 205 new = val - 1;
206 if (new > val) { 206 if (new > val) {
207 WARN(new > val, "refcount_t: underflow; use-after-free.\n"); 207 WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
208 return true; 208 return true;
209 } 209 }
210 210
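
Two things show in the refcount_t hunks: WARN becomes WARN_ONCE because a saturated or zeroed counter would otherwise emit the identical warning on every later operation and flood the log, and the messages themselves document the policy that overflow pins the counter at UINT_MAX (a deliberate leak) rather than wrapping into a use-after-free. A sketch of that inc-not-zero-with-saturation policy in C11 atomics, simplified from the kernel's cmpxchg loop:

    #include <stdio.h>
    #include <stdbool.h>
    #include <limits.h>
    #include <stdatomic.h>

    /* increment unless the object is dead (0); pin at UINT_MAX on overflow */
    static bool ref_inc_not_zero(atomic_uint *r)
    {
            unsigned int val = atomic_load(r);
            unsigned int new;

            do {
                    if (!val)
                            return false;       /* use-after-free caught */
                    new = val + 1;
                    if (new < val)
                            new = UINT_MAX;     /* saturate, never wrap */
            } while (!atomic_compare_exchange_weak(r, &val, new));

            return true;
    }

    int main(void)
    {
            atomic_uint live = 1, dead = 0, full = UINT_MAX;

            printf("live: %d\n", ref_inc_not_zero(&live));   /* 1 */
            printf("dead: %d\n", ref_inc_not_zero(&dead));   /* 0 */
            ref_inc_not_zero(&full);
            printf("full pinned at %u\n", (unsigned)atomic_load(&full));
            return 0;
    }
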
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6d861d090e9f..c6f2a37028c2 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -683,33 +683,26 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
683static void cgwb_bdi_destroy(struct backing_dev_info *bdi) 683static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
684{ 684{
685 struct radix_tree_iter iter; 685 struct radix_tree_iter iter;
686 struct rb_node *rbn;
687 void **slot; 686 void **slot;
688 687
689 WARN_ON(test_bit(WB_registered, &bdi->wb.state)); 688 WARN_ON(test_bit(WB_registered, &bdi->wb.state));
690 689
691 spin_lock_irq(&cgwb_lock); 690 spin_lock_irq(&cgwb_lock);
692
693 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) 691 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
694 cgwb_kill(*slot); 692 cgwb_kill(*slot);
695
696 while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
697 struct bdi_writeback_congested *congested =
698 rb_entry(rbn, struct bdi_writeback_congested, rb_node);
699
700 rb_erase(rbn, &bdi->cgwb_congested_tree);
701 congested->bdi = NULL; /* mark @congested unlinked */
702 }
703
704 spin_unlock_irq(&cgwb_lock); 693 spin_unlock_irq(&cgwb_lock);
705 694
706 /* 695 /*
707 * All cgwb's and their congested states must be shutdown and 696 * All cgwb's must be shutdown and released before returning. Drain
708 * released before returning. Drain the usage counter to wait for 697 * the usage counter to wait for all cgwb's ever created on @bdi.
709 * all cgwb's and cgwb_congested's ever created on @bdi.
710 */ 698 */
711 atomic_dec(&bdi->usage_cnt); 699 atomic_dec(&bdi->usage_cnt);
712 wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt)); 700 wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
701 /*
702 * Grab back our reference so that we hold it when @bdi gets
703 * re-registered.
704 */
705 atomic_inc(&bdi->usage_cnt);
713} 706}
714 707
715/** 708/**
@@ -749,6 +742,21 @@ void wb_blkcg_offline(struct blkcg *blkcg)
749 spin_unlock_irq(&cgwb_lock); 742 spin_unlock_irq(&cgwb_lock);
750} 743}
751 744
745static void cgwb_bdi_exit(struct backing_dev_info *bdi)
746{
747 struct rb_node *rbn;
748
749 spin_lock_irq(&cgwb_lock);
750 while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
751 struct bdi_writeback_congested *congested =
752 rb_entry(rbn, struct bdi_writeback_congested, rb_node);
753
754 rb_erase(rbn, &bdi->cgwb_congested_tree);
755 congested->bdi = NULL; /* mark @congested unlinked */
756 }
757 spin_unlock_irq(&cgwb_lock);
758}
759
752#else /* CONFIG_CGROUP_WRITEBACK */ 760#else /* CONFIG_CGROUP_WRITEBACK */
753 761
754static int cgwb_bdi_init(struct backing_dev_info *bdi) 762static int cgwb_bdi_init(struct backing_dev_info *bdi)
@@ -769,7 +777,9 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
769 return 0; 777 return 0;
770} 778}
771 779
772static void cgwb_bdi_destroy(struct backing_dev_info *bdi) 780static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
781
782static void cgwb_bdi_exit(struct backing_dev_info *bdi)
773{ 783{
774 wb_congested_put(bdi->wb_congested); 784 wb_congested_put(bdi->wb_congested);
775} 785}
@@ -857,6 +867,8 @@ int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
857 MINOR(owner->devt)); 867 MINOR(owner->devt));
858 if (rc) 868 if (rc)
859 return rc; 869 return rc;
870 /* Leaking owner reference... */
871 WARN_ON(bdi->owner);
860 bdi->owner = owner; 872 bdi->owner = owner;
861 get_device(owner); 873 get_device(owner);
862 return 0; 874 return 0;
@@ -898,6 +910,7 @@ static void bdi_exit(struct backing_dev_info *bdi)
898{ 910{
899 WARN_ON_ONCE(bdi->dev); 911 WARN_ON_ONCE(bdi->dev);
900 wb_exit(&bdi->wb); 912 wb_exit(&bdi->wb);
913 cgwb_bdi_exit(bdi);
901} 914}
902 915
903static void release_bdi(struct kref *ref) 916static void release_bdi(struct kref *ref)
diff --git a/mm/gup.c b/mm/gup.c
index 9c047e951aa3..04aa405350dc 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -226,6 +226,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
226 unsigned int *page_mask) 226 unsigned int *page_mask)
227{ 227{
228 pgd_t *pgd; 228 pgd_t *pgd;
229 p4d_t *p4d;
229 pud_t *pud; 230 pud_t *pud;
230 pmd_t *pmd; 231 pmd_t *pmd;
231 spinlock_t *ptl; 232 spinlock_t *ptl;
@@ -243,8 +244,13 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
243 pgd = pgd_offset(mm, address); 244 pgd = pgd_offset(mm, address);
244 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 245 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
245 return no_page_table(vma, flags); 246 return no_page_table(vma, flags);
246 247 p4d = p4d_offset(pgd, address);
247 pud = pud_offset(pgd, address); 248 if (p4d_none(*p4d))
249 return no_page_table(vma, flags);
250 BUILD_BUG_ON(p4d_huge(*p4d));
251 if (unlikely(p4d_bad(*p4d)))
252 return no_page_table(vma, flags);
253 pud = pud_offset(p4d, address);
248 if (pud_none(*pud)) 254 if (pud_none(*pud))
249 return no_page_table(vma, flags); 255 return no_page_table(vma, flags);
250 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { 256 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
@@ -325,6 +331,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
325 struct page **page) 331 struct page **page)
326{ 332{
327 pgd_t *pgd; 333 pgd_t *pgd;
334 p4d_t *p4d;
328 pud_t *pud; 335 pud_t *pud;
329 pmd_t *pmd; 336 pmd_t *pmd;
330 pte_t *pte; 337 pte_t *pte;
@@ -338,7 +345,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
338 else 345 else
339 pgd = pgd_offset_gate(mm, address); 346 pgd = pgd_offset_gate(mm, address);
340 BUG_ON(pgd_none(*pgd)); 347 BUG_ON(pgd_none(*pgd));
341 pud = pud_offset(pgd, address); 348 p4d = p4d_offset(pgd, address);
349 BUG_ON(p4d_none(*p4d));
350 pud = pud_offset(p4d, address);
342 BUG_ON(pud_none(*pud)); 351 BUG_ON(pud_none(*pud));
343 pmd = pmd_offset(pud, address); 352 pmd = pmd_offset(pud, address);
344 if (pmd_none(*pmd)) 353 if (pmd_none(*pmd))
@@ -1400,13 +1409,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1400 return 1; 1409 return 1;
1401} 1410}
1402 1411
1403static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, 1412static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
1404 int write, struct page **pages, int *nr) 1413 int write, struct page **pages, int *nr)
1405{ 1414{
1406 unsigned long next; 1415 unsigned long next;
1407 pud_t *pudp; 1416 pud_t *pudp;
1408 1417
1409 pudp = pud_offset(&pgd, addr); 1418 pudp = pud_offset(&p4d, addr);
1410 do { 1419 do {
1411 pud_t pud = READ_ONCE(*pudp); 1420 pud_t pud = READ_ONCE(*pudp);
1412 1421
@@ -1428,6 +1437,31 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
1428 return 1; 1437 return 1;
1429} 1438}
1430 1439
1440static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
1441 int write, struct page **pages, int *nr)
1442{
1443 unsigned long next;
1444 p4d_t *p4dp;
1445
1446 p4dp = p4d_offset(&pgd, addr);
1447 do {
1448 p4d_t p4d = READ_ONCE(*p4dp);
1449
1450 next = p4d_addr_end(addr, end);
1451 if (p4d_none(p4d))
1452 return 0;
1453 BUILD_BUG_ON(p4d_huge(p4d));
1454 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
1455 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
1456 P4D_SHIFT, next, write, pages, nr))
1457 return 0;
1458 } else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
1459 return 0;
1460 } while (p4dp++, addr = next, addr != end);
1461
1462 return 1;
1463}
1464
1431/* 1465/*
1432 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to 1466 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
1433 * the regular GUP. It will only return non-negative values. 1467 * the regular GUP. It will only return non-negative values.
@@ -1478,7 +1512,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1478 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, 1512 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1479 PGDIR_SHIFT, next, write, pages, &nr)) 1513 PGDIR_SHIFT, next, write, pages, &nr))
1480 break; 1514 break;
1481 } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) 1515 } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
1482 break; 1516 break;
1483 } while (pgdp++, addr = next, addr != end); 1517 } while (pgdp++, addr = next, addr != end);
1484 local_irq_restore(flags); 1518 local_irq_restore(flags);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d36b2af4d1bf..1ebc93e179f3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1828,7 +1828,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
 	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
 	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
 
-	count_vm_event(THP_SPLIT_PMD);
+	count_vm_event(THP_SPLIT_PUD);
 
 	pudp_huge_clear_flush_notify(vma, haddr, pud);
 }
@@ -2048,6 +2048,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
 		bool freeze, struct page *page)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
@@ -2055,7 +2056,11 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
 	if (!pgd_present(*pgd))
 		return;
 
-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (!p4d_present(*p4d))
+		return;
+
+	pud = pud_offset(p4d, address);
 	if (!pud_present(*pud))
 		return;
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a7aa811b7d14..3d0aab9ee80d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4555,7 +4555,8 @@ out:
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 {
 	pgd_t *pgd = pgd_offset(mm, *addr);
-	pud_t *pud = pud_offset(pgd, *addr);
+	p4d_t *p4d = p4d_offset(pgd, *addr);
+	pud_t *pud = pud_offset(p4d, *addr);
 
 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
 	if (page_count(virt_to_page(ptep)) == 1)
@@ -4586,11 +4587,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	pud = pud_alloc(mm, p4d, addr);
 	if (pud) {
 		if (sz == PUD_SIZE) {
 			pte = (pte_t *)pud;
@@ -4610,18 +4613,22 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
-	pmd_t *pmd = NULL;
+	pmd_t *pmd;
 
 	pgd = pgd_offset(mm, addr);
-	if (pgd_present(*pgd)) {
-		pud = pud_offset(pgd, addr);
-		if (pud_present(*pud)) {
-			if (pud_huge(*pud))
-				return (pte_t *)pud;
-			pmd = pmd_offset(pud, addr);
-		}
-	}
+	if (!pgd_present(*pgd))
+		return NULL;
+	p4d = p4d_offset(pgd, addr);
+	if (!p4d_present(*p4d))
+		return NULL;
+	pud = pud_offset(p4d, addr);
+	if (!pud_present(*pud))
+		return NULL;
+	if (pud_huge(*pud))
+		return (pte_t *)pud;
+	pmd = pmd_offset(pud, addr);
 	return (pte_t *) pmd;
 }
 
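On architectures that keep four paging levels, the p4d step added to walks like huge_pte_offset() above is free: the folded p4d_offset() simply returns its pgd argument and the p4d checks collapse into the pgd ones. A runnable toy of that folding trick, assuming nothing beyond standard C (the toy_* names are invented, not the kernel's pgtable-nop4d.h):

#include <stdio.h>

typedef struct { unsigned long v; } toy_pgd_t;
typedef struct { toy_pgd_t pgd; } toy_p4d_t;	/* folded: same cell */

/* With one p4d per pgd entry, "offset" is just a cast. */
static toy_p4d_t *toy_p4d_offset(toy_pgd_t *pgd, unsigned long addr)
{
	(void)addr;
	return (toy_p4d_t *)pgd;
}

/* The presence check delegates straight to the pgd entry. */
static int toy_p4d_present(toy_p4d_t p4d)
{
	return p4d.pgd.v != 0;
}

int main(void)
{
	toy_pgd_t pgd = { 0x42 };
	toy_p4d_t *p4d = toy_p4d_offset(&pgd, 0);

	printf("%d\n", toy_p4d_present(*p4d));	/* 1: same bits as pgd */
	return 0;
}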
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 31238dad85fb..b96a5f773d88 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -30,6 +30,9 @@
  */
 unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
+#if CONFIG_PGTABLE_LEVELS > 4
+p4d_t kasan_zero_p4d[PTRS_PER_P4D] __page_aligned_bss;
+#endif
 #if CONFIG_PGTABLE_LEVELS > 3
 pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
 #endif
@@ -82,10 +85,10 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
 	} while (pmd++, addr = next, addr != end);
 }
 
-static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+static void __init zero_pud_populate(p4d_t *p4d, unsigned long addr,
 				unsigned long end)
 {
-	pud_t *pud = pud_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
 	unsigned long next;
 
 	do {
@@ -107,6 +110,23 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
 	} while (pud++, addr = next, addr != end);
 }
 
+static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+				unsigned long end)
+{
+	p4d_t *p4d = p4d_offset(pgd, addr);
+	unsigned long next;
+
+	do {
+		next = p4d_addr_end(addr, end);
+
+		if (p4d_none(*p4d)) {
+			p4d_populate(&init_mm, p4d,
+				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+		}
+		zero_pud_populate(p4d, addr, next);
+	} while (p4d++, addr = next, addr != end);
+}
+
 /**
  * kasan_populate_zero_shadow - populate shadow memory region with
  *                              kasan_zero_page
@@ -125,6 +145,7 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
 		next = pgd_addr_end(addr, end);
 
 		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+			p4d_t *p4d;
 			pud_t *pud;
 			pmd_t *pmd;
 
@@ -135,9 +156,22 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
 			 * 3,2 - level page tables where we don't have
 			 * puds,pmds, so pgd_populate(), pud_populate()
 			 * is noops.
+			 *
+			 * The ifndef is required to avoid build breakage.
+			 *
+			 * With 5level-fixup.h, pgd_populate() is not nop and
+			 * we reference kasan_zero_p4d. It's not defined
+			 * unless 5-level paging enabled.
+			 *
+			 * The ifndef can be dropped once all KASAN-enabled
+			 * architectures will switch to pgtable-nop4d.h.
 			 */
-			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
-			pud = pud_offset(pgd, addr);
+#ifndef __ARCH_HAS_5LEVEL_HACK
+			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d));
+#endif
+			p4d = p4d_offset(pgd, addr);
+			p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
+			pud = pud_offset(p4d, addr);
 			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
 			pmd = pmd_offset(pud, addr);
 			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
@@ -148,6 +182,6 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
 			pgd_populate(&init_mm, pgd,
 				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
 		}
-		zero_pud_populate(pgd, addr, next);
+		zero_p4d_populate(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
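kasan_zero_p4d extends the existing zero-shadow trick by one level: every slot of each upper table aliases the same shared lower-level "zero" table, so an arbitrarily large region reads as zeroes while only one page per level is spent. A toy two-level version of the aliasing idea, with purely hypothetical SLOTS/zero_dir names:

#include <stdio.h>

#define SLOTS 8

static int zero_leaf[SLOTS];		/* one shared leaf of zeroes */
static int *zero_dir[SLOTS];		/* one shared directory */

int main(void)
{
	int **root[SLOTS];

	for (int i = 0; i < SLOTS; i++)
		zero_dir[i] = zero_leaf;	/* every dir slot -> same leaf */
	for (int i = 0; i < SLOTS; i++)
		root[i] = zero_dir;		/* every root slot -> same dir */

	/* SLOTS^3 "pages" readable as zero, with O(SLOTS) backing memory. */
	printf("%d\n", root[3][5][7]);
	return 0;
}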
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 6f1ed1630873..3a8ddf8baf7d 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -25,6 +25,7 @@
 #include <linux/printk.h>
 #include <linux/shrinker.h>
 #include <linux/slab.h>
+#include <linux/srcu.h>
 #include <linux/string.h>
 #include <linux/types.h>
 
@@ -103,6 +104,7 @@ static int quarantine_tail;
 /* Total size of all objects in global_quarantine across all batches. */
 static unsigned long quarantine_size;
 static DEFINE_SPINLOCK(quarantine_lock);
+DEFINE_STATIC_SRCU(remove_cache_srcu);
 
 /* Maximum size of the global queue. */
 static unsigned long quarantine_max_size;
@@ -173,17 +175,22 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 	struct qlist_head *q;
 	struct qlist_head temp = QLIST_INIT;
 
+	/*
+	 * Note: irq must be disabled until after we move the batch to the
+	 * global quarantine. Otherwise quarantine_remove_cache() can miss
+	 * some objects belonging to the cache if they are in our local temp
+	 * list. quarantine_remove_cache() executes on_each_cpu() at the
+	 * beginning which ensures that it either sees the objects in per-cpu
+	 * lists or in the global quarantine.
+	 */
 	local_irq_save(flags);
 
 	q = this_cpu_ptr(&cpu_quarantine);
 	qlist_put(q, &info->quarantine_link, cache->size);
-	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
+	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
 		qlist_move_all(q, &temp);
 
-	local_irq_restore(flags);
-
-	if (unlikely(!qlist_empty(&temp))) {
-		spin_lock_irqsave(&quarantine_lock, flags);
+		spin_lock(&quarantine_lock);
 		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
 		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
 		if (global_quarantine[quarantine_tail].bytes >=
@@ -196,20 +203,33 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 		if (new_tail != quarantine_head)
 			quarantine_tail = new_tail;
 	}
-		spin_unlock_irqrestore(&quarantine_lock, flags);
+		spin_unlock(&quarantine_lock);
 	}
+
+	local_irq_restore(flags);
 }
 
 void quarantine_reduce(void)
 {
 	size_t total_size, new_quarantine_size, percpu_quarantines;
 	unsigned long flags;
+	int srcu_idx;
 	struct qlist_head to_free = QLIST_INIT;
 
 	if (likely(READ_ONCE(quarantine_size) <=
 		   READ_ONCE(quarantine_max_size)))
 		return;
 
+	/*
+	 * srcu critical section ensures that quarantine_remove_cache()
+	 * will not miss objects belonging to the cache while they are in our
+	 * local to_free list. srcu is chosen because (1) it gives us private
+	 * grace period domain that does not interfere with anything else,
+	 * and (2) it allows synchronize_srcu() to return without waiting
+	 * if there are no pending read critical sections (which is the
+	 * expected case).
+	 */
+	srcu_idx = srcu_read_lock(&remove_cache_srcu);
 	spin_lock_irqsave(&quarantine_lock, flags);
 
 	/*
@@ -237,6 +257,7 @@ void quarantine_reduce(void)
 	spin_unlock_irqrestore(&quarantine_lock, flags);
 
 	qlist_free_all(&to_free, NULL);
+	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
 }
 
 static void qlist_move_cache(struct qlist_head *from,
@@ -280,12 +301,28 @@ void quarantine_remove_cache(struct kmem_cache *cache)
 	unsigned long flags, i;
 	struct qlist_head to_free = QLIST_INIT;
 
+	/*
+	 * Must be careful to not miss any objects that are being moved from
+	 * per-cpu list to the global quarantine in quarantine_put(),
+	 * nor objects being freed in quarantine_reduce(). on_each_cpu()
+	 * achieves the first goal, while synchronize_srcu() achieves the
+	 * second.
+	 */
 	on_each_cpu(per_cpu_remove_cache, cache, 1);
 
 	spin_lock_irqsave(&quarantine_lock, flags);
-	for (i = 0; i < QUARANTINE_BATCHES; i++)
+	for (i = 0; i < QUARANTINE_BATCHES; i++) {
+		if (qlist_empty(&global_quarantine[i]))
+			continue;
 		qlist_move_cache(&global_quarantine[i], &to_free, cache);
+		/* Scanning whole quarantine can take a while. */
+		spin_unlock_irqrestore(&quarantine_lock, flags);
+		cond_resched();
+		spin_lock_irqsave(&quarantine_lock, flags);
+	}
 	spin_unlock_irqrestore(&quarantine_lock, flags);
 
 	qlist_free_all(&to_free, cache);
+
+	synchronize_srcu(&remove_cache_srcu);
 }
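The cond_resched() dance above bounds how long quarantine_lock is held while all batches are scanned: the lock is dropped and retaken once per batch. A userspace sketch of that batch-per-lock-hold loop, using a pthread mutex and sched_yield() as stand-ins for the spinlock and cond_resched() (toy names, not kernel API):

#include <pthread.h>
#include <sched.h>

#define BATCHES 1024

static int batches[BATCHES];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_all(void)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < BATCHES; i++) {
		if (!batches[i])
			continue;	/* skip empty batches cheaply */
		batches[i] = 0;		/* the actual work for one batch */
		/* Scanning everything can take a while: let others in. */
		pthread_mutex_unlock(&lock);
		sched_yield();
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	batches[3] = 1;
	drain_all();
	return 0;
}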
diff --git a/mm/madvise.c b/mm/madvise.c
index dc5927c812d3..7a2abf0127ae 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -513,7 +513,43 @@ static long madvise_dontneed(struct vm_area_struct *vma,
 	if (!can_madv_dontneed_vma(vma))
 		return -EINVAL;
 
-	userfaultfd_remove(vma, prev, start, end);
+	if (!userfaultfd_remove(vma, start, end)) {
+		*prev = NULL; /* mmap_sem has been dropped, prev is stale */
+
+		down_read(&current->mm->mmap_sem);
+		vma = find_vma(current->mm, start);
+		if (!vma)
+			return -ENOMEM;
+		if (start < vma->vm_start) {
+			/*
+			 * This "vma" under revalidation is the one
+			 * with the lowest vma->vm_start where start
+			 * is also < vma->vm_end. If start <
+			 * vma->vm_start it means an hole materialized
+			 * in the user address space within the
+			 * virtual range passed to MADV_DONTNEED.
+			 */
+			return -ENOMEM;
+		}
+		if (!can_madv_dontneed_vma(vma))
+			return -EINVAL;
+		if (end > vma->vm_end) {
+			/*
+			 * Don't fail if end > vma->vm_end. If the old
+			 * vma was splitted while the mmap_sem was
+			 * released the effect of the concurrent
+			 * operation may not cause MADV_DONTNEED to
+			 * have an undefined result. There may be an
+			 * adjacent next vma that we'll walk
+			 * next. userfaultfd_remove() will generate an
+			 * UFFD_EVENT_REMOVE repetition on the
+			 * end-vma->vm_end range, but the manager can
+			 * handle a repetition fine.
+			 */
+			end = vma->vm_end;
+		}
+		VM_WARN_ON(start >= end);
+	}
 	zap_page_range(vma, start, end - start);
 	return 0;
 }
@@ -554,8 +590,10 @@ static long madvise_remove(struct vm_area_struct *vma,
 	 * mmap_sem.
 	 */
 	get_file(f);
-	userfaultfd_remove(vma, prev, start, end);
-	up_read(&current->mm->mmap_sem);
+	if (userfaultfd_remove(vma, start, end)) {
+		/* mmap_sem was not released by userfaultfd_remove() */
+		up_read(&current->mm->mmap_sem);
+	}
 	error = vfs_fallocate(f,
 				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 				offset, end - start);
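The madvise_dontneed() change is the classic drop-lock-and-revalidate pattern: once userfaultfd_remove() may have released mmap_sem, every cached pointer is stale, so the vma must be looked up and checked again, trimming rather than failing when the range merely shrank. A self-contained userspace sketch of the same dance, with assumed toy types (struct range, find_range(), callee()) in place of vmas and mmap_sem:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct range { unsigned long start, end; };

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct range the_map = { 0x1000, 0x9000 };

/* Toy lookup: the range covering or above @start, or NULL. */
static struct range *find_range(unsigned long start)
{
	return start < the_map.end ? &the_map : NULL;
}

/* Pretend callee; returns 0 when it dropped map_lock. */
static int callee(struct range *r)
{
	(void)r;
	pthread_rwlock_unlock(&map_lock);
	return 0;
}

static int operate(unsigned long start, unsigned long end)
{
	struct range *r;

	pthread_rwlock_rdlock(&map_lock);
	r = find_range(start);
	if (!callee(r)) {
		/* Lock dropped: r is stale, re-lookup and revalidate. */
		pthread_rwlock_rdlock(&map_lock);
		r = find_range(start);
		if (!r || start < r->start) {
			pthread_rwlock_unlock(&map_lock);
			return -ENOMEM;	/* a hole appeared meanwhile */
		}
		if (end > r->end)
			end = r->end;	/* range shrank: trim, don't fail */
	}
	/* ... operate on [start, end) under map_lock ... */
	pthread_rwlock_unlock(&map_lock);
	return 0;
}

int main(void)
{
	printf("%d\n", operate(0x2000, 0xa000));
	return 0;
}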
diff --git a/mm/memblock.c b/mm/memblock.c
index b64b47803e52..696f06d17c4e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1118,7 +1118,10 @@ unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
 		}
 	} while (left < right);
 
-	return min(PHYS_PFN(type->regions[right].base), max_pfn);
+	if (right == type->cnt)
+		return max_pfn;
+	else
+		return min(PHYS_PFN(type->regions[right].base), max_pfn);
 }
 
 /**
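The memblock fix guards the case where the binary search walks past the last region: then there is no regions[right] to read and max_pfn must be returned directly. The same upper-bound search on a plain sorted array, as a sketch with hypothetical data rather than memblock's types:

#include <stdio.h>

static unsigned long next_valid(const unsigned long *bases, int cnt,
				unsigned long x, unsigned long max)
{
	int left = 0, right = cnt;

	/* Find the first base strictly above x. */
	while (left < right) {
		int mid = (left + right) / 2;

		if (bases[mid] <= x)
			left = mid + 1;
		else
			right = mid;
	}
	if (right == cnt)		/* past the last entry: clamp */
		return max;
	return bases[right] < max ? bases[right] : max;
}

int main(void)
{
	unsigned long bases[] = { 10, 100, 1000 };

	printf("%lu\n", next_valid(bases, 3, 5000, 99999)); /* -> 99999 */
	return 0;
}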
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c52ec893e241..2bd7541d7c11 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -466,6 +466,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 	struct mem_cgroup_tree_per_node *mctz;
 
 	mctz = soft_limit_tree_from_page(page);
+	if (!mctz)
+		return;
 	/*
 	 * Necessary to update all ancestors when hierarchy is used.
 	 * because their event counter is not touched.
@@ -503,7 +505,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 	for_each_node(nid) {
 		mz = mem_cgroup_nodeinfo(memcg, nid);
 		mctz = soft_limit_tree_node(nid);
-		mem_cgroup_remove_exceeded(mz, mctz);
+		if (mctz)
+			mem_cgroup_remove_exceeded(mz, mctz);
 	}
 }
 
@@ -2558,7 +2561,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 	 * is empty. Do it lockless to prevent lock bouncing. Races
 	 * are acceptable as soft limit is best effort anyway.
 	 */
-	if (RB_EMPTY_ROOT(&mctz->rb_root))
+	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
 		return 0;
 
 	/*
@@ -4135,17 +4138,22 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
 	kfree(memcg->nodeinfo[node]);
 }
 
-static void mem_cgroup_free(struct mem_cgroup *memcg)
+static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
 	int node;
 
-	memcg_wb_domain_exit(memcg);
 	for_each_node(node)
 		free_mem_cgroup_per_node_info(memcg, node);
 	free_percpu(memcg->stat);
 	kfree(memcg);
 }
 
+static void mem_cgroup_free(struct mem_cgroup *memcg)
+{
+	memcg_wb_domain_exit(memcg);
+	__mem_cgroup_free(memcg);
+}
+
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *memcg;
@@ -4196,7 +4204,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 fail:
 	if (memcg->id.id > 0)
 		idr_remove(&mem_cgroup_idr, memcg->id.id);
-	mem_cgroup_free(memcg);
+	__mem_cgroup_free(memcg);
 	return NULL;
 }
 
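Splitting __mem_cgroup_free() out of mem_cgroup_free() lets the allocation failure path free only what was actually set up, without calling memcg_wb_domain_exit() on a domain that was never initialized. A toy version of that layered-teardown pattern, with hypothetical obj/domain names:

#include <stdlib.h>

struct obj { int *stats; int domain_ready; };

static void __obj_free(struct obj *o)	/* frees only the raw pieces */
{
	free(o->stats);
	free(o);
}

static void obj_free(struct obj *o)	/* full teardown for live objects */
{
	/* "domain exit" would go here; valid only once domain_ready */
	__obj_free(o);
}

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	o->stats = calloc(16, sizeof(int));
	if (!o->stats)
		goto fail;
	o->domain_ready = 1;		/* last init step */
	return o;
fail:
	__obj_free(o);			/* never the full teardown here */
	return NULL;
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (o)
		obj_free(o);
	return 0;
}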
diff --git a/mm/memory.c b/mm/memory.c
index a97a4cec2e1f..235ba51b2fbf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -445,7 +445,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	mm_dec_nr_pmds(tlb->mm);
 }
 
-static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
 				unsigned long addr, unsigned long end,
 				unsigned long floor, unsigned long ceiling)
 {
@@ -454,7 +454,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	unsigned long start;
 
 	start = addr;
-	pud = pud_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
@@ -462,6 +462,39 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 	} while (pud++, addr = next, addr != end);
 
+	start &= P4D_MASK;
+	if (start < floor)
+		return;
+	if (ceiling) {
+		ceiling &= P4D_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		return;
+
+	pud = pud_offset(p4d, start);
+	p4d_clear(p4d);
+	pud_free_tlb(tlb, pud, start);
+}
+
+static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
+				unsigned long addr, unsigned long end,
+				unsigned long floor, unsigned long ceiling)
+{
+	p4d_t *p4d;
+	unsigned long next;
+	unsigned long start;
+
+	start = addr;
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		if (p4d_none_or_clear_bad(p4d))
+			continue;
+		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
+	} while (p4d++, addr = next, addr != end);
+
 	start &= PGDIR_MASK;
 	if (start < floor)
 		return;
@@ -473,9 +506,9 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 	if (end - 1 > ceiling - 1)
 		return;
 
-	pud = pud_offset(pgd, start);
+	p4d = p4d_offset(pgd, start);
 	pgd_clear(pgd);
-	pud_free_tlb(tlb, pud, start);
+	p4d_free_tlb(tlb, p4d, start);
 }
 
 /*
@@ -539,7 +572,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -658,7 +691,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 			  pte_t pte, struct page *page)
 {
 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
-	pud_t *pud = pud_offset(pgd, addr);
+	p4d_t *p4d = p4d_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
 	pmd_t *pmd = pmd_offset(pud, addr);
 	struct address_space *mapping;
 	pgoff_t index;
@@ -1023,16 +1057,16 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
 }
 
 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
+		p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end)
 {
 	pud_t *src_pud, *dst_pud;
 	unsigned long next;
 
-	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
+	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
 	if (!dst_pud)
 		return -ENOMEM;
-	src_pud = pud_offset(src_pgd, addr);
+	src_pud = pud_offset(src_p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
@@ -1056,6 +1090,28 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src
 	return 0;
 }
 
+static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
+		unsigned long addr, unsigned long end)
+{
+	p4d_t *src_p4d, *dst_p4d;
+	unsigned long next;
+
+	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
+	if (!dst_p4d)
+		return -ENOMEM;
+	src_p4d = p4d_offset(src_pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		if (p4d_none_or_clear_bad(src_p4d))
+			continue;
+		if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
+						vma, addr, next))
+			return -ENOMEM;
+	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
+	return 0;
+}
+
 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		struct vm_area_struct *vma)
 {
@@ -1111,7 +1167,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(src_pgd))
 			continue;
-		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+		if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
 					    vma, addr, next))) {
 			ret = -ENOMEM;
 			break;
@@ -1267,14 +1323,14 @@ next:
 }
 
 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
-				struct vm_area_struct *vma, pgd_t *pgd,
+				struct vm_area_struct *vma, p4d_t *p4d,
 				unsigned long addr, unsigned long end,
 				struct zap_details *details)
 {
 	pud_t *pud;
 	unsigned long next;
 
-	pud = pud_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
@@ -1295,6 +1351,25 @@ next:
 	return addr;
 }
 
+static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
+				struct vm_area_struct *vma, pgd_t *pgd,
+				unsigned long addr, unsigned long end,
+				struct zap_details *details)
+{
+	p4d_t *p4d;
+	unsigned long next;
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		if (p4d_none_or_clear_bad(p4d))
+			continue;
+		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
+	} while (p4d++, addr = next, addr != end);
+
+	return addr;
+}
+
 void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
@@ -1310,7 +1385,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		next = zap_pud_range(tlb, vma, pgd, addr, next, details);
+		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
 	} while (pgd++, addr = next, addr != end);
 	tlb_end_vma(tlb, vma);
 }
@@ -1465,16 +1540,24 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes);
 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
 			spinlock_t **ptl)
 {
-	pgd_t *pgd = pgd_offset(mm, addr);
-	pud_t *pud = pud_alloc(mm, pgd, addr);
-	if (pud) {
-		pmd_t *pmd = pmd_alloc(mm, pud, addr);
-		if (pmd) {
-			VM_BUG_ON(pmd_trans_huge(*pmd));
-			return pte_alloc_map_lock(mm, pmd, addr, ptl);
-		}
-	}
-	return NULL;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return NULL;
+	pud = pud_alloc(mm, p4d, addr);
+	if (!pud)
+		return NULL;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return NULL;
+
+	VM_BUG_ON(pmd_trans_huge(*pmd));
+	return pte_alloc_map_lock(mm, pmd, addr, ptl);
 }
 
 /*
@@ -1740,7 +1823,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 	return 0;
 }
 
-static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
 			unsigned long addr, unsigned long end,
 			unsigned long pfn, pgprot_t prot)
 {
@@ -1748,7 +1831,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 	unsigned long next;
 
 	pfn -= addr >> PAGE_SHIFT;
-	pud = pud_alloc(mm, pgd, addr);
+	pud = pud_alloc(mm, p4d, addr);
 	if (!pud)
 		return -ENOMEM;
 	do {
@@ -1760,6 +1843,26 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 	return 0;
 }
 
+static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+			unsigned long addr, unsigned long end,
+			unsigned long pfn, pgprot_t prot)
+{
+	p4d_t *p4d;
+	unsigned long next;
+
+	pfn -= addr >> PAGE_SHIFT;
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return -ENOMEM;
+	do {
+		next = p4d_addr_end(addr, end);
+		if (remap_pud_range(mm, p4d, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot))
+			return -ENOMEM;
+	} while (p4d++, addr = next, addr != end);
+	return 0;
+}
+
 /**
  * remap_pfn_range - remap kernel memory to userspace
  * @vma: user vma to map to
@@ -1816,7 +1919,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	flush_cache_range(vma, addr, end);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = remap_pud_range(mm, pgd, addr, next,
+		err = remap_p4d_range(mm, pgd, addr, next,
 				pfn + (addr >> PAGE_SHIFT), prot);
 		if (err)
 			break;
@@ -1932,7 +2035,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
 	return err;
 }
 
-static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
 				     unsigned long addr, unsigned long end,
 				     pte_fn_t fn, void *data)
 {
@@ -1940,7 +2043,7 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
 	unsigned long next;
 	int err;
 
-	pud = pud_alloc(mm, pgd, addr);
+	pud = pud_alloc(mm, p4d, addr);
 	if (!pud)
 		return -ENOMEM;
 	do {
@@ -1952,6 +2055,26 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
 	return err;
 }
 
+static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+				     unsigned long addr, unsigned long end,
+				     pte_fn_t fn, void *data)
+{
+	p4d_t *p4d;
+	unsigned long next;
+	int err;
+
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return -ENOMEM;
+	do {
+		next = p4d_addr_end(addr, end);
+		err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
+		if (err)
+			break;
+	} while (p4d++, addr = next, addr != end);
+	return err;
+}
+
 /*
  * Scan a region of virtual memory, filling in page tables as necessary
  * and calling a provided function on each leaf page table.
@@ -1970,7 +2093,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
+		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
@@ -3653,11 +3776,15 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	};
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	int ret;
 
 	pgd = pgd_offset(mm, address);
+	p4d = p4d_alloc(mm, pgd, address);
+	if (!p4d)
+		return VM_FAULT_OOM;
 
-	vmf.pud = pud_alloc(mm, pgd, address);
+	vmf.pud = pud_alloc(mm, p4d, address);
 	if (!vmf.pud)
 		return VM_FAULT_OOM;
 	if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
@@ -3779,12 +3906,35 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
 
+#ifndef __PAGETABLE_P4D_FOLDED
+/*
+ * Allocate p4d page table.
+ * We've already handled the fast-path in-line.
+ */
+int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+	p4d_t *new = p4d_alloc_one(mm, address);
+	if (!new)
+		return -ENOMEM;
+
+	smp_wmb(); /* See comment in __pte_alloc */
+
+	spin_lock(&mm->page_table_lock);
+	if (pgd_present(*pgd))		/* Another has populated it */
+		p4d_free(mm, new);
+	else
+		pgd_populate(mm, pgd, new);
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
+#endif /* __PAGETABLE_P4D_FOLDED */
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
-int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
 {
 	pud_t *new = pud_alloc_one(mm, address);
 	if (!new)
@@ -3793,10 +3943,17 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 	smp_wmb(); /* See comment in __pte_alloc */
 
 	spin_lock(&mm->page_table_lock);
-	if (pgd_present(*pgd))		/* Another has populated it */
+#ifndef __ARCH_HAS_5LEVEL_HACK
+	if (p4d_present(*p4d))		/* Another has populated it */
 		pud_free(mm, new);
 	else
-		pgd_populate(mm, pgd, new);
+		p4d_populate(mm, p4d, new);
+#else
+	if (pgd_present(*p4d))		/* Another has populated it */
+		pud_free(mm, new);
+	else
+		pgd_populate(mm, p4d, new);
+#endif /* __ARCH_HAS_5LEVEL_HACK */
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
@@ -3839,6 +3996,7 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 			    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *ptep;
@@ -3847,7 +4005,11 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 		goto out;
 
-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+		goto out;
+
+	pud = pud_offset(p4d, address);
 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
 		goto out;
 
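__p4d_alloc() above repeats the standard page-table population idiom: allocate the new table with no lock held (allocation may sleep), then install it under page_table_lock only if nobody raced in, freeing the spare otherwise. A userspace sketch of the same check-and-install under a plain mutex; the kernel additionally publishes the initialized table with smp_wmb(), which the mutex subsumes here (toy names throughout):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int install_slot(void **slot)
{
	void *new = calloc(1, 4096);	/* may "sleep": no lock held */

	if (!new)
		return -1;

	pthread_mutex_lock(&table_lock);
	if (*slot)			/* another thread populated it */
		free(new);
	else
		*slot = new;
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	void *slot = NULL;

	install_slot(&slot);		/* installs the table */
	install_slot(&slot);		/* detects the existing entry */
	free(slot);
	return 0;
}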
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 295479b792ec..6fa7208bcd56 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -125,9 +125,12 @@ void put_online_mems(void)
 
 }
 
+/* Serializes write accesses to mem_hotplug.active_writer. */
+static DEFINE_MUTEX(memory_add_remove_lock);
+
 void mem_hotplug_begin(void)
 {
-	assert_held_device_hotplug();
+	mutex_lock(&memory_add_remove_lock);
 
 	mem_hotplug.active_writer = current;
 
@@ -147,6 +150,7 @@ void mem_hotplug_done(void)
 	mem_hotplug.active_writer = NULL;
 	mutex_unlock(&mem_hotplug.lock);
 	memhp_lock_release();
+	mutex_unlock(&memory_add_remove_lock);
 }
 
 /* add this memory to iomem resource */
diff --git a/mm/mlock.c b/mm/mlock.c
index 1050511f8b2b..0dd9ca18e19e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -380,6 +380,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 	pte = get_locked_pte(vma->vm_mm, start, &ptl);
 	/* Make sure we do not cross the page table boundary */
 	end = pgd_addr_end(start, end);
+	end = p4d_addr_end(start, end);
 	end = pud_addr_end(start, end);
 	end = pmd_addr_end(start, end);
 
@@ -442,7 +443,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
 	while (start < end) {
 		struct page *page;
-		unsigned int page_mask;
+		unsigned int page_mask = 0;
 		unsigned long page_increm;
 		struct pagevec pvec;
 		struct zone *zone;
@@ -456,8 +457,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		 * suits munlock very well (and if somehow an abnormal page
 		 * has sneaked into the range, we won't oops here: great).
 		 */
-		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
-				&page_mask);
+		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
 
 		if (page && !IS_ERR(page)) {
 			if (PageTransTail(page)) {
@@ -468,8 +468,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 			/*
 			 * Any THP page found by follow_page_mask() may
 			 * have gotten split before reaching
-			 * munlock_vma_page(), so we need to recompute
-			 * the page_mask here.
+			 * munlock_vma_page(), so we need to compute
+			 * the page_mask here instead.
 			 */
 			page_mask = munlock_vma_page(page);
 			unlock_page(page);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 848e946b08e5..8edd0d576254 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -193,14 +193,14 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 }
 
 static inline unsigned long change_pud_range(struct vm_area_struct *vma,
-		pgd_t *pgd, unsigned long addr, unsigned long end,
+		p4d_t *p4d, unsigned long addr, unsigned long end,
 		pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
 	pud_t *pud;
 	unsigned long next;
 	unsigned long pages = 0;
 
-	pud = pud_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
@@ -212,6 +212,26 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
 	return pages;
 }
 
+static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
+		pgd_t *pgd, unsigned long addr, unsigned long end,
+		pgprot_t newprot, int dirty_accountable, int prot_numa)
+{
+	p4d_t *p4d;
+	unsigned long next;
+	unsigned long pages = 0;
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		if (p4d_none_or_clear_bad(p4d))
+			continue;
+		pages += change_pud_range(vma, p4d, addr, next, newprot,
+					  dirty_accountable, prot_numa);
+	} while (p4d++, addr = next, addr != end);
+
+	return pages;
+}
+
 static unsigned long change_protection_range(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable, int prot_numa)
@@ -230,7 +250,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		pages += change_pud_range(vma, pgd, addr, next, newprot,
+		pages += change_p4d_range(vma, pgd, addr, next, newprot,
 				 dirty_accountable, prot_numa);
 	} while (pgd++, addr = next, addr != end);
 
diff --git a/mm/mremap.c b/mm/mremap.c
index 8233b0105c82..cd8a1b199ef9 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -32,6 +32,7 @@
 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
@@ -39,7 +40,11 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 	if (pgd_none_or_clear_bad(pgd))
 		return NULL;
 
-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none_or_clear_bad(p4d))
+		return NULL;
+
+	pud = pud_offset(p4d, addr);
 	if (pud_none_or_clear_bad(pud))
 		return NULL;
 
@@ -54,11 +59,15 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 			    unsigned long addr)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
 	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return NULL;
+	pud = pud_alloc(mm, p4d, addr);
 	if (!pud)
 		return NULL;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eaa64d2ffdc5..6cbde310abed 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -873,7 +873,8 @@ done_merging:
 		higher_page = page + (combined_pfn - pfn);
 		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
 		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
-		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+		if (pfn_valid_within(buddy_pfn) &&
+		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
 			list_add_tail(&page->lru,
 				&zone->free_area[order].free_list[migratetype]);
 			goto out;
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index a23001a22c15..c4c9def8ffea 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -104,6 +104,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	struct mm_struct *mm = pvmw->vma->vm_mm;
 	struct page *page = pvmw->page;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 
 	/* The only possible pmd mapping has been handled on last iteration */
@@ -133,7 +134,10 @@ restart:
 	pgd = pgd_offset(mm, pvmw->address);
 	if (!pgd_present(*pgd))
 		return false;
-	pud = pud_offset(pgd, pvmw->address);
+	p4d = p4d_offset(pgd, pvmw->address);
+	if (!p4d_present(*p4d))
+		return false;
+	pud = pud_offset(p4d, pvmw->address);
 	if (!pud_present(*pud))
 		return false;
 	pvmw->pmd = pmd_offset(pud, pvmw->address);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 03761577ae86..60f7856e508f 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -69,14 +69,14 @@ again:
 	return err;
 }
 
-static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 			  struct mm_walk *walk)
 {
 	pud_t *pud;
 	unsigned long next;
 	int err = 0;
 
-	pud = pud_offset(pgd, addr);
+	pud = pud_offset(p4d, addr);
 	do {
  again:
 		next = pud_addr_end(addr, end);
@@ -113,6 +113,32 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return err;
 }
 
+static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+			  struct mm_walk *walk)
+{
+	p4d_t *p4d;
+	unsigned long next;
+	int err = 0;
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		if (p4d_none_or_clear_bad(p4d)) {
+			if (walk->pte_hole)
+				err = walk->pte_hole(addr, next, walk);
+			if (err)
+				break;
+			continue;
+		}
+		if (walk->pmd_entry || walk->pte_entry)
+			err = walk_pud_range(p4d, addr, next, walk);
+		if (err)
+			break;
+	} while (p4d++, addr = next, addr != end);
+
+	return err;
+}
+
 static int walk_pgd_range(unsigned long addr, unsigned long end,
 			  struct mm_walk *walk)
 {
@@ -131,7 +157,7 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
 			continue;
 		}
 		if (walk->pmd_entry || walk->pte_entry)
-			err = walk_pud_range(pgd, addr, next, walk);
+			err = walk_p4d_range(pgd, addr, next, walk);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
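walk_p4d_range() follows the walker convention used at every level: an empty entry is reported through the pte_hole callback, a populated one is descended into, and the first non-zero error stops the walk. A toy single-level walker with the same callback shape (hypothetical struct walk, not the kernel's mm_walk):

#include <stdio.h>

struct walk {
	int (*hole)(int idx, void *priv);	/* like walk->pte_hole */
	int (*entry)(int val, void *priv);	/* like walk->pte_entry */
	void *priv;
};

static int walk_level(const int *slots, int n, struct walk *w)
{
	for (int i = 0; i < n; i++) {
		if (!slots[i]) {		/* empty: report the hole */
			if (w->hole) {
				int err = w->hole(i, w->priv);

				if (err)
					return err;
			}
			continue;
		}
		if (w->entry) {
			int err = w->entry(slots[i], w->priv);

			if (err)
				return err;	/* propagate, stop early */
		}
	}
	return 0;
}

static int print_entry(int val, void *priv)
{
	(void)priv;
	printf("entry %d\n", val);
	return 0;
}

int main(void)
{
	int slots[] = { 0, 7, 0, 9 };
	struct walk w = { .entry = print_entry };

	return walk_level(slots, 4, &w);
}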
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 538998a137d2..9ac639499bd1 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -21,7 +21,6 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
 
 /**
  * pcpu_get_pages - get temp pages array
- * @chunk: chunk of interest
  *
  * Returns pointer to array of pointers to struct page which can be indexed
  * with pcpu_page_idx(). Note that there is only one array and accesses
@@ -30,7 +29,7 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
  * RETURNS:
  * Pointer to temp pages array on success.
  */
-static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc)
+static struct page **pcpu_get_pages(void)
 {
 	static struct page **pages;
 	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
@@ -275,7 +274,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
 {
 	struct page **pages;
 
-	pages = pcpu_get_pages(chunk);
+	pages = pcpu_get_pages();
 	if (!pages)
 		return -ENOMEM;
 
@@ -313,7 +312,7 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
 	 * successful population attempt so the temp pages array must
 	 * be available now.
 	 */
-	pages = pcpu_get_pages(chunk);
+	pages = pcpu_get_pages();
 	BUG_ON(!pages);
 
 	/* unmap and free */
diff --git a/mm/percpu.c b/mm/percpu.c
index 5696039b5c07..60a6488e9e6d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1011,8 +1011,11 @@ area_found:
 		mutex_unlock(&pcpu_alloc_mutex);
 	}
 
-	if (chunk != pcpu_reserved_chunk)
+	if (chunk != pcpu_reserved_chunk) {
+		spin_lock_irqsave(&pcpu_lock, flags);
 		pcpu_nr_empty_pop_pages -= occ_pages;
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+	}
 
 	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
 		pcpu_schedule_balance_work();
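The percpu fix takes pcpu_lock around the read-modify-write of pcpu_nr_empty_pop_pages so a concurrent updater cannot lose a decrement. The same point in miniature, with a toy counter and a pthread mutex standing in for the irq-safe spinlock:

#include <pthread.h>

static pthread_mutex_t pcpu_lock = PTHREAD_MUTEX_INITIALIZER;
static long nr_empty_pages;

static void consume_pages(long occ)
{
	/*
	 * An unlocked "nr_empty_pages -= occ" could race with other
	 * writers and lose updates; take the lock for the whole RMW.
	 */
	pthread_mutex_lock(&pcpu_lock);
	nr_empty_pages -= occ;
	pthread_mutex_unlock(&pcpu_lock);
}

int main(void)
{
	consume_pages(4);
	return 0;
}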
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 4ed5908c65b0..c99d9512a45b 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -22,6 +22,12 @@ void pgd_clear_bad(pgd_t *pgd)
 	pgd_clear(pgd);
 }
 
+void p4d_clear_bad(p4d_t *p4d)
+{
+	p4d_ERROR(*p4d);
+	p4d_clear(p4d);
+}
+
 void pud_clear_bad(pud_t *pud)
 {
 	pud_ERROR(*pud);
diff --git a/mm/rmap.c b/mm/rmap.c
index 2da487d6cea8..49ed681ccc7b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -684,6 +684,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd = NULL;
 	pmd_t pmde;
@@ -692,7 +693,11 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 	if (!pgd_present(*pgd))
 		goto out;
 
-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (!p4d_present(*p4d))
+		goto out;
+
+	pud = pud_offset(p4d, address);
 	if (!pud_present(*pud))
 		goto out;
 
@@ -1316,12 +1321,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	}
 
 	while (page_vma_mapped_walk(&pvmw)) {
-		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
-		address = pvmw.address;
-
-		/* Unexpected PMD-mapped THP? */
-		VM_BUG_ON_PAGE(!pvmw.pte, page);
-
 		/*
 		 * If the page is mlock()d, we cannot swap it out.
 		 * If it's recently referenced (perhaps page_referenced
@@ -1345,6 +1344,13 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			continue;
 		}
 
+		/* Unexpected PMD-mapped THP? */
+		VM_BUG_ON_PAGE(!pvmw.pte, page);
+
+		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+		address = pvmw.address;
+
+
 		if (!(flags & TTU_IGNORE_ACCESS)) {
 			if (ptep_clear_flush_young_notify(vma, address,
 						pvmw.pte)) {
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 574c67b663fe..a56c3989f773 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -196,9 +196,9 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 	return pmd;
 }
 
-pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
+pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 {
-	pud_t *pud = pud_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
 	if (pud_none(*pud)) {
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
@@ -208,6 +208,18 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
 	return pud;
 }
 
+p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
+{
+	p4d_t *p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d)) {
+		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+		if (!p)
+			return NULL;
+		p4d_populate(&init_mm, p4d, p);
+	}
+	return p4d;
+}
+
 pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 {
 	pgd_t *pgd = pgd_offset_k(addr);
@@ -225,6 +237,7 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
 {
 	unsigned long addr = start;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -233,7 +246,10 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
 		pgd = vmemmap_pgd_populate(addr, node);
 		if (!pgd)
 			return -ENOMEM;
-		pud = vmemmap_pud_populate(pgd, addr, node);
+		p4d = vmemmap_p4d_populate(pgd, addr, node);
+		if (!p4d)
+			return -ENOMEM;
+		pud = vmemmap_pud_populate(p4d, addr, node);
 		if (!pud)
 			return -ENOMEM;
 		pmd = vmemmap_pmd_populate(pud, addr, node);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 521ef9b6064f..178130880b90 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1517,7 +1517,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1517 return 0; 1517 return 0;
1518} 1518}
1519 1519
1520static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, 1520static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
1521 unsigned long addr, unsigned long end, 1521 unsigned long addr, unsigned long end,
1522 swp_entry_t entry, struct page *page) 1522 swp_entry_t entry, struct page *page)
1523{ 1523{
@@ -1525,7 +1525,7 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
1525 unsigned long next; 1525 unsigned long next;
1526 int ret; 1526 int ret;
1527 1527
1528 pud = pud_offset(pgd, addr); 1528 pud = pud_offset(p4d, addr);
1529 do { 1529 do {
1530 next = pud_addr_end(addr, end); 1530 next = pud_addr_end(addr, end);
1531 if (pud_none_or_clear_bad(pud)) 1531 if (pud_none_or_clear_bad(pud))
@@ -1537,6 +1537,26 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
1537 return 0; 1537 return 0;
1538} 1538}
1539 1539
1540static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
1541 unsigned long addr, unsigned long end,
1542 swp_entry_t entry, struct page *page)
1543{
1544 p4d_t *p4d;
1545 unsigned long next;
1546 int ret;
1547
1548 p4d = p4d_offset(pgd, addr);
1549 do {
1550 next = p4d_addr_end(addr, end);
1551 if (p4d_none_or_clear_bad(p4d))
1552 continue;
1553 ret = unuse_pud_range(vma, p4d, addr, next, entry, page);
1554 if (ret)
1555 return ret;
1556 } while (p4d++, addr = next, addr != end);
1557 return 0;
1558}
1559
1540static int unuse_vma(struct vm_area_struct *vma, 1560static int unuse_vma(struct vm_area_struct *vma,
1541 swp_entry_t entry, struct page *page) 1561 swp_entry_t entry, struct page *page)
1542{ 1562{
@@ -1560,7 +1580,7 @@ static int unuse_vma(struct vm_area_struct *vma,
1560 next = pgd_addr_end(addr, end); 1580 next = pgd_addr_end(addr, end);
1561 if (pgd_none_or_clear_bad(pgd)) 1581 if (pgd_none_or_clear_bad(pgd))
1562 continue; 1582 continue;
1563 ret = unuse_pud_range(vma, pgd, addr, next, entry, page); 1583 ret = unuse_p4d_range(vma, pgd, addr, next, entry, page);
1564 if (ret) 1584 if (ret)
1565 return ret; 1585 return ret;
1566 } while (pgd++, addr = next, addr != end); 1586 } while (pgd++, addr = next, addr != end);
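unuse_p4d_range() is another copy of the shape every level walker in mm/ shares, and the same template recurs in the userfaultfd and vmalloc hunks below (with *_alloc in place of *_offset on the allocating paths). The recurring skeleton, as a generic sketch (the recursion target is hypothetical):

	static int walk_pud_range(p4d_t *p4d, unsigned long addr,
				  unsigned long end)
	{
		pud_t *pud = pud_offset(p4d, addr);	/* descend one level */
		unsigned long next;

		do {
			next = pud_addr_end(addr, end);	/* clamp to this entry */
			if (pud_none_or_clear_bad(pud))
				continue;		/* hole: skip it */
			/* recurse: walk_pmd_range(pud, addr, next); */
		} while (pud++, addr = next, addr != end);
		return 0;
	}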
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 479e631d43c2..8bcb501bce60 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -128,19 +128,22 @@ out_unlock:
128static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) 128static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
129{ 129{
130 pgd_t *pgd; 130 pgd_t *pgd;
131 p4d_t *p4d;
131 pud_t *pud; 132 pud_t *pud;
132 pmd_t *pmd = NULL;
133 133
134 pgd = pgd_offset(mm, address); 134 pgd = pgd_offset(mm, address);
135 pud = pud_alloc(mm, pgd, address); 135 p4d = p4d_alloc(mm, pgd, address);
136 if (pud) 136 if (!p4d)
137 /* 137 return NULL;
138 * Note that we didn't run this because the pmd was 138 pud = pud_alloc(mm, p4d, address);
139 * missing, the *pmd may be already established and in 139 if (!pud)
140 * turn it may also be a trans_huge_pmd. 140 return NULL;
141 */ 141 /*
142 pmd = pmd_alloc(mm, pud, address); 142 * Note that we didn't run this because the pmd was
143 return pmd; 143 * missing, the *pmd may be already established and in
144 * turn it may also be a trans_huge_pmd.
145 */
146 return pmd_alloc(mm, pud, address);
144} 147}
145 148
146#ifdef CONFIG_HUGETLB_PAGE 149#ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b4024d688f38..0b057628a7ba 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -86,12 +86,12 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
86 } while (pmd++, addr = next, addr != end); 86 } while (pmd++, addr = next, addr != end);
87} 87}
88 88
89static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) 89static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
90{ 90{
91 pud_t *pud; 91 pud_t *pud;
92 unsigned long next; 92 unsigned long next;
93 93
94 pud = pud_offset(pgd, addr); 94 pud = pud_offset(p4d, addr);
95 do { 95 do {
96 next = pud_addr_end(addr, end); 96 next = pud_addr_end(addr, end);
97 if (pud_clear_huge(pud)) 97 if (pud_clear_huge(pud))
@@ -102,6 +102,22 @@ static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
102 } while (pud++, addr = next, addr != end); 102 } while (pud++, addr = next, addr != end);
103} 103}
104 104
105static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
106{
107 p4d_t *p4d;
108 unsigned long next;
109
110 p4d = p4d_offset(pgd, addr);
111 do {
112 next = p4d_addr_end(addr, end);
113 if (p4d_clear_huge(p4d))
114 continue;
115 if (p4d_none_or_clear_bad(p4d))
116 continue;
117 vunmap_pud_range(p4d, addr, next);
118 } while (p4d++, addr = next, addr != end);
119}
120
105static void vunmap_page_range(unsigned long addr, unsigned long end) 121static void vunmap_page_range(unsigned long addr, unsigned long end)
106{ 122{
107 pgd_t *pgd; 123 pgd_t *pgd;
@@ -113,7 +129,7 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
113 next = pgd_addr_end(addr, end); 129 next = pgd_addr_end(addr, end);
114 if (pgd_none_or_clear_bad(pgd)) 130 if (pgd_none_or_clear_bad(pgd))
115 continue; 131 continue;
116 vunmap_pud_range(pgd, addr, next); 132 vunmap_p4d_range(pgd, addr, next);
117 } while (pgd++, addr = next, addr != end); 133 } while (pgd++, addr = next, addr != end);
118} 134}
119 135
@@ -160,13 +176,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
160 return 0; 176 return 0;
161} 177}
162 178
163static int vmap_pud_range(pgd_t *pgd, unsigned long addr, 179static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
164 unsigned long end, pgprot_t prot, struct page **pages, int *nr) 180 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
165{ 181{
166 pud_t *pud; 182 pud_t *pud;
167 unsigned long next; 183 unsigned long next;
168 184
169 pud = pud_alloc(&init_mm, pgd, addr); 185 pud = pud_alloc(&init_mm, p4d, addr);
170 if (!pud) 186 if (!pud)
171 return -ENOMEM; 187 return -ENOMEM;
172 do { 188 do {
@@ -177,6 +193,23 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
177 return 0; 193 return 0;
178} 194}
179 195
196static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
197 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
198{
199 p4d_t *p4d;
200 unsigned long next;
201
202 p4d = p4d_alloc(&init_mm, pgd, addr);
203 if (!p4d)
204 return -ENOMEM;
205 do {
206 next = p4d_addr_end(addr, end);
207 if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
208 return -ENOMEM;
209 } while (p4d++, addr = next, addr != end);
210 return 0;
211}
212
180/* 213/*
181 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and 214 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
182 * will have pfns corresponding to the "pages" array. 215 * will have pfns corresponding to the "pages" array.
@@ -196,7 +229,7 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
196 pgd = pgd_offset_k(addr); 229 pgd = pgd_offset_k(addr);
197 do { 230 do {
198 next = pgd_addr_end(addr, end); 231 next = pgd_addr_end(addr, end);
199 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); 232 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
200 if (err) 233 if (err)
201 return err; 234 return err;
202 } while (pgd++, addr = next, addr != end); 235 } while (pgd++, addr = next, addr != end);
@@ -237,6 +270,10 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
237 unsigned long addr = (unsigned long) vmalloc_addr; 270 unsigned long addr = (unsigned long) vmalloc_addr;
238 struct page *page = NULL; 271 struct page *page = NULL;
239 pgd_t *pgd = pgd_offset_k(addr); 272 pgd_t *pgd = pgd_offset_k(addr);
273 p4d_t *p4d;
274 pud_t *pud;
275 pmd_t *pmd;
276 pte_t *ptep, pte;
240 277
241 /* 278 /*
242 * XXX we might need to change this if we add VIRTUAL_BUG_ON for 279 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
@@ -244,21 +281,23 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
244 */ 281 */
245 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); 282 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
246 283
247 if (!pgd_none(*pgd)) { 284 if (pgd_none(*pgd))
248 pud_t *pud = pud_offset(pgd, addr); 285 return NULL;
249 if (!pud_none(*pud)) { 286 p4d = p4d_offset(pgd, addr);
250 pmd_t *pmd = pmd_offset(pud, addr); 287 if (p4d_none(*p4d))
251 if (!pmd_none(*pmd)) { 288 return NULL;
252 pte_t *ptep, pte; 289 pud = pud_offset(p4d, addr);
253 290 if (pud_none(*pud))
254 ptep = pte_offset_map(pmd, addr); 291 return NULL;
255 pte = *ptep; 292 pmd = pmd_offset(pud, addr);
256 if (pte_present(pte)) 293 if (pmd_none(*pmd))
257 page = pte_page(pte); 294 return NULL;
258 pte_unmap(ptep); 295
259 } 296 ptep = pte_offset_map(pmd, addr);
260 } 297 pte = *ptep;
261 } 298 if (pte_present(pte))
299 page = pte_page(pte);
300 pte_unmap(ptep);
262 return page; 301 return page;
263} 302}
264EXPORT_SYMBOL(vmalloc_to_page); 303EXPORT_SYMBOL(vmalloc_to_page);
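Besides adding the p4d step, the rewrite above flattens the nested if-ladder in vmalloc_to_page() into early returns; behaviour for present mappings is unchanged. Illustrative use of the helper (assuming the vmalloc succeeds):

	void *buf = vmalloc(4 * PAGE_SIZE);

	if (buf) {
		/* page backing the second 4K chunk of the allocation */
		struct page *pg = vmalloc_to_page(buf + PAGE_SIZE);

		/* pg is usable with page_to_pfn(), kmap(), etc. */
		vfree(buf);
	}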
@@ -1644,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1644 1683
1645 if (fatal_signal_pending(current)) { 1684 if (fatal_signal_pending(current)) {
1646 area->nr_pages = i; 1685 area->nr_pages = i;
1647 goto fail; 1686 goto fail_no_warn;
1648 } 1687 }
1649 1688
1650 if (node == NUMA_NO_NODE) 1689 if (node == NUMA_NO_NODE)
@@ -1670,6 +1709,7 @@ fail:
1670 warn_alloc(gfp_mask, NULL, 1709 warn_alloc(gfp_mask, NULL,
1671 "vmalloc: allocation failure, allocated %ld of %ld bytes", 1710 "vmalloc: allocation failure, allocated %ld of %ld bytes",
1672 (area->nr_pages*PAGE_SIZE), area->size); 1711 (area->nr_pages*PAGE_SIZE), area->size);
1712fail_no_warn:
1673 vfree(area->addr); 1713 vfree(area->addr);
1674 return NULL; 1714 return NULL;
1675} 1715}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 69f9aff39a2e..b1947f0cbee2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1065,6 +1065,9 @@ const char * const vmstat_text[] = {
1065 "thp_split_page_failed", 1065 "thp_split_page_failed",
1066 "thp_deferred_split_page", 1066 "thp_deferred_split_page",
1067 "thp_split_pmd", 1067 "thp_split_pmd",
1068#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1069 "thp_split_pud",
1070#endif
1068 "thp_zero_page_alloc", 1071 "thp_zero_page_alloc",
1069 "thp_zero_page_alloc_failed", 1072 "thp_zero_page_alloc_failed",
1070#endif 1073#endif
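The new thp_split_pud counter only exists on architectures that select CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD (x86-64 with 1G DAX mappings at this point). Presumably it is bumped from the PUD split path the same way thp_split_pmd is; roughly (assumed bump site, not shown in this diff):

	void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
			      unsigned long address)
	{
		count_vm_event(THP_SPLIT_PUD);	/* /proc/vmstat: thp_split_pud */
		/* ... actual split ... */
	}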
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 8970a2fd3b1a..f9492bccfd79 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -667,6 +667,7 @@ next:
667 z3fold_page_unlock(zhdr); 667 z3fold_page_unlock(zhdr);
668 spin_lock(&pool->lock); 668 spin_lock(&pool->lock);
669 if (kref_put(&zhdr->refcount, release_z3fold_page)) { 669 if (kref_put(&zhdr->refcount, release_z3fold_page)) {
670 spin_unlock(&pool->lock);
670 atomic64_dec(&pool->pages_nr); 671 atomic64_dec(&pool->pages_nr);
671 return 0; 672 return 0;
672 } 673 }
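The z3fold change fixes a lock imbalance: a non-zero return from kref_put() means release_z3fold_page() ran and the page is gone, and this early-return path previously left pool->lock held. Every exit from such a section must drop the lock:

	spin_lock(&pool->lock);
	if (kref_put(&zhdr->refcount, release_z3fold_page)) {
		spin_unlock(&pool->lock);	/* the fix: drop before returning */
		atomic64_dec(&pool->pages_nr);
		return 0;
	}
	/* ... continue with pool->lock held ... */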
diff --git a/net/atm/svc.c b/net/atm/svc.c
index db9794ec61d8..5589de7086af 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -318,7 +318,8 @@ out:
318 return error; 318 return error;
319} 319}
320 320
321static int svc_accept(struct socket *sock, struct socket *newsock, int flags) 321static int svc_accept(struct socket *sock, struct socket *newsock, int flags,
322 bool kern)
322{ 323{
323 struct sock *sk = sock->sk; 324 struct sock *sk = sock->sk;
324 struct sk_buff *skb; 325 struct sk_buff *skb;
@@ -329,7 +330,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
329 330
330 lock_sock(sk); 331 lock_sock(sk);
331 332
332 error = svc_create(sock_net(sk), newsock, 0, 0); 333 error = svc_create(sock_net(sk), newsock, 0, kern);
333 if (error) 334 if (error)
334 goto out; 335 goto out;
335 336
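From here on, the networking hunks are dominated by one mechanical API change: the accept() methods in struct proto_ops (and sk_prot->accept()) gain a bool kern argument, so a socket accepted on behalf of the kernel creates its child as a kernel socket too, giving it the separate lockdep classes introduced in the net/core/sock.c hunk further down. The ax25, bluetooth, decnet, inet, irda, iucv and llc changes below all follow this pattern. The shape of the change, abridged:

	/* struct proto_ops after this series (abridged): */
	int (*accept)(struct socket *sock, struct socket *newsock,
		      int flags, bool kern);

	/* each implementation forwards kern to the child's creation: */
	error = svc_create(sock_net(sk), newsock, 0, kern);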
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index a8e42cedf1db..b7c486752b3a 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1320,7 +1320,8 @@ out_release:
1320 return err; 1320 return err;
1321} 1321}
1322 1322
1323static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) 1323static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
1324 bool kern)
1324{ 1325{
1325 struct sk_buff *skb; 1326 struct sk_buff *skb;
1326 struct sock *newsk; 1327 struct sock *newsk;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f307b145ea54..507b80d59dec 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -301,7 +301,7 @@ done:
301} 301}
302 302
303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, 303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
304 int flags) 304 int flags, bool kern)
305{ 305{
306 DEFINE_WAIT_FUNC(wait, woken_wake_function); 306 DEFINE_WAIT_FUNC(wait, woken_wake_function);
307 struct sock *sk = sock->sk, *nsk; 307 struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index aa1a814ceddc..ac3c650cb234 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -471,7 +471,8 @@ done:
471 return err; 471 return err;
472} 472}
473 473
474static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags) 474static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,
475 bool kern)
475{ 476{
476 DEFINE_WAIT_FUNC(wait, woken_wake_function); 477 DEFINE_WAIT_FUNC(wait, woken_wake_function);
477 struct sock *sk = sock->sk, *nsk; 478 struct sock *sk = sock->sk, *nsk;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index e4e9a2da1e7e..728e0c8dc8e7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -627,7 +627,7 @@ done:
627} 627}
628 628
629static int sco_sock_accept(struct socket *sock, struct socket *newsock, 629static int sco_sock_accept(struct socket *sock, struct socket *newsock,
630 int flags) 630 int flags, bool kern)
631{ 631{
632 DEFINE_WAIT_FUNC(wait, woken_wake_function); 632 DEFINE_WAIT_FUNC(wait, woken_wake_function);
633 struct sock *sk = sock->sk, *ch; 633 struct sock *sk = sock->sk, *ch;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 236f34244dbe..013f2290bfa5 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL(br_should_route_hook);
30static int 30static int
31br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) 31br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
32{ 32{
33 br_drop_fake_rtable(skb);
33 return netif_receive_skb(skb); 34 return netif_receive_skb(skb);
34} 35}
35 36
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 95087e6e8258..fa87fbd62bb7 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv,
521} 521}
522 522
523 523
524/* PF_BRIDGE/LOCAL_IN ************************************************/
525/* The packet is locally destined, which requires a real
526 * dst_entry, so detach the fake one. On the way up, the
527 * packet would pass through PRE_ROUTING again (which already
528 * took place when the packet entered the bridge), but we
529 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
530 * prevent this from happening. */
531static unsigned int br_nf_local_in(void *priv,
532 struct sk_buff *skb,
533 const struct nf_hook_state *state)
534{
535 br_drop_fake_rtable(skb);
536 return NF_ACCEPT;
537}
538
539/* PF_BRIDGE/FORWARD *************************************************/ 524/* PF_BRIDGE/FORWARD *************************************************/
540static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 525static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
541{ 526{
@@ -908,12 +893,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
908 .priority = NF_BR_PRI_BRNF, 893 .priority = NF_BR_PRI_BRNF,
909 }, 894 },
910 { 895 {
911 .hook = br_nf_local_in,
912 .pf = NFPROTO_BRIDGE,
913 .hooknum = NF_BR_LOCAL_IN,
914 .priority = NF_BR_PRI_BRNF,
915 },
916 {
917 .hook = br_nf_forward_ip, 896 .hook = br_nf_forward_ip,
918 .pf = NFPROTO_BRIDGE, 897 .pf = NFPROTO_BRIDGE,
919 .hooknum = NF_BR_FORWARD, 898 .hooknum = NF_BR_FORWARD,
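The two bridge hunks move the fake-rtable drop out of a netfilter LOCAL_IN hook and into br_netif_receive_skb(), so locally delivered bridge traffic loses its placeholder dst even when the br_netfilter hooks are not active. For context, the helper amounts to roughly this (a sketch; the real definition lives in the bridge netfilter headers):

	static inline void br_drop_fake_rtable(struct sk_buff *skb)
	{
		struct dst_entry *dst = skb_dst(skb);

		if (dst && (dst->flags & DST_FAKE_RTABLE))
			skb_dst_drop(skb);	/* detach the placeholder dst */
	}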
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 464e88599b9d..108533859a53 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -230,6 +230,7 @@ enum {
230 Opt_osdkeepalivetimeout, 230 Opt_osdkeepalivetimeout,
231 Opt_mount_timeout, 231 Opt_mount_timeout,
232 Opt_osd_idle_ttl, 232 Opt_osd_idle_ttl,
233 Opt_osd_request_timeout,
233 Opt_last_int, 234 Opt_last_int,
234 /* int args above */ 235 /* int args above */
235 Opt_fsid, 236 Opt_fsid,
@@ -256,6 +257,7 @@ static match_table_t opt_tokens = {
256 {Opt_osdkeepalivetimeout, "osdkeepalive=%d"}, 257 {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
257 {Opt_mount_timeout, "mount_timeout=%d"}, 258 {Opt_mount_timeout, "mount_timeout=%d"},
258 {Opt_osd_idle_ttl, "osd_idle_ttl=%d"}, 259 {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
260 {Opt_osd_request_timeout, "osd_request_timeout=%d"},
259 /* int args above */ 261 /* int args above */
260 {Opt_fsid, "fsid=%s"}, 262 {Opt_fsid, "fsid=%s"},
261 {Opt_name, "name=%s"}, 263 {Opt_name, "name=%s"},
@@ -361,6 +363,7 @@ ceph_parse_options(char *options, const char *dev_name,
361 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; 363 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
362 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; 364 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
363 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; 365 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
366 opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT;
364 367
365 /* get mon ip(s) */ 368 /* get mon ip(s) */
366 /* ip1[:port1][,ip2[:port2]...] */ 369 /* ip1[:port1][,ip2[:port2]...] */
@@ -473,6 +476,15 @@ ceph_parse_options(char *options, const char *dev_name,
473 } 476 }
474 opt->mount_timeout = msecs_to_jiffies(intval * 1000); 477 opt->mount_timeout = msecs_to_jiffies(intval * 1000);
475 break; 478 break;
479 case Opt_osd_request_timeout:
480 /* 0 is "wait forever" (i.e. infinite timeout) */
481 if (intval < 0 || intval > INT_MAX / 1000) {
482 pr_err("osd_request_timeout out of range\n");
483 err = -EINVAL;
484 goto out;
485 }
486 opt->osd_request_timeout = msecs_to_jiffies(intval * 1000);
487 break;
476 488
477 case Opt_share: 489 case Opt_share:
478 opt->flags &= ~CEPH_OPT_NOSHARE; 490 opt->flags &= ~CEPH_OPT_NOSHARE;
@@ -557,6 +569,9 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
557 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) 569 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
558 seq_printf(m, "osdkeepalivetimeout=%d,", 570 seq_printf(m, "osdkeepalivetimeout=%d,",
559 jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000); 571 jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000);
572 if (opt->osd_request_timeout != CEPH_OSD_REQUEST_TIMEOUT_DEFAULT)
573 seq_printf(m, "osd_request_timeout=%d,",
574 jiffies_to_msecs(opt->osd_request_timeout) / 1000);
560 575
561 /* drop redundant comma */ 576 /* drop redundant comma */
562 if (m->count != pos) 577 if (m->count != pos)
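The new libceph option takes seconds, with 0 meaning "wait forever" (the old behaviour and the default), and is only echoed back in the client options when non-default; a kernel CephFS mount or rbd map would pass it as, e.g., osd_request_timeout=60. The net/ceph/osd_client.c hunks below implement the actual expiry.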
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index b65bbf9f45eb..e15ea9e4c495 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1709,6 +1709,8 @@ static void account_request(struct ceph_osd_request *req)
1709 1709
1710 req->r_flags |= CEPH_OSD_FLAG_ONDISK; 1710 req->r_flags |= CEPH_OSD_FLAG_ONDISK;
1711 atomic_inc(&req->r_osdc->num_requests); 1711 atomic_inc(&req->r_osdc->num_requests);
1712
1713 req->r_start_stamp = jiffies;
1712} 1714}
1713 1715
1714static void submit_request(struct ceph_osd_request *req, bool wrlocked) 1716static void submit_request(struct ceph_osd_request *req, bool wrlocked)
@@ -1789,6 +1791,14 @@ static void cancel_request(struct ceph_osd_request *req)
1789 ceph_osdc_put_request(req); 1791 ceph_osdc_put_request(req);
1790} 1792}
1791 1793
1794static void abort_request(struct ceph_osd_request *req, int err)
1795{
1796 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
1797
1798 cancel_map_check(req);
1799 complete_request(req, err);
1800}
1801
1792static void check_pool_dne(struct ceph_osd_request *req) 1802static void check_pool_dne(struct ceph_osd_request *req)
1793{ 1803{
1794 struct ceph_osd_client *osdc = req->r_osdc; 1804 struct ceph_osd_client *osdc = req->r_osdc;
@@ -2487,6 +2497,7 @@ static void handle_timeout(struct work_struct *work)
2487 container_of(work, struct ceph_osd_client, timeout_work.work); 2497 container_of(work, struct ceph_osd_client, timeout_work.work);
2488 struct ceph_options *opts = osdc->client->options; 2498 struct ceph_options *opts = osdc->client->options;
2489 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout; 2499 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2500 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
2490 LIST_HEAD(slow_osds); 2501 LIST_HEAD(slow_osds);
2491 struct rb_node *n, *p; 2502 struct rb_node *n, *p;
2492 2503
@@ -2502,15 +2513,23 @@ static void handle_timeout(struct work_struct *work)
2502 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); 2513 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2503 bool found = false; 2514 bool found = false;
2504 2515
2505 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) { 2516 for (p = rb_first(&osd->o_requests); p; ) {
2506 struct ceph_osd_request *req = 2517 struct ceph_osd_request *req =
2507 rb_entry(p, struct ceph_osd_request, r_node); 2518 rb_entry(p, struct ceph_osd_request, r_node);
2508 2519
2520 p = rb_next(p); /* abort_request() */
2521
2509 if (time_before(req->r_stamp, cutoff)) { 2522 if (time_before(req->r_stamp, cutoff)) {
2510 dout(" req %p tid %llu on osd%d is laggy\n", 2523 dout(" req %p tid %llu on osd%d is laggy\n",
2511 req, req->r_tid, osd->o_osd); 2524 req, req->r_tid, osd->o_osd);
2512 found = true; 2525 found = true;
2513 } 2526 }
2527 if (opts->osd_request_timeout &&
2528 time_before(req->r_start_stamp, expiry_cutoff)) {
2529 pr_err_ratelimited("tid %llu on osd%d timeout\n",
2530 req->r_tid, osd->o_osd);
2531 abort_request(req, -ETIMEDOUT);
2532 }
2514 } 2533 }
2515 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) { 2534 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2516 struct ceph_osd_linger_request *lreq = 2535 struct ceph_osd_linger_request *lreq =
@@ -2530,6 +2549,21 @@ static void handle_timeout(struct work_struct *work)
2530 list_move_tail(&osd->o_keepalive_item, &slow_osds); 2549 list_move_tail(&osd->o_keepalive_item, &slow_osds);
2531 } 2550 }
2532 2551
2552 if (opts->osd_request_timeout) {
2553 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
2554 struct ceph_osd_request *req =
2555 rb_entry(p, struct ceph_osd_request, r_node);
2556
2557 p = rb_next(p); /* abort_request() */
2558
2559 if (time_before(req->r_start_stamp, expiry_cutoff)) {
2560 pr_err_ratelimited("tid %llu on osd%d timeout\n",
2561 req->r_tid, osdc->homeless_osd.o_osd);
2562 abort_request(req, -ETIMEDOUT);
2563 }
2564 }
2565 }
2566
2533 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds)) 2567 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2534 maybe_request_map(osdc); 2568 maybe_request_map(osdc);
2535 2569
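One detail worth noting in handle_timeout(): the cursor is advanced with p = rb_next(p) before the loop body runs, because abort_request() completes the request and can erase it from the rbtree, which would invalidate the node the cursor pointed at. The general erase-safe walk (names hypothetical):

	struct rb_node *p = rb_first(&root);

	while (p) {
		struct req *r = rb_entry(p, struct req, node);

		p = rb_next(p);		/* advance before possibly erasing r */
		if (expired(r))
			complete_and_erase(r);	/* may rb_erase(&r->node, &root) */
	}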
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 6824c0ec8373..ffe9e904d4d1 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -390,9 +390,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
390 dout("crush decode tunable chooseleaf_stable = %d\n", 390 dout("crush decode tunable chooseleaf_stable = %d\n",
391 c->chooseleaf_stable); 391 c->chooseleaf_stable);
392 392
393 crush_finalize(c);
394
395done: 393done:
394 crush_finalize(c);
396 dout("crush_decode success\n"); 395 dout("crush_decode success\n");
397 return c; 396 return c;
398 397
@@ -1380,7 +1379,6 @@ static int decode_new_up_state_weight(void **p, void *end,
1380 if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && 1379 if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
1381 (xorstate & CEPH_OSD_EXISTS)) { 1380 (xorstate & CEPH_OSD_EXISTS)) {
1382 pr_info("osd%d does not exist\n", osd); 1381 pr_info("osd%d does not exist\n", osd);
1383 map->osd_weight[osd] = CEPH_OSD_IN;
1384 ret = set_primary_affinity(map, osd, 1382 ret = set_primary_affinity(map, osd,
1385 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); 1383 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
1386 if (ret) 1384 if (ret)
diff --git a/net/core/dev.c b/net/core/dev.c
index 8637b2b71f3d..7869ae3837ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev)
1304{ 1304{
1305 rtnl_lock(); 1305 rtnl_lock();
1306 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 1306 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1307 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1307 rtnl_unlock(); 1308 rtnl_unlock();
1308} 1309}
1309EXPORT_SYMBOL(netdev_notify_peers); 1310EXPORT_SYMBOL(netdev_notify_peers);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 3945821e9c1f..65ea0ff4017c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -953,7 +953,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
953 while (--i >= new_num) { 953 while (--i >= new_num) {
954 struct kobject *kobj = &dev->_rx[i].kobj; 954 struct kobject *kobj = &dev->_rx[i].kobj;
955 955
956 if (!list_empty(&dev_net(dev)->exit_list)) 956 if (!atomic_read(&dev_net(dev)->count))
957 kobj->uevent_suppress = 1; 957 kobj->uevent_suppress = 1;
958 if (dev->sysfs_rx_queue_group) 958 if (dev->sysfs_rx_queue_group)
959 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); 959 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1371,7 +1371,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
1371 while (--i >= new_num) { 1371 while (--i >= new_num) {
1372 struct netdev_queue *queue = dev->_tx + i; 1372 struct netdev_queue *queue = dev->_tx + i;
1373 1373
1374 if (!list_empty(&dev_net(dev)->exit_list)) 1374 if (!atomic_read(&dev_net(dev)->count))
1375 queue->kobj.uevent_suppress = 1; 1375 queue->kobj.uevent_suppress = 1;
1376#ifdef CONFIG_BQL 1376#ifdef CONFIG_BQL
1377 sysfs_remove_group(&queue->kobj, &dql_group); 1377 sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
1558{ 1558{
1559 struct device *dev = &(ndev->dev); 1559 struct device *dev = &(ndev->dev);
1560 1560
1561 if (!list_empty(&dev_net(ndev)->exit_list)) 1561 if (!atomic_read(&dev_net(ndev)->count))
1562 dev_set_uevent_suppress(dev, 1); 1562 dev_set_uevent_suppress(dev, 1);
1563 1563
1564 kobject_get(&dev->kobj); 1564 kobject_get(&dev->kobj);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f3557958e9bf..cd4ba8c6b609 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3828,13 +3828,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
3828 if (!skb_may_tx_timestamp(sk, false)) 3828 if (!skb_may_tx_timestamp(sk, false))
3829 return; 3829 return;
3830 3830
3831 /* take a reference to prevent skb_orphan() from freeing the socket */ 3831 /* Take a reference to prevent skb_orphan() from freeing the socket,
3832 sock_hold(sk); 3832 * but only if the socket refcount is not zero.
3833 3833 */
3834 *skb_hwtstamps(skb) = *hwtstamps; 3834 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3835 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3835 *skb_hwtstamps(skb) = *hwtstamps;
3836 3836 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
3837 sock_put(sk); 3837 sock_put(sk);
3838 }
3838} 3839}
3839EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3840EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
3840 3841
@@ -3893,7 +3894,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3893{ 3894{
3894 struct sock *sk = skb->sk; 3895 struct sock *sk = skb->sk;
3895 struct sock_exterr_skb *serr; 3896 struct sock_exterr_skb *serr;
3896 int err; 3897 int err = 1;
3897 3898
3898 skb->wifi_acked_valid = 1; 3899 skb->wifi_acked_valid = 1;
3899 skb->wifi_acked = acked; 3900 skb->wifi_acked = acked;
@@ -3903,14 +3904,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3903 serr->ee.ee_errno = ENOMSG; 3904 serr->ee.ee_errno = ENOMSG;
3904 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3905 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3905 3906
3906 /* take a reference to prevent skb_orphan() from freeing the socket */ 3907 /* Take a reference to prevent skb_orphan() from freeing the socket,
3907 sock_hold(sk); 3908 * but only if the socket refcount is not zero.
3908 3909 */
3909 err = sock_queue_err_skb(sk, skb); 3910 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3911 err = sock_queue_err_skb(sk, skb);
3912 sock_put(sk);
3913 }
3910 if (err) 3914 if (err)
3911 kfree_skb(skb); 3915 kfree_skb(skb);
3912
3913 sock_put(sk);
3914} 3916}
3915EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3917EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3916 3918
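Both timestamp-completion paths replace an unconditional sock_hold() with atomic_inc_not_zero() on sk_refcnt: by the time a hardware TX timestamp or wifi ack fires, the socket may already have dropped its last reference, and unconditionally taking a new one would resurrect a freed socket. Note also why err starts at 1 in skb_complete_wifi_ack(): if the refcount grab fails, the skb still has to be freed. The pattern:

	/* opportunistic reference: only touch sk if it is still alive */
	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
		/* sk cannot be freed in this window */
		/* ... deliver the completion ... */
		sock_put(sk);
	}
	/* else: sk is being destroyed; skip it (and free the skb) */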
diff --git a/net/core/sock.c b/net/core/sock.c
index f6fd79f33097..a96d5f7a5734 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -197,66 +197,55 @@ EXPORT_SYMBOL(sk_net_capable);
197 197
198/* 198/*
199 * Each address family might have different locking rules, so we have 199 * Each address family might have different locking rules, so we have
200 * one slock key per address family: 200 * one slock key per address family and separate keys for internal and
201 * userspace sockets.
201 */ 202 */
202static struct lock_class_key af_family_keys[AF_MAX]; 203static struct lock_class_key af_family_keys[AF_MAX];
204static struct lock_class_key af_family_kern_keys[AF_MAX];
203static struct lock_class_key af_family_slock_keys[AF_MAX]; 205static struct lock_class_key af_family_slock_keys[AF_MAX];
206static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
204 207
205/* 208/*
206 * Make lock validator output more readable. (we pre-construct these 209 * Make lock validator output more readable. (we pre-construct these
207 * strings build-time, so that runtime initialization of socket 210 * strings build-time, so that runtime initialization of socket
208 * locks is fast): 211 * locks is fast):
209 */ 212 */
213
214#define _sock_locks(x) \
215 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
216 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
217 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
218 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
219 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
220 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
221 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
222 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
223 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
224 x "27" , x "28" , x "AF_CAN" , \
225 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
226 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
227 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
228 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
229 x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"
230
210static const char *const af_family_key_strings[AF_MAX+1] = { 231static const char *const af_family_key_strings[AF_MAX+1] = {
211 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , 232 _sock_locks("sk_lock-")
212 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
213 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
214 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
215 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
216 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
217 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
218 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
219 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
220 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
221 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
222 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
223 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
224 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
225 "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX"
226}; 233};
227static const char *const af_family_slock_key_strings[AF_MAX+1] = { 234static const char *const af_family_slock_key_strings[AF_MAX+1] = {
228 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 235 _sock_locks("slock-")
229 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
230 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
231 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
232 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
233 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
234 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
235 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
236 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
237 "slock-27" , "slock-28" , "slock-AF_CAN" ,
238 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
239 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
240 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
241 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
242 "slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX"
243}; 236};
244static const char *const af_family_clock_key_strings[AF_MAX+1] = { 237static const char *const af_family_clock_key_strings[AF_MAX+1] = {
245 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 238 _sock_locks("clock-")
246 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", 239};
247 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , 240
248 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" , 241static const char *const af_family_kern_key_strings[AF_MAX+1] = {
249 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" , 242 _sock_locks("k-sk_lock-")
250 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" , 243};
251 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" , 244static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
252 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" , 245 _sock_locks("k-slock-")
253 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , 246};
254 "clock-27" , "clock-28" , "clock-AF_CAN" , 247static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
255 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 248 _sock_locks("k-clock-")
256 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
257 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
258 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
259 "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX"
260}; 249};
261 250
262/* 251/*
@@ -264,6 +253,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
264 * so split the lock classes by using a per-AF key: 253 * so split the lock classes by using a per-AF key:
265 */ 254 */
266static struct lock_class_key af_callback_keys[AF_MAX]; 255static struct lock_class_key af_callback_keys[AF_MAX];
256static struct lock_class_key af_kern_callback_keys[AF_MAX];
267 257
268/* Take into consideration the size of the struct sk_buff overhead in the 258/* Take into consideration the size of the struct sk_buff overhead in the
269 * determination of these values, since that is non-constant across 259 * determination of these values, since that is non-constant across
@@ -1293,7 +1283,16 @@ lenout:
1293 */ 1283 */
1294static inline void sock_lock_init(struct sock *sk) 1284static inline void sock_lock_init(struct sock *sk)
1295{ 1285{
1296 sock_lock_init_class_and_name(sk, 1286 if (sk->sk_kern_sock)
1287 sock_lock_init_class_and_name(
1288 sk,
1289 af_family_kern_slock_key_strings[sk->sk_family],
1290 af_family_kern_slock_keys + sk->sk_family,
1291 af_family_kern_key_strings[sk->sk_family],
1292 af_family_kern_keys + sk->sk_family);
1293 else
1294 sock_lock_init_class_and_name(
1295 sk,
1297 af_family_slock_key_strings[sk->sk_family], 1296 af_family_slock_key_strings[sk->sk_family],
1298 af_family_slock_keys + sk->sk_family, 1297 af_family_slock_keys + sk->sk_family,
1299 af_family_key_strings[sk->sk_family], 1298 af_family_key_strings[sk->sk_family],
@@ -1399,6 +1398,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1399 * why we need sk_prot_creator -acme 1398 * why we need sk_prot_creator -acme
1400 */ 1399 */
1401 sk->sk_prot = sk->sk_prot_creator = prot; 1400 sk->sk_prot = sk->sk_prot_creator = prot;
1401 sk->sk_kern_sock = kern;
1402 sock_lock_init(sk); 1402 sock_lock_init(sk);
1403 sk->sk_net_refcnt = kern ? 0 : 1; 1403 sk->sk_net_refcnt = kern ? 0 : 1;
1404 if (likely(sk->sk_net_refcnt)) 1404 if (likely(sk->sk_net_refcnt))
@@ -2277,7 +2277,8 @@ int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2277} 2277}
2278EXPORT_SYMBOL(sock_no_socketpair); 2278EXPORT_SYMBOL(sock_no_socketpair);
2279 2279
2280int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) 2280int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2281 bool kern)
2281{ 2282{
2282 return -EOPNOTSUPP; 2283 return -EOPNOTSUPP;
2283} 2284}
@@ -2481,7 +2482,14 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2481 } 2482 }
2482 2483
2483 rwlock_init(&sk->sk_callback_lock); 2484 rwlock_init(&sk->sk_callback_lock);
2484 lockdep_set_class_and_name(&sk->sk_callback_lock, 2485 if (sk->sk_kern_sock)
2486 lockdep_set_class_and_name(
2487 &sk->sk_callback_lock,
2488 af_kern_callback_keys + sk->sk_family,
2489 af_family_kern_clock_key_strings[sk->sk_family]);
2490 else
2491 lockdep_set_class_and_name(
2492 &sk->sk_callback_lock,
2485 af_callback_keys + sk->sk_family, 2493 af_callback_keys + sk->sk_family,
2486 af_family_clock_key_strings[sk->sk_family]); 2494 af_family_clock_key_strings[sk->sk_family]);
2487 2495
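The string tables collapse into a single _sock_locks() macro, and the new "k-" tables give kernel-internal sockets their own lockdep classes: an in-kernel socket (one driven by a network filesystem, say) can legitimately be locked while a userspace socket of the same family is held, and separate classes keep lockdep from flagging that as a self-deadlock. What one instantiation expands to, abridged:

	static const char *const af_family_kern_slock_key_strings[AF_MAX + 1] = {
		_sock_locks("k-slock-")
		/* => "k-slock-AF_UNSPEC", "k-slock-AF_UNIX",
		 *    "k-slock-AF_INET", ..., "k-slock-AF_MAX"
		 */
	};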
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198e730c..5e3a7302f774 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
749 for (i = 0; i < hc->tx_seqbufc; i++) 749 for (i = 0; i < hc->tx_seqbufc; i++)
750 kfree(hc->tx_seqbuf[i]); 750 kfree(hc->tx_seqbuf[i]);
751 hc->tx_seqbufc = 0; 751 hc->tx_seqbufc = 0;
752 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
752} 753}
753 754
754static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) 755static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 409d0cfd3447..b99168b0fabf 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
289 289
290 switch (type) { 290 switch (type) {
291 case ICMP_REDIRECT: 291 case ICMP_REDIRECT:
292 dccp_do_redirect(skb, sk); 292 if (!sock_owned_by_user(sk))
293 dccp_do_redirect(skb, sk);
293 goto out; 294 goto out;
294 case ICMP_SOURCE_QUENCH: 295 case ICMP_SOURCE_QUENCH:
295 /* Just silently ignore these. */ 296 /* Just silently ignore these. */
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 233b57367758..d9b6a4e403e7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
122 np = inet6_sk(sk); 122 np = inet6_sk(sk);
123 123
124 if (type == NDISC_REDIRECT) { 124 if (type == NDISC_REDIRECT) {
125 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); 125 if (!sock_owned_by_user(sk)) {
126 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
126 127
127 if (dst) 128 if (dst)
128 dst->ops->redirect(dst, sk, skb); 129 dst->ops->redirect(dst, sk, skb);
130 }
129 goto out; 131 goto out;
130 } 132 }
131 133
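The dccp_v4_err()/dccp_v6_err() changes here, and the matching tcp_v4_err()/tcp_v6_err() hunks below, add the same guard: ICMP redirects arrive in softirq context, and if the socket is currently owned by a process (sock_owned_by_user()), rewriting its cached route underneath that process is racy, so the redirect is simply ignored rather than deferred. The idiom in context (these handlers run under bh_lock_sock()):

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		do_redirect(skb, sk);	/* no process context inside sk */
	/* else: drop it; a subsequent redirect will be honoured */
	bh_unlock_sock(sk);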
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index e267e6f4c9a5..abd07a443219 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -142,6 +142,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
142 struct dccp_request_sock *dreq = dccp_rsk(req); 142 struct dccp_request_sock *dreq = dccp_rsk(req);
143 bool own_req; 143 bool own_req;
144 144
145 /* TCP/DCCP listeners became lockless.
146 * DCCP stores complex state in its request_sock, so we need
147 * a protection for them, now this code runs without being protected
148 * by the parent (listener) lock.
149 */
150 spin_lock_bh(&dreq->dreq_lock);
151
145 /* Check for retransmitted REQUEST */ 152 /* Check for retransmitted REQUEST */
146 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { 153 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
147 154
@@ -156,7 +163,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
156 inet_rtx_syn_ack(sk, req); 163 inet_rtx_syn_ack(sk, req);
157 } 164 }
158 /* Network Duplicate, discard packet */ 165 /* Network Duplicate, discard packet */
159 return NULL; 166 goto out;
160 } 167 }
161 168
162 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; 169 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -182,20 +189,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
182 189
183 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, 190 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
184 req, &own_req); 191 req, &own_req);
185 if (!child) 192 if (child) {
186 goto listen_overflow; 193 child = inet_csk_complete_hashdance(sk, child, req, own_req);
187 194 goto out;
188 return inet_csk_complete_hashdance(sk, child, req, own_req); 195 }
189 196
190listen_overflow:
191 dccp_pr_debug("listen_overflow!\n");
192 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; 197 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
193drop: 198drop:
194 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) 199 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
195 req->rsk_ops->send_reset(sk, skb); 200 req->rsk_ops->send_reset(sk, skb);
196 201
197 inet_csk_reqsk_queue_drop(sk, req); 202 inet_csk_reqsk_queue_drop(sk, req);
198 return NULL; 203out:
204 spin_unlock_bh(&dreq->dreq_lock);
205 return child;
199} 206}
200 207
201EXPORT_SYMBOL_GPL(dccp_check_req); 208EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -246,6 +253,7 @@ int dccp_reqsk_init(struct request_sock *req,
246{ 253{
247 struct dccp_request_sock *dreq = dccp_rsk(req); 254 struct dccp_request_sock *dreq = dccp_rsk(req);
248 255
256 spin_lock_init(&dreq->dreq_lock);
249 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; 257 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
250 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); 258 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
251 inet_rsk(req)->acked = 0; 259 inet_rsk(req)->acked = 0;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index e6e79eda9763..7de5b40a5d0d 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1070,7 +1070,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1070 return skb == NULL ? ERR_PTR(err) : skb; 1070 return skb == NULL ? ERR_PTR(err) : skb;
1071} 1071}
1072 1072
1073static int dn_accept(struct socket *sock, struct socket *newsock, int flags) 1073static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
1074 bool kern)
1074{ 1075{
1075 struct sock *sk = sock->sk, *newsk; 1076 struct sock *sk = sock->sk, *newsk;
1076 struct sk_buff *skb = NULL; 1077 struct sk_buff *skb = NULL;
@@ -1099,7 +1100,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1099 1100
1100 cb = DN_SKB_CB(skb); 1101 cb = DN_SKB_CB(skb);
1101 sk->sk_ack_backlog--; 1102 sk->sk_ack_backlog--;
1102 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0); 1103 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
1103 if (newsk == NULL) { 1104 if (newsk == NULL) {
1104 release_sock(sk); 1105 release_sock(sk);
1105 kfree_skb(skb); 1106 kfree_skb(skb);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 602d40f43687..6b1fc6e4278e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -689,11 +689,12 @@ EXPORT_SYMBOL(inet_stream_connect);
689 * Accept a pending connection. The TCP layer now gives BSD semantics. 689 * Accept a pending connection. The TCP layer now gives BSD semantics.
690 */ 690 */
691 691
692int inet_accept(struct socket *sock, struct socket *newsock, int flags) 692int inet_accept(struct socket *sock, struct socket *newsock, int flags,
693 bool kern)
693{ 694{
694 struct sock *sk1 = sock->sk; 695 struct sock *sk1 = sock->sk;
695 int err = -EINVAL; 696 int err = -EINVAL;
696 struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err); 697 struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);
697 698
698 if (!sk2) 699 if (!sk2)
699 goto do_err; 700 goto do_err;
@@ -1487,8 +1488,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
1487 int proto = iph->protocol; 1488 int proto = iph->protocol;
1488 int err = -ENOSYS; 1489 int err = -ENOSYS;
1489 1490
1490 if (skb->encapsulation) 1491 if (skb->encapsulation) {
1492 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1491 skb_set_inner_network_header(skb, nhoff); 1493 skb_set_inner_network_header(skb, nhoff);
1494 }
1492 1495
1493 csum_replace2(&iph->check, iph->tot_len, newlen); 1496 csum_replace2(&iph->check, iph->tot_len, newlen);
1494 iph->tot_len = newlen; 1497 iph->tot_len = newlen;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b4d5980ade3b..5e313c1ac94f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -424,7 +424,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
424/* 424/*
425 * This will accept the next outstanding connection. 425 * This will accept the next outstanding connection.
426 */ 426 */
427struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) 427struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
428{ 428{
429 struct inet_connection_sock *icsk = inet_csk(sk); 429 struct inet_connection_sock *icsk = inet_csk(sk);
430 struct request_sock_queue *queue = &icsk->icsk_accept_queue; 430 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 737ce826d7ec..7a3fd25e8913 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -966,7 +966,7 @@ static int __ip_append_data(struct sock *sk,
966 cork->length += length; 966 cork->length += length;
967 if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) && 967 if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
968 (sk->sk_protocol == IPPROTO_UDP) && 968 (sk->sk_protocol == IPPROTO_UDP) &&
969 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && 969 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
970 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { 970 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
971 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 971 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
972 hh_len, fragheaderlen, transhdrlen, 972 hh_len, fragheaderlen, transhdrlen,
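Here (and in the matching ip6_output.c hunk below) the test for "may this socket use UFO on this route" changes from !rt->dst.header_len to !dst_xfrm(&rt->dst): what actually disqualifies UFO is an attached IPsec transform, and transport-mode transforms do not necessarily reserve header room, so header_len was the wrong proxy. dst_xfrm() is approximately:

	/* include/net/dst.h, shape of this era: */
	#ifdef CONFIG_XFRM
	static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
	{
		return dst->xfrm;	/* NULL when no transform is attached */
	}
	#else
	static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
	{
		return NULL;
	}
	#endif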
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9a89b8deafae..575e19dcc017 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -279,10 +279,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
279 */ 279 */
280void tcp_v4_mtu_reduced(struct sock *sk) 280void tcp_v4_mtu_reduced(struct sock *sk)
281{ 281{
282 struct dst_entry *dst;
283 struct inet_sock *inet = inet_sk(sk); 282 struct inet_sock *inet = inet_sk(sk);
284 u32 mtu = tcp_sk(sk)->mtu_info; 283 struct dst_entry *dst;
284 u32 mtu;
285 285
286 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
287 return;
288 mtu = tcp_sk(sk)->mtu_info;
286 dst = inet_csk_update_pmtu(sk, mtu); 289 dst = inet_csk_update_pmtu(sk, mtu);
287 if (!dst) 290 if (!dst)
288 return; 291 return;
@@ -428,7 +431,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
428 431
429 switch (type) { 432 switch (type) {
430 case ICMP_REDIRECT: 433 case ICMP_REDIRECT:
431 do_redirect(icmp_skb, sk); 434 if (!sock_owned_by_user(sk))
435 do_redirect(icmp_skb, sk);
432 goto out; 436 goto out;
433 case ICMP_SOURCE_QUENCH: 437 case ICMP_SOURCE_QUENCH:
434 /* Just silently ignore these. */ 438 /* Just silently ignore these. */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 40d893556e67..b2ab411c6d37 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk)
249 249
250 sk_mem_reclaim_partial(sk); 250 sk_mem_reclaim_partial(sk);
251 251
252 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) 252 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
253 !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
253 goto out; 254 goto out;
254 255
255 if (time_after(icsk->icsk_ack.timeout, jiffies)) { 256 if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk)
552 struct inet_connection_sock *icsk = inet_csk(sk); 553 struct inet_connection_sock *icsk = inet_csk(sk);
553 int event; 554 int event;
554 555
555 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) 556 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
557 !icsk->icsk_pending)
556 goto out; 558 goto out;
557 559
558 if (time_after(icsk->icsk_timeout, jiffies)) { 560 if (time_after(icsk->icsk_timeout, jiffies)) {
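Both timer handlers switch from testing a single state to a state-set test, since a disconnect can morph a socket back to LISTEN with timers still pending. The TCPF_* constants are one-hot encodings of the TCP_* states (TCPF_CLOSE == 1 << TCP_CLOSE), so membership in several states is one shift and one AND:

	/* true if sk_state is TCP_CLOSE or TCP_LISTEN */
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		goto out;	/* nothing for the timer to do here */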
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 04db40620ea6..a9a9553ee63d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -920,12 +920,12 @@ static int __init inet6_init(void)
920 err = register_pernet_subsys(&inet6_net_ops); 920 err = register_pernet_subsys(&inet6_net_ops);
921 if (err) 921 if (err)
922 goto register_pernet_fail; 922 goto register_pernet_fail;
923 err = icmpv6_init();
924 if (err)
925 goto icmp_fail;
926 err = ip6_mr_init(); 923 err = ip6_mr_init();
927 if (err) 924 if (err)
928 goto ipmr_fail; 925 goto ipmr_fail;
926 err = icmpv6_init();
927 if (err)
928 goto icmp_fail;
929 err = ndisc_init(); 929 err = ndisc_init();
930 if (err) 930 if (err)
931 goto ndisc_fail; 931 goto ndisc_fail;
@@ -1061,10 +1061,10 @@ igmp_fail:
1061 ndisc_cleanup(); 1061 ndisc_cleanup();
1062ndisc_fail: 1062ndisc_fail:
1063 ip6_mr_cleanup(); 1063 ip6_mr_cleanup();
1064ipmr_fail:
1065 icmpv6_cleanup();
1066icmp_fail: 1064icmp_fail:
1067 unregister_pernet_subsys(&inet6_net_ops); 1065 unregister_pernet_subsys(&inet6_net_ops);
1066ipmr_fail:
1067 icmpv6_cleanup();
1068register_pernet_fail: 1068register_pernet_fail:
1069 sock_unregister(PF_INET6); 1069 sock_unregister(PF_INET6);
1070 rtnl_unregister_all(PF_INET6); 1070 rtnl_unregister_all(PF_INET6);
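The inet6_init() hunks swap the order of ip6_mr_init() and icmpv6_init() and, crucially, mirror the swap in the error unwind: with goto-based unwinding, cleanup labels must run in exact reverse registration order, or a failure midway tears down something that was never set up. The general shape (names hypothetical):

	err = first_init();
	if (err)
		goto first_fail;
	err = second_init();
	if (err)
		goto second_fail;
	return 0;

	second_fail:
		first_cleanup();	/* undo in reverse registration order */
	first_fail:
		return err;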
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e4266746e4a2..d4bf2c68a545 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -923,6 +923,8 @@ add:
923 ins = &rt->dst.rt6_next; 923 ins = &rt->dst.rt6_next;
924 iter = *ins; 924 iter = *ins;
925 while (iter) { 925 while (iter) {
926 if (iter->rt6i_metric > rt->rt6i_metric)
927 break;
926 if (rt6_qualify_for_ecmp(iter)) { 928 if (rt6_qualify_for_ecmp(iter)) {
927 *ins = iter->dst.rt6_next; 929 *ins = iter->dst.rt6_next;
928 fib6_purge_rt(iter, fn, info->nl_net); 930 fib6_purge_rt(iter, fn, info->nl_net);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 0838e6d01d2e..93e58a5e1837 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
294 struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); 294 struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
295 int err = -ENOSYS; 295 int err = -ENOSYS;
296 296
297 if (skb->encapsulation) 297 if (skb->encapsulation) {
298 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
298 skb_set_inner_network_header(skb, nhoff); 299 skb_set_inner_network_header(skb, nhoff);
300 }
299 301
300 iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); 302 iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
301 303
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 528b3c1f3fde..58f6288e9ba5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -768,13 +768,14 @@ slow_path:
768 * Fragment the datagram. 768 * Fragment the datagram.
769 */ 769 */
770 770
771 *prevhdr = NEXTHDR_FRAGMENT;
772 troom = rt->dst.dev->needed_tailroom; 771 troom = rt->dst.dev->needed_tailroom;
773 772
774 /* 773 /*
775 * Keep copying data until we run out. 774 * Keep copying data until we run out.
776 */ 775 */
777 while (left > 0) { 776 while (left > 0) {
777 u8 *fragnexthdr_offset;
778
778 len = left; 779 len = left;
779 /* IF: it doesn't fit, use 'mtu' - the data space left */ 780 /* IF: it doesn't fit, use 'mtu' - the data space left */
780 if (len > mtu) 781 if (len > mtu)
@@ -819,6 +820,10 @@ slow_path:
819 */ 820 */
820 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); 821 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
821 822
823 fragnexthdr_offset = skb_network_header(frag);
824 fragnexthdr_offset += prevhdr - skb_network_header(skb);
825 *fragnexthdr_offset = NEXTHDR_FRAGMENT;
826
822 /* 827 /*
823 * Build fragment header. 828 * Build fragment header.
824 */ 829 */
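The fragmentation fix above addresses a write to possibly shared data: the slow path used to patch *prevhdr = NEXTHDR_FRAGMENT in the original skb's headers before copying them, but that skb may be a clone whose header data is shared with another user. The fix recomputes prevhdr's offset inside each fragment's private header copy and patches that instead:

	/* same offset as prevhdr, but inside the fragment's own copy */
	u8 *fragnexthdr_offset = skb_network_header(frag) +
				 (prevhdr - skb_network_header(skb));

	*fragnexthdr_offset = NEXTHDR_FRAGMENT;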
@@ -1385,7 +1390,7 @@ emsgsize:
1385 if ((((length + fragheaderlen) > mtu) || 1390 if ((((length + fragheaderlen) > mtu) ||
1386 (skb && skb_is_gso(skb))) && 1391 (skb && skb_is_gso(skb))) &&
1387 (sk->sk_protocol == IPPROTO_UDP) && 1392 (sk->sk_protocol == IPPROTO_UDP) &&
1388 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && 1393 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
1389 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { 1394 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1390 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1395 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1391 hh_len, fragheaderlen, exthdrlen, 1396 hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 644ba59fbd9d..3d8a3b63b4fd 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
485 if (!skb->ignore_df && skb->len > mtu) { 485 if (!skb->ignore_df && skb->len > mtu) {
486 skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); 486 skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
487 487
488 if (skb->protocol == htons(ETH_P_IPV6)) 488 if (skb->protocol == htons(ETH_P_IPV6)) {
489 if (mtu < IPV6_MIN_MTU)
490 mtu = IPV6_MIN_MTU;
491
489 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 492 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
490 else 493 } else {
491 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 494 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
492 htonl(mtu)); 495 htonl(mtu));
496 }
493 497
494 return -EMSGSIZE; 498 return -EMSGSIZE;
495 } 499 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 229bfcc451ef..35c58b669ebd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3299,7 +3299,6 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
 		nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
 			    + NLA_ALIGN(sizeof(struct rtnexthop))
 			    + nla_total_size(16) /* RTA_GATEWAY */
-			    + nla_total_size(4)  /* RTA_OIF */
 			    + lwtunnel_get_encap_size(rt->dst.lwtstate);
 
 		nexthop_len *= rt->rt6i_nsiblings;
@@ -3323,7 +3322,7 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
 }
 
 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
-			    unsigned int *flags)
+			    unsigned int *flags, bool skip_oif)
 {
 	if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
 		*flags |= RTNH_F_LINKDOWN;
@@ -3336,7 +3335,8 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
 		goto nla_put_failure;
 	}
 
-	if (rt->dst.dev &&
+	/* not needed for multipath encoding b/c it has a rtnexthop struct */
+	if (!skip_oif && rt->dst.dev &&
 	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
 		goto nla_put_failure;
 
@@ -3350,6 +3350,7 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
+/* add multipath next hop */
 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
 {
 	struct rtnexthop *rtnh;
@@ -3362,7 +3363,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
 	rtnh->rtnh_hops = 0;
 	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
 
-	if (rt6_nexthop_info(skb, rt, &flags) < 0)
+	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
 		goto nla_put_failure;
 
 	rtnh->rtnh_flags = flags;
@@ -3515,7 +3516,7 @@ static int rt6_fill_node(struct net *net,
 
 		nla_nest_end(skb, mp);
 	} else {
-		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0)
+		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
 			goto nla_put_failure;
 	}
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 60a5295a7de6..49fa2e8c3fa9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -391,10 +391,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	np = inet6_sk(sk);
 
 	if (type == NDISC_REDIRECT) {
-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+		if (!sock_owned_by_user(sk)) {
+			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
-		if (dst)
-			dst->ops->redirect(dst, sk, skb);
+			if (dst)
+				dst->ops->redirect(dst, sk, skb);
+		}
 		goto out;
 	}
 
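
Reviewer note: the tcp_ipv6.c hunk only processes the redirect when sock_owned_by_user() says no process context currently holds the socket lock; otherwise the event is dropped rather than racing with the owner. A rough userspace illustration of the guard pattern, with hypothetical types that are not kernel code:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the socket and its user-lock state. */
struct conn {
	bool owned_by_user;	/* process context holds the lock */
	void *cached_route;
};

static void invalidate_route(struct conn *c)
{
	c->cached_route = NULL;	/* force a fresh route lookup next send */
}

/* Asynchronous event handler: only touch shared state when unowned. */
static void on_redirect(struct conn *c)
{
	if (!c->owned_by_user)
		invalidate_route(c);	/* safe: no concurrent owner */
	/* else: drop the event; the owner revalidates the route later */
}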
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 81adc29a448d..8d77ad5cadaf 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -828,7 +828,8 @@ out:
  * Wait for incoming connection
  *
  */
-static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
+static int irda_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct irda_sock *new, *self = irda_sk(sk);
@@ -836,7 +837,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
 	struct sk_buff *skb = NULL;
 	int err;
 
-	err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
+	err = irda_create(sock_net(sk), newsock, sk->sk_protocol, kern);
 	if (err)
 		return err;
 
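
Reviewer note: this is the first of many hunks in this series threading a new bool kern argument through every proto_ops->accept() implementation, so the accepted socket knows whether it was created on behalf of the kernel (kernel_accept()) or of userspace (accept(2)); the flag ultimately reaches sk_alloc(). A condensed sketch of the calling convention, with simplified shapes that are not the real kernel prototypes:

#include <stdbool.h>
#include <stdio.h>

struct socket { int dummy; };	/* illustrative, not the kernel struct */

static int demo_accept(struct socket *sock, struct socket *newsock,
		       int flags, bool kern)
{
	/* a real implementation would pass 'kern' down to sk_alloc() */
	printf("accept: kern=%d\n", kern);
	return 0;
}

int main(void)
{
	struct socket listener = {0}, child = {0};

	demo_accept(&listener, &child, 0, false);	/* accept(2) from userspace */
	demo_accept(&listener, &child, 0, true);	/* kernel_accept() */
	return 0;
}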
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 89bbde1081ce..84de7b6326dc 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -938,7 +938,7 @@ done:
 
 /* Accept a pending connection */
 static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
-			    int flags)
+			    int flags, bool kern)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct sock *sk = sock->sk, *nsk;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 06186d608a27..cb4fff785cbf 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -641,11 +641,13 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
  * @sock: Socket which connections arrive on.
  * @newsock: Socket to move incoming connection to.
  * @flags: User specified operational flags.
+ * @kern: If the socket is kernel internal
  *
  * Accept a new incoming connection.
  * Returns 0 upon success, negative otherwise.
  */
-static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
+static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags,
+			 bool kern)
 {
 	struct sock *sk = sock->sk, *newsk;
 	struct llc_sock *llc, *newllc;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 3818686182b2..33211f9a2656 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1288,7 +1288,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
 			/* fall through */
 		case NETDEV_CHANGE:
 			nh->nh_flags |= RTNH_F_LINKDOWN;
-			ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+			if (event != NETDEV_UNREGISTER)
+				ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
 			break;
 		}
 		if (event == NETDEV_UNREGISTER)
@@ -2028,6 +2029,7 @@ static void mpls_net_exit(struct net *net)
 	for (index = 0; index < platform_labels; index++) {
 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
 		RCU_INIT_POINTER(platform_label[index], NULL);
+		mpls_notify_route(net, index, rt, NULL, NULL);
 		mpls_rt_free(rt);
 	}
 	rtnl_unlock();
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4bbf4526b885..ebf16f7f9089 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -765,7 +765,8 @@ out_release:
 	return err;
 }
 
-static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
+static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
+		     bool kern)
 {
 	struct sk_buff *skb;
 	struct sock *newsk;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 879885b31cce..2ffb18e73df6 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -441,7 +441,7 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
 }
 
 static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
-			    int flags)
+			    int flags, bool kern)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct sock *sk = sock->sk, *new_sk;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 222bedcd9575..e81537991ddf 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -772,7 +772,8 @@ static void pep_sock_close(struct sock *sk, long timeout)
 	sock_put(sk);
 }
 
-static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
+static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
+				    bool kern)
 {
 	struct pep_sock *pn = pep_sk(sk), *newpn;
 	struct sock *newsk = NULL;
@@ -846,7 +847,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
 	}
 
 	/* Create a new to-be-accepted sock */
-	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0);
+	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
+			 kern);
 	if (!newsk) {
 		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
 		err = -ENOBUFS;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index a6c8da3ee893..64634e3ec2fc 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -305,7 +305,7 @@ out:
 }
 
 static int pn_socket_accept(struct socket *sock, struct socket *newsock,
-			    int flags)
+			    int flags, bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct sock *newsk;
@@ -314,7 +314,7 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
 	if (unlikely(sk->sk_state != TCP_LISTEN))
 		return -EINVAL;
 
-	newsk = sk->sk_prot->accept(sk, flags, &err);
+	newsk = sk->sk_prot->accept(sk, flags, &err, kern);
 	if (!newsk)
 		return err;
 
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 0e04dcceb1d4..1fa75ab7b733 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -429,6 +429,7 @@ void rds_conn_destroy(struct rds_connection *conn)
 	 */
 	rds_cong_remove_conn(conn);
 
+	put_net(conn->c_net);
 	kmem_cache_free(rds_conn_slab, conn);
 
 	spin_lock_irqsave(&rds_conn_lock, flags);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ce3775abc6e7..1c38d2c7caa8 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -442,7 +442,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 		ic->i_send_cq = NULL;
 		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
 		rdsdebug("ib_create_cq send failed: %d\n", ret);
-		goto out;
+		goto rds_ibdev_out;
 	}
 
 	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
@@ -456,19 +456,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 		ic->i_recv_cq = NULL;
 		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
 		rdsdebug("ib_create_cq recv failed: %d\n", ret);
-		goto out;
+		goto send_cq_out;
 	}
 
 	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
 	if (ret) {
 		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
-		goto out;
+		goto recv_cq_out;
 	}
 
 	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
 	if (ret) {
 		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
-		goto out;
+		goto recv_cq_out;
 	}
 
 	/* XXX negotiate max send/recv with remote? */
@@ -494,7 +494,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
 	if (ret) {
 		rdsdebug("rdma_create_qp failed: %d\n", ret);
-		goto out;
+		goto recv_cq_out;
 	}
 
 	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
@@ -504,7 +504,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	if (!ic->i_send_hdrs) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent send failed\n");
-		goto out;
+		goto qp_out;
 	}
 
 	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
@@ -514,7 +514,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	if (!ic->i_recv_hdrs) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent recv failed\n");
-		goto out;
+		goto send_hdrs_dma_out;
 	}
 
 	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
@@ -522,7 +522,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	if (!ic->i_ack) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent ack failed\n");
-		goto out;
+		goto recv_hdrs_dma_out;
 	}
 
 	ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
@@ -530,7 +530,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	if (!ic->i_sends) {
 		ret = -ENOMEM;
 		rdsdebug("send allocation failed\n");
-		goto out;
+		goto ack_dma_out;
 	}
 
 	ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
@@ -538,7 +538,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	if (!ic->i_recvs) {
 		ret = -ENOMEM;
 		rdsdebug("recv allocation failed\n");
-		goto out;
+		goto sends_out;
 	}
 
 	rds_ib_recv_init_ack(ic);
@@ -546,8 +546,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
 		 ic->i_send_cq, ic->i_recv_cq);
 
-out:
+	return ret;
+
+sends_out:
+	vfree(ic->i_sends);
+ack_dma_out:
+	ib_dma_free_coherent(dev, sizeof(struct rds_header),
+			     ic->i_ack, ic->i_ack_dma);
+recv_hdrs_dma_out:
+	ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
+			     sizeof(struct rds_header),
+			     ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
+send_hdrs_dma_out:
+	ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
+			     sizeof(struct rds_header),
+			     ic->i_send_hdrs, ic->i_send_hdrs_dma);
+qp_out:
+	rdma_destroy_qp(ic->i_cm_id);
+recv_cq_out:
+	if (!ib_destroy_cq(ic->i_recv_cq))
+		ic->i_recv_cq = NULL;
+send_cq_out:
+	if (!ib_destroy_cq(ic->i_send_cq))
+		ic->i_send_cq = NULL;
+rds_ibdev_out:
+	rds_ib_remove_conn(rds_ibdev, conn);
 	rds_ib_dev_put(rds_ibdev);
+
 	return ret;
 }
 
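
Reviewer note: the rds_ib_setup_qp() change above replaces a single catch-all out: label with an ordered unwind ladder, so each failure point releases exactly what was set up before it, in reverse order. A toy version of the idiom (illustrative names, not from the patch):

#include <stdlib.h>

static char *a, *b;

/* Each later failure jumps to a label that undoes the earlier steps. */
static int setup(void)
{
	a = malloc(16);
	if (!a)
		return -1;

	b = malloc(16);
	if (!b)
		goto free_a;	/* only 'a' exists at this point */

	return 0;		/* success: both resources stay live */

free_a:
	free(a);
	a = NULL;
	return -1;
}

static void teardown(void)
{
	free(b);	/* release in reverse order of acquisition */
	free(a);
}

int main(void)
{
	int ret = setup();

	if (!ret)
		teardown();
	return ret ? 1 : 0;
}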
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 39518ef7af4d..82d38ccf5e8b 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -147,7 +147,7 @@ struct rds_connection {
 
 	/* Protocol version */
 	unsigned int		c_version;
-	possible_net_t		c_net;
+	struct net		*c_net;
 
 	struct list_head	c_map_item;
 	unsigned long		c_map_queued;
@@ -162,13 +162,13 @@ struct rds_connection {
 static inline
 struct net *rds_conn_net(struct rds_connection *conn)
 {
-	return read_pnet(&conn->c_net);
+	return conn->c_net;
 }
 
 static inline
 void rds_conn_net_set(struct rds_connection *conn, struct net *net)
 {
-	write_pnet(&conn->c_net, net);
+	conn->c_net = get_net(net);
 }
 
 #define RDS_FLAG_CONG_BITMAP	0x01
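
Reviewer note: switching c_net to a plain struct net * managed with get_net()/put_net() (see the put_net() added in rds_conn_destroy() above) makes every RDS connection hold a real reference on its network namespace, so the namespace cannot be freed while a connection still points at it. The ownership pattern, with the kernel helpers reimplemented in miniature purely for illustration:

#include <stddef.h>

struct net { int refcount; };	/* the kernel uses refcount_t here */

static struct net *get_net(struct net *net)
{
	net->refcount++;
	return net;
}

static void put_net(struct net *net)
{
	if (--net->refcount == 0) {
		/* last user gone: the namespace could be freed here */
	}
}

struct conn { struct net *c_net; };

static void conn_bind(struct conn *c, struct net *net)
{
	c->c_net = get_net(net);	/* take a reference for the connection */
}

static void conn_destroy(struct conn *c)
{
	put_net(c->c_net);		/* drop it exactly once, at teardown */
	c->c_net = NULL;
}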
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index a973d3b4dff0..225690076773 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
 	 * we do need to clean up the listen socket here.
 	 */
 	if (rtn->rds_tcp_listen_sock) {
-		rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+		struct socket *lsock = rtn->rds_tcp_listen_sock;
+
 		rtn->rds_tcp_listen_sock = NULL;
-		flush_work(&rtn->rds_tcp_accept_w);
+		rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	}
 }
 
@@ -523,13 +524,13 @@ static void rds_tcp_kill_sock(struct net *net)
 	struct rds_tcp_connection *tc, *_tc;
 	LIST_HEAD(tmp_list);
 	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+	struct socket *lsock = rtn->rds_tcp_listen_sock;
 
-	rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
 	rtn->rds_tcp_listen_sock = NULL;
-	flush_work(&rtn->rds_tcp_accept_w);
+	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+		struct net *c_net = tc->t_cpath->cp_conn->c_net;
 
 		if (net != c_net || !tc->t_sock)
 			continue;
@@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net)
 void *rds_tcp_listen_sock_def_readable(struct net *net)
 {
 	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+	struct socket *lsock = rtn->rds_tcp_listen_sock;
+
+	if (!lsock)
+		return NULL;
 
-	return rtn->rds_tcp_listen_sock->sk->sk_user_data;
+	return lsock->sk->sk_user_data;
 }
 
 static int rds_tcp_dev_event(struct notifier_block *this,
@@ -584,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+		struct net *c_net = tc->t_cpath->cp_conn->c_net;
 
 		if (net != c_net || !tc->t_sock)
 			continue;
@@ -638,19 +643,19 @@ static int rds_tcp_init(void)
 		goto out;
 	}
 
-	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
-	if (ret) {
-		pr_warn("could not register rds_tcp_dev_notifier\n");
+	ret = rds_tcp_recv_init();
+	if (ret)
 		goto out_slab;
-	}
 
 	ret = register_pernet_subsys(&rds_tcp_net_ops);
 	if (ret)
-		goto out_notifier;
+		goto out_recv;
 
-	ret = rds_tcp_recv_init();
-	if (ret)
+	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+	if (ret) {
+		pr_warn("could not register rds_tcp_dev_notifier\n");
 		goto out_pernet;
+	}
 
 	rds_trans_register(&rds_tcp_transport);
 
@@ -660,9 +665,8 @@ static int rds_tcp_init(void)
 
 out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
-out_notifier:
-	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
-		pr_warn("could not unregister rds_tcp_dev_notifier\n");
+out_recv:
+	rds_tcp_recv_exit();
out_slab:
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9a1cc8906576..56ea6620fcf9 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
 
 /* tcp_listen.c */
 struct socket *rds_tcp_listen_init(struct net *);
-void rds_tcp_listen_stop(struct socket *);
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
 void rds_tcp_listen_data_ready(struct sock *sk);
 int rds_tcp_accept_one(struct socket *sock);
 int rds_tcp_keepalive(struct socket *sock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 67d0929c7d3d..507678853e6c 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -133,7 +133,7 @@ int rds_tcp_accept_one(struct socket *sock)
 
 	new_sock->type = sock->type;
 	new_sock->ops = sock->ops;
-	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
 	if (ret < 0)
 		goto out;
 
@@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 	 * before it has been accepted and the accepter has set up their
 	 * data_ready.. we only want to queue listen work for our listening
 	 * socket
+	 *
+	 * (*ready)() may be null if we are racing with netns delete, and
+	 * the listen socket is being torn down.
 	 */
 	if (sk->sk_state == TCP_LISTEN)
 		rds_tcp_accept_work(sk);
@@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
-	ready(sk);
+	if (ready)
+		ready(sk);
 }
 
 struct socket *rds_tcp_listen_init(struct net *net)
@@ -271,7 +275,7 @@ out:
 	return NULL;
 }
 
-void rds_tcp_listen_stop(struct socket *sock)
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
 {
 	struct sock *sk;
 
@@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock)
 
 	/* wait for accepts to stop and close the socket */
 	flush_workqueue(rds_wq);
+	flush_work(acceptor);
 	sock_release(sock);
 }
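
Reviewer note: the teardown order here matters: the listener pointer is cleared first (so rds_tcp_listen_sock_def_readable() can observe NULL, per the check added above), then the acceptor work is flushed before the socket is released. A rough sketch of that ordering with stand-in types, not kernel code:

#include <stddef.h>

struct work { int pending; };
struct listener { struct work accept_work; };

static struct listener *global_listener;

static void flush_work_stub(struct work *w)
{
	w->pending = 0;	/* stand-in: wait for queued acceptor work to finish */
}

/* Hide the listener from new readers first, then wait out the acceptor,
 * then release the socket. */
static void listen_stop(struct listener *l)
{
	global_listener = NULL;		/* readers now see "no listener" */
	flush_work_stub(&l->accept_work);	/* no acceptor runs past this point */
	/* the sock_release() equivalent would go here, last */
}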
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index b8a1df2c9785..4a9729257023 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -871,7 +871,8 @@ out_release:
 	return err;
 }
 
-static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
+static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sk_buff *skb;
 	struct sock *newsk;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9f4cfa25af7c..18b2ad8be8e2 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -420,6 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 			     u16 skew)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	enum rxrpc_call_state state;
 	unsigned int offset = sizeof(struct rxrpc_wire_header);
 	unsigned int ix;
 	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
@@ -434,14 +435,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 	_proto("Rx DATA %%%u { #%u f=%02x }",
 	       sp->hdr.serial, seq, sp->hdr.flags);
 
-	if (call->state >= RXRPC_CALL_COMPLETE)
+	state = READ_ONCE(call->state);
+	if (state >= RXRPC_CALL_COMPLETE)
 		return;
 
 	/* Received data implicitly ACKs all of the request packets we sent
 	 * when we're acting as a client.
 	 */
-	if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
-	     call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
+	if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
+	     state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
 	    !rxrpc_receiving_reply(call))
 		return;
 
@@ -650,6 +652,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	struct rxrpc_peer *peer;
 	unsigned int mtu;
+	bool wake = false;
 	u32 rwind = ntohl(ackinfo->rwind);
 
 	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
@@ -657,9 +660,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
 	       rwind, ntohl(ackinfo->jumbo_max));
 
-	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
-		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
-	call->tx_winsize = rwind;
+	if (call->tx_winsize != rwind) {
+		if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+			rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+		if (rwind > call->tx_winsize)
+			wake = true;
+		call->tx_winsize = rwind;
+	}
+
 	if (call->cong_ssthresh > rwind)
 		call->cong_ssthresh = rwind;
 
@@ -673,6 +681,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 		spin_unlock_bh(&peer->lock);
 		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
 	}
+
+	if (wake)
+		wake_up(&call->waitq);
 }
 
 /*
@@ -799,7 +810,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 		return rxrpc_proto_abort("AK0", call, 0);
 
 	/* Ignore ACKs unless we are or have just been transmitting. */
-	switch (call->state) {
+	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_CLIENT_SEND_REQUEST:
 	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
 	case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -940,7 +951,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
 					  struct rxrpc_call *call)
 {
-	switch (call->state) {
+	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_SERVER_AWAIT_ACK:
 		rxrpc_call_completed(call);
 		break;
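
Reviewer note: the rxrpc hunks consistently replace bare call->state reads with READ_ONCE() and then test the single sampled value. Since the state can change concurrently, sampling it once keeps the compiler from re-reading it between checks, so the decisions in one function cannot disagree with each other. The idiom in miniature, with a userspace stand-in for the kernel macro:

/* Userspace stand-in for the kernel's READ_ONCE(): force one
 * non-elidable load through a volatile access. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

enum call_state { CALL_ACTIVE, CALL_COMPLETE };

static enum call_state call_state;	/* written by another thread */

static int handle_event(void)
{
	enum call_state state = READ_ONCE(call_state);

	/* every later decision uses the one sampled value */
	if (state == CALL_COMPLETE)
		return -1;
	return 0;
}

int main(void)
{
	return handle_event() ? 1 : 0;
}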
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 6491ca46a03f..3e2f1a8e9c5b 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -527,7 +527,7 @@ try_again:
 		msg->msg_namelen = len;
 	}
 
-	switch (call->state) {
+	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_SERVER_ACCEPTING:
 		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
 		break;
@@ -640,7 +640,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
 
 	mutex_lock(&call->user_mutex);
 
-	switch (call->state) {
+	switch (READ_ONCE(call->state)) {
 	case RXRPC_CALL_CLIENT_RECV_REPLY:
 	case RXRPC_CALL_SERVER_RECV_REQUEST:
 	case RXRPC_CALL_SERVER_ACK_REQUEST:
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index bc2d3dcff9de..97ab214ca411 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -488,6 +488,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	__releases(&rx->sk.sk_lock.slock)
 {
+	enum rxrpc_call_state state;
 	enum rxrpc_command cmd;
 	struct rxrpc_call *call;
 	unsigned long user_call_ID = 0;
@@ -526,13 +527,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 			return PTR_ERR(call);
 		/* ... and we have the call lock. */
 	} else {
-		ret = -EBUSY;
-		if (call->state == RXRPC_CALL_UNINITIALISED ||
-		    call->state == RXRPC_CALL_CLIENT_AWAIT_CONN ||
-		    call->state == RXRPC_CALL_SERVER_PREALLOC ||
-		    call->state == RXRPC_CALL_SERVER_SECURING ||
-		    call->state == RXRPC_CALL_SERVER_ACCEPTING)
+		switch (READ_ONCE(call->state)) {
+		case RXRPC_CALL_UNINITIALISED:
+		case RXRPC_CALL_CLIENT_AWAIT_CONN:
+		case RXRPC_CALL_SERVER_PREALLOC:
+		case RXRPC_CALL_SERVER_SECURING:
+		case RXRPC_CALL_SERVER_ACCEPTING:
+			ret = -EBUSY;
 			goto error_release_sock;
+		default:
+			break;
+		}
 
 		ret = mutex_lock_interruptible(&call->user_mutex);
 		release_sock(&rx->sk);
@@ -542,10 +547,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 		}
 	}
 
+	state = READ_ONCE(call->state);
 	_debug("CALL %d USR %lx ST %d on CONN %p",
-	       call->debug_id, call->user_call_ID, call->state, call->conn);
+	       call->debug_id, call->user_call_ID, state, call->conn);
 
-	if (call->state >= RXRPC_CALL_COMPLETE) {
+	if (state >= RXRPC_CALL_COMPLETE) {
 		/* it's too late for this call */
 		ret = -ESHUTDOWN;
 	} else if (cmd == RXRPC_CMD_SEND_ABORT) {
@@ -555,12 +561,12 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
 	} else if (cmd != RXRPC_CMD_SEND_DATA) {
 		ret = -EINVAL;
 	} else if (rxrpc_is_client_call(call) &&
-		   call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
+		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
 		/* request phase complete for this client call */
 		ret = -EPROTO;
 	} else if (rxrpc_is_service_call(call) &&
-		   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-		   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
+		   state != RXRPC_CALL_SERVER_ACK_REQUEST &&
+		   state != RXRPC_CALL_SERVER_SEND_REPLY) {
 		/* Reply phase not begun or not complete for service call. */
 		ret = -EPROTO;
 	} else {
@@ -605,14 +611,21 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
 	_debug("CALL %d USR %lx ST %d on CONN %p",
 	       call->debug_id, call->user_call_ID, call->state, call->conn);
 
-	if (call->state >= RXRPC_CALL_COMPLETE) {
-		ret = -ESHUTDOWN; /* it's too late for this call */
-	} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
-		   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-		   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
-		ret = -EPROTO; /* request phase complete for this client call */
-	} else {
+	switch (READ_ONCE(call->state)) {
+	case RXRPC_CALL_CLIENT_SEND_REQUEST:
+	case RXRPC_CALL_SERVER_ACK_REQUEST:
+	case RXRPC_CALL_SERVER_SEND_REPLY:
 		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
+		break;
+	case RXRPC_CALL_COMPLETE:
+		read_lock_bh(&call->state_lock);
+		ret = -call->error;
+		read_unlock_bh(&call->state_lock);
+		break;
+	default:
+		/* Request phase complete for this client call */
+		ret = -EPROTO;
+		break;
 	}
 
 	mutex_unlock(&call->user_mutex);
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index ab8062909962..f9bb43c25697 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
 	if (ret < 0)
 		return ret;
 
+	if (!tb[TCA_CONNMARK_PARMS])
+		return -EINVAL;
+
 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
 	if (!tcf_hash_check(tn, parm->index, a, bind)) {
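
Reviewer note: the act_connmark fix illustrates the general netlink rule: a parsed attribute table may have NULL slots for attributes the sender omitted, so every slot must be checked before nla_data() dereferences it. An illustrative miniature, not the real netlink API:

#include <stddef.h>
#include <errno.h>

struct nlattr { int len; };	/* toy header; payload follows it */

static void *nla_data_stub(const struct nlattr *nla)
{
	return (void *)(nla + 1);
}

static int init_action(struct nlattr *tb[], int parms_idx)
{
	void *parm;

	if (!tb[parms_idx])
		return -EINVAL;		/* attribute missing: reject, don't crash */

	parm = nla_data_stub(tb[parms_idx]);
	(void)parm;			/* real code would copy parameters out */
	return 0;
}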
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 3b7074e23024..c736627f8f4a 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 
 	return skb->len;
 nla_put_failure:
-	rcu_read_unlock();
 	nlmsg_trim(skb, b);
 	return -1;
 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 063baac5b9fe..961ee59f696a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -640,14 +640,15 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
 
 /* Create and initialize a new sk for the socket to be returned by accept(). */
 static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
-					     struct sctp_association *asoc)
+					     struct sctp_association *asoc,
+					     bool kern)
 {
 	struct sock *newsk;
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct sctp6_sock *newsctp6sk;
 	struct ipv6_txoptions *opt;
 
-	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
+	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
 	if (!newsk)
 		goto out;
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1b6d4574d2b0..989a900383b5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -575,10 +575,11 @@ static int sctp_v4_is_ce(const struct sk_buff *skb)
575 575
576/* Create and initialize a new sk for the socket returned by accept(). */ 576/* Create and initialize a new sk for the socket returned by accept(). */
577static struct sock *sctp_v4_create_accept_sk(struct sock *sk, 577static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
578 struct sctp_association *asoc) 578 struct sctp_association *asoc,
579 bool kern)
579{ 580{
580 struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL, 581 struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
581 sk->sk_prot, 0); 582 sk->sk_prot, kern);
582 struct inet_sock *newinet; 583 struct inet_sock *newinet;
583 584
584 if (!newsk) 585 if (!newsk)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6f0a9be50f50..0f378ea2ae38 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4116,7 +4116,7 @@ static int sctp_disconnect(struct sock *sk, int flags)
  * descriptor will be returned from accept() to represent the newly
  * formed association.
  */
-static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
+static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
 {
 	struct sctp_sock *sp;
 	struct sctp_endpoint *ep;
@@ -4151,7 +4151,7 @@ static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
 	 */
 	asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
 
-	newsk = sp->pf->create_accept_sk(sk, asoc);
+	newsk = sp->pf->create_accept_sk(sk, asoc, kern);
 	if (!newsk) {
 		error = -ENOMEM;
 		goto out;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 85837ab90e89..093803786eac 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -944,7 +944,7 @@ out:
 }
 
 static int smc_accept(struct socket *sock, struct socket *new_sock,
-		      int flags)
+		      int flags, bool kern)
 {
 	struct sock *sk = sock->sk, *nsk;
 	DECLARE_WAITQUEUE(wait, current);
diff --git a/net/socket.c b/net/socket.c
index 2c1e8677ff2d..e034fe4164be 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1506,7 +1506,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
 	if (err)
 		goto out_fd;
 
-	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
+	err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
 	if (err < 0)
 		goto out_fd;
 
@@ -1731,6 +1731,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
 	/* We assume all kernel code knows the size of sockaddr_storage */
 	msg.msg_namelen = 0;
 	msg.msg_iocb = NULL;
+	msg.msg_flags = 0;
 	if (sock->file->f_flags & O_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 	err = sock_recvmsg(sock, &msg, flags);
@@ -3238,7 +3239,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
 	if (err < 0)
 		goto done;
 
-	err = sock->ops->accept(sock, *newsock, flags);
+	err = sock->ops->accept(sock, *newsock, flags, true);
 	if (err < 0) {
 		sock_release(*newsock);
 		*newsock = NULL;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 43e4045e72bc..7130e73bd42c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -115,7 +115,8 @@ static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 static void tipc_data_ready(struct sock *sk);
 static void tipc_sock_destruct(struct sock *sk);
 static int tipc_release(struct socket *sock);
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+		       bool kern);
 static void tipc_sk_timeout(unsigned long data);
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 			   struct tipc_name_seq const *seq);
@@ -2029,7 +2030,8 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 *
 * Returns 0 on success, errno otherwise
 */
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+		       bool kern)
 {
 	struct sock *new_sk, *sk = sock->sk;
 	struct sk_buff *buf;
@@ -2051,7 +2053,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
 
 	buf = skb_peek(&sk->sk_receive_queue);
 
-	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
+	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
 	if (res)
 		goto exit;
 	security_sk_clone(sock->sk, new_sock->sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ee37b390260a..928691c43408 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -636,7 +636,7 @@ static int unix_bind(struct socket *, struct sockaddr *, int);
 static int unix_stream_connect(struct socket *, struct sockaddr *,
 			       int addr_len, int flags);
 static int unix_socketpair(struct socket *, struct socket *);
-static int unix_accept(struct socket *, struct socket *, int);
+static int unix_accept(struct socket *, struct socket *, int, bool);
 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
 static unsigned int unix_dgram_poll(struct file *, struct socket *,
@@ -1402,7 +1402,8 @@ static void unix_sock_inherit_flags(const struct socket *old,
 		set_bit(SOCK_PASSSEC, &new->flags);
 }
 
-static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct sock *tsk;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9192ead66751..9f770f33c100 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1250,7 +1250,8 @@ out:
 	return err;
 }
 
-static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
+			bool kern)
 {
 	struct sock *listener;
 	int err;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fd28a49dbe8f..8b911c29860e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -852,7 +852,8 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
 	return rc;
 }
 
-static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
+		      bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct sock *newsk;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0806dccdf507..236cbbc0ab9c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1243,7 +1243,7 @@ static inline int policy_to_flow_dir(int dir)
 }
 
 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
-						 const struct flowi *fl)
+						 const struct flowi *fl, u16 family)
 {
 	struct xfrm_policy *pol;
 
@@ -1251,8 +1251,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
 again:
 	pol = rcu_dereference(sk->sk_policy[dir]);
 	if (pol != NULL) {
-		bool match = xfrm_selector_match(&pol->selector, fl,
-						 sk->sk_family);
+		bool match = xfrm_selector_match(&pol->selector, fl, family);
 		int err = 0;
 
 		if (match) {
@@ -2239,7 +2238,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
 	sk = sk_const_to_full_sk(sk);
 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
 		num_pols = 1;
-		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
 		err = xfrm_expand_policies(fl, family, pols,
 					   &num_pols, &num_xfrms);
 		if (err < 0)
@@ -2518,7 +2517,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 		pol = NULL;
 		sk = sk_to_full_sk(sk);
 		if (sk && sk->sk_policy[dir]) {
-			pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+			pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
 			if (IS_ERR(pol)) {
 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
 				return 0;
@@ -3069,6 +3068,11 @@ static int __net_init xfrm_net_init(struct net *net)
 {
 	int rv;
 
+	/* Initialize the per-net locks here */
+	spin_lock_init(&net->xfrm.xfrm_state_lock);
+	spin_lock_init(&net->xfrm.xfrm_policy_lock);
+	mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
 	rv = xfrm_statistics_init(net);
 	if (rv < 0)
 		goto out_statistics;
@@ -3085,11 +3089,6 @@ static int __net_init xfrm_net_init(struct net *net)
 	if (rv < 0)
 		goto out;
 
-	/* Initialize the per-net locks here */
-	spin_lock_init(&net->xfrm.xfrm_state_lock);
-	spin_lock_init(&net->xfrm.xfrm_policy_lock);
-	mutex_init(&net->xfrm.xfrm_cfg_mutex);
-
 	return 0;
 
 out:
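
Reviewer note: moving the lock initialization to the top of xfrm_net_init() guarantees the locks exist before any later init step, or the error-unwind path of the function itself, can take them. The general rule in a userspace sketch:

#include <pthread.h>

struct subsys {
	pthread_mutex_t lock;
	int table_ready;
};

/* Initialize synchronization primitives before anything that might use
 * them, including this function's own failure paths. */
static int subsys_init(struct subsys *s)
{
	pthread_mutex_init(&s->lock, NULL);	/* first, unconditionally */

	pthread_mutex_lock(&s->lock);		/* later steps may take the lock */
	s->table_ready = 1;
	pthread_mutex_unlock(&s->lock);
	return 0;
}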
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
index 9b0b5cbc5b89..0f98634c20a0 100644
--- a/scripts/gcc-plugins/sancov_plugin.c
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -133,7 +133,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gc
 #if BUILDING_GCC_VERSION < 6000
 	register_callback(plugin_name, PLUGIN_START_UNIT, &sancov_start_unit, NULL);
 	register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_sancov);
-	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_plugin_pass_info);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_pass_info);
 #endif
 
 	return 0;
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
index cf7e52e4781b..9b6e246a45d0 100644
--- a/scripts/module-common.lds
+++ b/scripts/module-common.lds
@@ -22,4 +22,6 @@ SECTIONS {
 
 	. = ALIGN(8);
 	.init_array		0 : { *(SORT(.init_array.*)) *(.init_array) }
+
+	__jump_table		0 : ALIGN(8) { KEEP(*(__jump_table)) }
 }
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 0458b037c8a1..0545f5a8cabe 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -372,6 +372,8 @@ disassocation||disassociation
 disapear||disappear
 disapeared||disappeared
 disappared||disappeared
+disble||disable
+disbled||disabled
 disconnet||disconnect
 discontinous||discontinuous
 dispertion||dispersion
@@ -732,6 +734,7 @@ oustanding||outstanding
 overaall||overall
 overhread||overhead
 overlaping||overlapping
+overide||override
 overrided||overridden
 overriden||overridden
 overun||overrun
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index ec1067a679da..08b1399d1da2 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -89,7 +89,7 @@ static void acp_reg_write(u32 val, void __iomem *acp_mmio, u32 reg)
 	writel(val, acp_mmio + (reg * 4));
 }
 
-/* Configure a given dma channel parameters - enable/disble,
+/* Configure a given dma channel parameters - enable/disable,
  * number of descriptors, priority
  */
 static void config_acp_dma_channel(void __iomem *acp_mmio, u8 ch_num,
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
new file mode 100644
index 000000000000..067427259820
--- /dev/null
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
+#define _UAPI__LINUX_BPF_PERF_EVENT_H__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct bpf_perf_event_data {
+	struct pt_regs regs;
+	__u64 sample_period;
+};
+
+#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
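
Reviewer note: this header is copied into tools/ so BPF programs built from the tools tree can compile against it. A rough sketch of how a BPF_PROG_TYPE_PERF_EVENT program consumes this context; SEC(), bpf_trace_printk(), and the x86-64 regs layout come from the samples/bpf support headers and are assumptions here, not part of this patch:

#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include "bpf_helpers.h"	/* assumed: SEC() et al. from samples/bpf */

SEC("perf_event")
int on_perf_sample(struct bpf_perf_event_data *ctx)
{
	/* the two fields this header exposes to the program */
	__u64 period = ctx->sample_period;
	unsigned long ip = ctx->regs.ip;	/* x86-64 layout assumed */

	char fmt[] = "sample ip=%lx period=%llu\n";
	bpf_trace_printk(fmt, sizeof(fmt), ip, period);
	return 0;
}

char _license[] SEC("license") = "GPL";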
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index 11c8d9bc762e..5d19fdf80292 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -1387,7 +1387,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
 		/* Allow writing to any other BAR, or expansion ROM */
 		iowrite(portoff, val, mask, &d->config_words[reg]);
 		return true;
-		/* We let them overide latency timer and cacheline size */
+		/* We let them override latency timer and cacheline size */
 	} else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
 		/* Only let them change the first two fields. */
 		if (mask == 0xFFFFFFFF)
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index e2efddf10231..1f5300e56b44 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -132,7 +132,7 @@ else
   Q = @
 endif
 
-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
 MAKEOVERRIDES=
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 47076b15eebe..9b8555ea3459 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -135,7 +135,7 @@ else
   Q = @
 endif
 
-# Disable command line variables (CFLAGS) overide from top
+# Disable command line variables (CFLAGS) override from top
 # level Makefile (perf), otherwise build Makefile will get
 # the same command line setup.
 MAKEOVERRIDES=
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 66342804161c..0c03538df74c 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -140,7 +140,7 @@ struct pevent_plugin_option {
  * struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = {
  *	{
  *		.name = "option-name",
- *		.plugin_alias = "overide-file-name", (optional)
+ *		.plugin_alias = "override-file-name", (optional)
  *		.description = "description of option to show users",
  *	},
  *	{
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 4cfdbb5b6967..066086dd59a8 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -805,11 +805,20 @@ static struct rela *find_switch_table(struct objtool_file *file,
 		     insn->jump_dest->offset > orig_insn->offset))
 			break;
 
+		/* look for a relocation which references .rodata */
 		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
 						    insn->len);
-		if (text_rela && text_rela->sym == file->rodata->sym)
-			return find_rela_by_dest(file->rodata,
-						 text_rela->addend);
+		if (!text_rela || text_rela->sym != file->rodata->sym)
+			continue;
+
+		/*
+		 * Make sure the .rodata address isn't associated with a
+		 * symbol.  gcc jump tables are anonymous data.
+		 */
+		if (find_symbol_containing(file->rodata, text_rela->addend))
+			continue;
+
+		return find_rela_by_dest(file->rodata, text_rela->addend);
 	}
 
 	return NULL;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 0d7983ac63ef..d897702ce742 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -85,6 +85,18 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
 	return NULL;
 }
 
+struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
+{
+	struct symbol *sym;
+
+	list_for_each_entry(sym, &sec->symbol_list, list)
+		if (sym->type != STT_SECTION &&
+		    offset >= sym->offset && offset < sym->offset + sym->len)
+			return sym;
+
+	return NULL;
+}
+
 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
 				     unsigned int len)
 {
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index aa1ff6596684..731973e1a3f5 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -79,6 +79,7 @@ struct elf {
79struct elf *elf_open(const char *name); 79struct elf *elf_open(const char *name);
80struct section *find_section_by_name(struct elf *elf, const char *name); 80struct section *find_section_by_name(struct elf *elf, const char *name);
81struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); 81struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
82struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
82struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); 83struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
83struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, 84struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
84 unsigned int len); 85 unsigned int len);
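The three objtool hunks above tighten the switch-table heuristic: instead of returning on the first relocation that points into .rodata, the loop now keeps scanning and additionally rejects .rodata addresses covered by a named symbol, because GCC emits jump tables as anonymous data (named .rodata objects such as strings or lookup arrays are not switch tables). A minimal stand-alone sketch of the containment test, with a plain array in place of objtool's per-section symbol lists (struct layout here is assumed, not objtool's real one):

    #include <stdio.h>
    #include <stddef.h>

    struct sym {
            const char *name;
            unsigned long offset;
            unsigned long len;
    };

    /* Return the symbol containing @offset, or NULL for anonymous data. */
    static const struct sym *sym_containing(const struct sym *tab, size_t n,
                                            unsigned long offset)
    {
            for (size_t i = 0; i < n; i++)
                    if (offset >= tab[i].offset &&
                        offset < tab[i].offset + tab[i].len)
                            return &tab[i];
            return NULL;
    }

    int main(void)
    {
            const struct sym rodata[] = {
                    { "some_string", 0x00, 0x20 },
                    { "lookup_tbl",  0x40, 0x80 },
            };

            /* 0x30 lies between symbols: plausibly an anonymous jump table */
            printf("0x30 -> %s\n", sym_containing(rodata, 2, 0x30) ?
                   "named symbol" : "anonymous");
            return 0;
    }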
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 7913363bde5c..4f3c758d875d 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -31,7 +31,7 @@
31#error Instruction buffer size too small 31#error Instruction buffer size too small
32#endif 32#endif
33 33
34/* Based on branch_type() from perf_event_intel_lbr.c */ 34/* Based on branch_type() from arch/x86/events/intel/lbr.c */
35static void intel_pt_insn_decoder(struct insn *insn, 35static void intel_pt_insn_decoder(struct insn *insn,
36 struct intel_pt_insn *intel_pt_insn) 36 struct intel_pt_insn *intel_pt_insn)
37{ 37{
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 6e4eb2fc2d1e..0c8b61f8398e 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1880,6 +1880,7 @@ sub get_grub_index {
1880sub wait_for_input 1880sub wait_for_input
1881{ 1881{
1882 my ($fp, $time) = @_; 1882 my ($fp, $time) = @_;
1883 my $start_time;
1883 my $rin; 1884 my $rin;
1884 my $rout; 1885 my $rout;
1885 my $nr; 1886 my $nr;
@@ -1895,17 +1896,22 @@ sub wait_for_input
1895 vec($rin, fileno($fp), 1) = 1; 1896 vec($rin, fileno($fp), 1) = 1;
1896 vec($rin, fileno(\*STDIN), 1) = 1; 1897 vec($rin, fileno(\*STDIN), 1) = 1;
1897 1898
1899 $start_time = time;
1900
1898 while (1) { 1901 while (1) {
1899 $nr = select($rout=$rin, undef, undef, $time); 1902 $nr = select($rout=$rin, undef, undef, $time);
1900 1903
1901 if ($nr <= 0) { 1904 last if ($nr <= 0);
1902 return undef;
1903 }
1904 1905
1905 # copy data from stdin to the console 1906 # copy data from stdin to the console
1906 if (vec($rout, fileno(\*STDIN), 1) == 1) { 1907 if (vec($rout, fileno(\*STDIN), 1) == 1) {
1907 sysread(\*STDIN, $buf, 1000); 1908 $nr = sysread(\*STDIN, $buf, 1000);
1908 syswrite($fp, $buf, 1000); 1909 syswrite($fp, $buf, $nr) if ($nr > 0);
1910 }
1911
1912 # The timeout is based on time waiting for the fp data
1913 if (vec($rout, fileno($fp), 1) != 1) {
1914 last if (defined($time) && (time - $start_time > $time));
1909 next; 1915 next;
1910 } 1916 }
1911 1917
@@ -1917,12 +1923,11 @@ sub wait_for_input
1917 last if ($ch eq "\n"); 1923 last if ($ch eq "\n");
1918 } 1924 }
1919 1925
1920 if (!length($line)) { 1926 last if (!length($line));
1921 return undef;
1922 }
1923 1927
1924 return $line; 1928 return $line;
1925 } 1929 }
1930 return undef;
1926} 1931}
1927 1932
1928sub reboot_to { 1933sub reboot_to {
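The wait_for_input() rework changes two things: stdin traffic is forwarded to the console using the byte count sysread() actually returned (instead of a fixed 1000, which could replay stale buffer contents), and the wait is now measured against a recorded start time, so stdin activity no longer restarts the timeout for console data. A hedged C rendering of that loop shape, assuming a finite timeout (the descriptor setup in main() is illustrative):

    #include <sys/select.h>
    #include <time.h>
    #include <unistd.h>

    /* Forward stdin to @console_fd until console data arrives or we time out. */
    static int wait_for_console(int console_fd, time_t timeout)
    {
            time_t start = time(NULL);
            char buf[1000];
            fd_set rfds;

            while (1) {
                    struct timeval tv = { .tv_sec = timeout };

                    FD_ZERO(&rfds);
                    FD_SET(STDIN_FILENO, &rfds);
                    FD_SET(console_fd, &rfds);

                    if (select(console_fd + 1, &rfds, NULL, NULL, &tv) <= 0)
                            return -1;      /* error or select timeout */

                    if (FD_ISSET(STDIN_FILENO, &rfds)) {
                            ssize_t n = read(STDIN_FILENO, buf, sizeof(buf));
                            if (n > 0)      /* forward only what we read */
                                    write(console_fd, buf, n);
                    }

                    if (FD_ISSET(console_fd, &rfds))
                            return 0;       /* console data is ready */

                    /* only time spent waiting for console data counts */
                    if (time(NULL) - start > timeout)
                            return -1;
            }
    }

    int main(void)
    {
            int fds[2];

            pipe(fds);
            write(fds[1], "x", 1);          /* pretend the console spoke */
            return wait_for_console(fds[0], 2) ? 1 : 0;
    }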
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index f11315bedefc..6a9480c03cbd 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -1,6 +1,7 @@
1 1
2CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address 2CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
3LDFLAGS += -lpthread -lurcu 3LDFLAGS += -fsanitize=address
4LDLIBS+= -lpthread -lurcu
4TARGETS = main idr-test multiorder 5TARGETS = main idr-test multiorder
5CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o 6CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
6OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ 7OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
@@ -10,23 +11,25 @@ ifndef SHIFT
10 SHIFT=3 11 SHIFT=3
11endif 12endif
12 13
14ifeq ($(BUILD), 32)
15 CFLAGS += -m32
16 LDFLAGS += -m32
17endif
18
13targets: mapshift $(TARGETS) 19targets: mapshift $(TARGETS)
14 20
15main: $(OFILES) 21main: $(OFILES)
16 $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main
17 22
18idr-test: idr-test.o $(CORE_OFILES) 23idr-test: idr-test.o $(CORE_OFILES)
19 $(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test
20 24
21multiorder: multiorder.o $(CORE_OFILES) 25multiorder: multiorder.o $(CORE_OFILES)
22 $(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder
23 26
24clean: 27clean:
25 $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h 28 $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
26 29
27vpath %.c ../../lib 30vpath %.c ../../lib
28 31
29$(OFILES): *.h */*.h generated/map-shift.h \ 32$(OFILES): Makefile *.h */*.h generated/map-shift.h \
30 ../../include/linux/*.h \ 33 ../../include/linux/*.h \
31 ../../include/asm/*.h \ 34 ../../include/asm/*.h \
32 ../../../include/linux/radix-tree.h \ 35 ../../../include/linux/radix-tree.h \
@@ -41,7 +44,7 @@ idr.c: ../../../lib/idr.c
41.PHONY: mapshift 44.PHONY: mapshift
42 45
43mapshift: 46mapshift:
44 @if ! grep -qw $(SHIFT) generated/map-shift.h; then \ 47 @if ! grep -qws $(SHIFT) generated/map-shift.h; then \
45 echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ 48 echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
46 generated/map-shift.h; \ 49 generated/map-shift.h; \
47 fi 50 fi
diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c
index 9b09ddfe462f..99c40f3ed133 100644
--- a/tools/testing/radix-tree/benchmark.c
+++ b/tools/testing/radix-tree/benchmark.c
@@ -17,6 +17,9 @@
17#include <time.h> 17#include <time.h>
18#include "test.h" 18#include "test.h"
19 19
20#define for_each_index(i, base, order) \
21 for (i = base; i < base + (1 << order); i++)
22
20#define NSEC_PER_SEC 1000000000L 23#define NSEC_PER_SEC 1000000000L
21 24
22static long long benchmark_iter(struct radix_tree_root *root, bool tagged) 25static long long benchmark_iter(struct radix_tree_root *root, bool tagged)
@@ -57,27 +60,176 @@ again:
57 return nsec; 60 return nsec;
58} 61}
59 62
63static void benchmark_insert(struct radix_tree_root *root,
64 unsigned long size, unsigned long step, int order)
65{
66 struct timespec start, finish;
67 unsigned long index;
68 long long nsec;
69
70 clock_gettime(CLOCK_MONOTONIC, &start);
71
72 for (index = 0 ; index < size ; index += step)
73 item_insert_order(root, index, order);
74
75 clock_gettime(CLOCK_MONOTONIC, &finish);
76
77 nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
78 (finish.tv_nsec - start.tv_nsec);
79
80 printv(2, "Size: %8ld, step: %8ld, order: %d, insertion: %15lld ns\n",
81 size, step, order, nsec);
82}
83
84static void benchmark_tagging(struct radix_tree_root *root,
85 unsigned long size, unsigned long step, int order)
86{
87 struct timespec start, finish;
88 unsigned long index;
89 long long nsec;
90
91 clock_gettime(CLOCK_MONOTONIC, &start);
92
93 for (index = 0 ; index < size ; index += step)
94 radix_tree_tag_set(root, index, 0);
95
96 clock_gettime(CLOCK_MONOTONIC, &finish);
97
98 nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
99 (finish.tv_nsec - start.tv_nsec);
100
101 printv(2, "Size: %8ld, step: %8ld, order: %d, tagging: %17lld ns\n",
102 size, step, order, nsec);
103}
104
105static void benchmark_delete(struct radix_tree_root *root,
106 unsigned long size, unsigned long step, int order)
107{
108 struct timespec start, finish;
109 unsigned long index, i;
110 long long nsec;
111
112 clock_gettime(CLOCK_MONOTONIC, &start);
113
114 for (index = 0 ; index < size ; index += step)
115 for_each_index(i, index, order)
116 item_delete(root, i);
117
118 clock_gettime(CLOCK_MONOTONIC, &finish);
119
120 nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
121 (finish.tv_nsec - start.tv_nsec);
122
123 printv(2, "Size: %8ld, step: %8ld, order: %d, deletion: %16lld ns\n",
124 size, step, order, nsec);
125}
126
60static void benchmark_size(unsigned long size, unsigned long step, int order) 127static void benchmark_size(unsigned long size, unsigned long step, int order)
61{ 128{
62 RADIX_TREE(tree, GFP_KERNEL); 129 RADIX_TREE(tree, GFP_KERNEL);
63 long long normal, tagged; 130 long long normal, tagged;
64 unsigned long index;
65 131
66 for (index = 0 ; index < size ; index += step) { 132 benchmark_insert(&tree, size, step, order);
67 item_insert_order(&tree, index, order); 133 benchmark_tagging(&tree, size, step, order);
68 radix_tree_tag_set(&tree, index, 0);
69 }
70 134
71 tagged = benchmark_iter(&tree, true); 135 tagged = benchmark_iter(&tree, true);
72 normal = benchmark_iter(&tree, false); 136 normal = benchmark_iter(&tree, false);
73 137
74 printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", 138 printv(2, "Size: %8ld, step: %8ld, order: %d, tagged iteration: %8lld ns\n",
75 size, step, order, tagged, normal); 139 size, step, order, tagged);
140 printv(2, "Size: %8ld, step: %8ld, order: %d, normal iteration: %8lld ns\n",
141 size, step, order, normal);
142
143 benchmark_delete(&tree, size, step, order);
76 144
77 item_kill_tree(&tree); 145 item_kill_tree(&tree);
78 rcu_barrier(); 146 rcu_barrier();
79} 147}
80 148
149static long long __benchmark_split(unsigned long index,
150 int old_order, int new_order)
151{
152 struct timespec start, finish;
153 long long nsec;
154 RADIX_TREE(tree, GFP_ATOMIC);
155
156 item_insert_order(&tree, index, old_order);
157
158 clock_gettime(CLOCK_MONOTONIC, &start);
159 radix_tree_split(&tree, index, new_order);
160 clock_gettime(CLOCK_MONOTONIC, &finish);
161 nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
162 (finish.tv_nsec - start.tv_nsec);
163
164 item_kill_tree(&tree);
165
166 return nsec;
167
168}
169
170static void benchmark_split(unsigned long size, unsigned long step)
171{
172 int i, j, idx;
173 long long nsec = 0;
174
175
176 for (idx = 0; idx < size; idx += step) {
177 for (i = 3; i < 11; i++) {
178 for (j = 0; j < i; j++) {
179 nsec += __benchmark_split(idx, i, j);
180 }
181 }
182 }
183
184 printv(2, "Size %8ld, step %8ld, split time %10lld ns\n",
185 size, step, nsec);
186
187}
188
189static long long __benchmark_join(unsigned long index,
190 unsigned order1, unsigned order2)
191{
192 unsigned long loc;
193 struct timespec start, finish;
194 long long nsec;
195 void *item, *item2 = item_create(index + 1, order1);
196 RADIX_TREE(tree, GFP_KERNEL);
197
198 item_insert_order(&tree, index, order2);
199 item = radix_tree_lookup(&tree, index);
200
201 clock_gettime(CLOCK_MONOTONIC, &start);
202 radix_tree_join(&tree, index + 1, order1, item2);
203 clock_gettime(CLOCK_MONOTONIC, &finish);
204 nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
205 (finish.tv_nsec - start.tv_nsec);
206
207 loc = find_item(&tree, item);
208 if (loc == -1)
209 free(item);
210
211 item_kill_tree(&tree);
212
213 return nsec;
214}
215
216static void benchmark_join(unsigned long step)
217{
218 int i, j, idx;
219 long long nsec = 0;
220
221 for (idx = 0; idx < 1 << 10; idx += step) {
222 for (i = 1; i < 15; i++) {
223 for (j = 0; j < i; j++) {
224 nsec += __benchmark_join(idx, i, j);
225 }
226 }
227 }
228
229 printv(2, "Size %8d, step %8ld, join time %10lld ns\n",
230 1 << 10, step, nsec);
231}
232
81void benchmark(void) 233void benchmark(void)
82{ 234{
83 unsigned long size[] = {1 << 10, 1 << 20, 0}; 235 unsigned long size[] = {1 << 10, 1 << 20, 0};
@@ -95,4 +247,11 @@ void benchmark(void)
95 for (c = 0; size[c]; c++) 247 for (c = 0; size[c]; c++)
96 for (s = 0; step[s]; s++) 248 for (s = 0; step[s]; s++)
97 benchmark_size(size[c], step[s] << 9, 9); 249 benchmark_size(size[c], step[s] << 9, 9);
250
251 for (c = 0; size[c]; c++)
252 for (s = 0; step[s]; s++)
253 benchmark_split(size[c], step[s]);
254
255 for (s = 0; step[s]; s++)
256 benchmark_join(step[s]);
98} 257}
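All of the new benchmark_* helpers share one timing idiom: sample CLOCK_MONOTONIC before and after the operation and fold the two timespecs into a single nanosecond count. The same idiom in stand-alone form, with a placeholder loop in place of the radix tree work:

    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    static long long elapsed_ns(const struct timespec *a,
                                const struct timespec *b)
    {
            return (b->tv_sec - a->tv_sec) * NSEC_PER_SEC +
                   (b->tv_nsec - a->tv_nsec);
    }

    int main(void)
    {
            struct timespec start, finish;
            volatile unsigned long sink = 0;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (unsigned long i = 0; i < 1000000; i++)
                    sink += i;      /* stand-in for the benchmarked work */
            clock_gettime(CLOCK_MONOTONIC, &finish);

            printf("loop: %lld ns\n", elapsed_ns(&start, &finish));
            return 0;
    }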
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index a26098c6123d..30cd0b296f1a 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -153,6 +153,30 @@ void idr_nowait_test(void)
153 idr_destroy(&idr); 153 idr_destroy(&idr);
154} 154}
155 155
156void idr_get_next_test(void)
157{
158 unsigned long i;
159 int nextid;
160 DEFINE_IDR(idr);
161
162 int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
163
164 for(i = 0; indices[i]; i++) {
165 struct item *item = item_create(indices[i], 0);
166 assert(idr_alloc(&idr, item, indices[i], indices[i+1],
167 GFP_KERNEL) == indices[i]);
168 }
169
170 for(i = 0, nextid = 0; indices[i]; i++) {
171 idr_get_next(&idr, &nextid);
172 assert(nextid == indices[i]);
173 nextid++;
174 }
175
176 idr_for_each(&idr, item_idr_free, &idr);
177 idr_destroy(&idr);
178}
179
156void idr_checks(void) 180void idr_checks(void)
157{ 181{
158 unsigned long i; 182 unsigned long i;
@@ -202,6 +226,7 @@ void idr_checks(void)
202 idr_alloc_test(); 226 idr_alloc_test();
203 idr_null_test(); 227 idr_null_test();
204 idr_nowait_test(); 228 idr_nowait_test();
229 idr_get_next_test();
205} 230}
206 231
207/* 232/*
@@ -338,7 +363,7 @@ void ida_check_random(void)
338{ 363{
339 DEFINE_IDA(ida); 364 DEFINE_IDA(ida);
340 DECLARE_BITMAP(bitmap, 2048); 365 DECLARE_BITMAP(bitmap, 2048);
341 int id; 366 int id, err;
342 unsigned int i; 367 unsigned int i;
343 time_t s = time(NULL); 368 time_t s = time(NULL);
344 369
@@ -352,8 +377,11 @@ void ida_check_random(void)
352 ida_remove(&ida, bit); 377 ida_remove(&ida, bit);
353 } else { 378 } else {
354 __set_bit(bit, bitmap); 379 __set_bit(bit, bitmap);
355 ida_pre_get(&ida, GFP_KERNEL); 380 do {
356 assert(!ida_get_new_above(&ida, bit, &id)); 381 ida_pre_get(&ida, GFP_KERNEL);
382 err = ida_get_new_above(&ida, bit, &id);
383 } while (err == -ENOMEM);
384 assert(!err);
357 assert(id == bit); 385 assert(id == bit);
358 } 386 }
359 } 387 }
@@ -362,6 +390,24 @@ void ida_check_random(void)
362 goto repeat; 390 goto repeat;
363} 391}
364 392
393void ida_simple_get_remove_test(void)
394{
395 DEFINE_IDA(ida);
396 unsigned long i;
397
398 for (i = 0; i < 10000; i++) {
399 assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
400 }
401 assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);
402
403 for (i = 0; i < 10000; i++) {
404 ida_simple_remove(&ida, i);
405 }
406 assert(ida_is_empty(&ida));
407
408 ida_destroy(&ida);
409}
410
365void ida_checks(void) 411void ida_checks(void)
366{ 412{
367 DEFINE_IDA(ida); 413 DEFINE_IDA(ida);
@@ -428,15 +474,41 @@ void ida_checks(void)
428 ida_check_max(); 474 ida_check_max();
429 ida_check_conv(); 475 ida_check_conv();
430 ida_check_random(); 476 ida_check_random();
477 ida_simple_get_remove_test();
431 478
432 radix_tree_cpu_dead(1); 479 radix_tree_cpu_dead(1);
433} 480}
434 481
482static void *ida_random_fn(void *arg)
483{
484 rcu_register_thread();
485 ida_check_random();
486 rcu_unregister_thread();
487 return NULL;
488}
489
490void ida_thread_tests(void)
491{
492 pthread_t threads[10];
493 int i;
494
495 for (i = 0; i < ARRAY_SIZE(threads); i++)
496 if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
497 perror("creating ida thread");
498 exit(1);
499 }
500
501 while (i--)
502 pthread_join(threads[i], NULL);
503}
504
435int __weak main(void) 505int __weak main(void)
436{ 506{
437 radix_tree_init(); 507 radix_tree_init();
438 idr_checks(); 508 idr_checks();
439 ida_checks(); 509 ida_checks();
510 ida_thread_tests();
511 radix_tree_cpu_dead(1);
440 rcu_barrier(); 512 rcu_barrier();
441 if (nr_allocated) 513 if (nr_allocated)
442 printf("nr_allocated = %d\n", nr_allocated); 514 printf("nr_allocated = %d\n", nr_allocated);
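The retry loop added to ida_check_random() is the usual pre-get/get pattern: preallocate, attempt the allocation, and try again whenever the attempt reports that the preallocated memory was consumed. A generic sketch of that shape (prealloc()/try_alloc() are hypothetical stand-ins, not the IDA API):

    #include <errno.h>
    #include <stdio.h>

    static int reserve;

    /* hypothetical stand-ins for ida_pre_get()/ida_get_new_above() */
    static void prealloc(void) { reserve = 1; }
    static int try_alloc(int *id)
    {
            if (!reserve)
                    return -ENOMEM; /* reserve consumed, caller must retry */
            reserve = 0;
            *id = 42;
            return 0;
    }

    static int alloc_id(int *id)
    {
            int err;

            do {
                    prealloc();             /* refill before every attempt */
                    err = try_alloc(id);
            } while (err == -ENOMEM);       /* only -ENOMEM is retryable */

            return err;                     /* 0 or a non-retryable error */
    }

    int main(void)
    {
            int id = -1;

            printf("alloc_id -> %d (id %d)\n", alloc_id(&id), id);
            return 0;
    }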
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index b829127d5670..bc9a78449572 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -368,6 +368,7 @@ int main(int argc, char **argv)
368 iteration_test(0, 10 + 90 * long_run); 368 iteration_test(0, 10 + 90 * long_run);
369 iteration_test(7, 10 + 90 * long_run); 369 iteration_test(7, 10 + 90 * long_run);
370 single_thread_tests(long_run); 370 single_thread_tests(long_run);
371 ida_thread_tests();
371 372
372 /* Free any remaining preallocated nodes */ 373 /* Free any remaining preallocated nodes */
373 radix_tree_cpu_dead(0); 374 radix_tree_cpu_dead(0);
diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c
index d4ff00989245..36dcf7d6945d 100644
--- a/tools/testing/radix-tree/tag_check.c
+++ b/tools/testing/radix-tree/tag_check.c
@@ -330,6 +330,34 @@ static void single_check(void)
330 item_kill_tree(&tree); 330 item_kill_tree(&tree);
331} 331}
332 332
333void radix_tree_clear_tags_test(void)
334{
335 unsigned long index;
336 struct radix_tree_node *node;
337 struct radix_tree_iter iter;
338 void **slot;
339
340 RADIX_TREE(tree, GFP_KERNEL);
341
342 item_insert(&tree, 0);
343 item_tag_set(&tree, 0, 0);
344 __radix_tree_lookup(&tree, 0, &node, &slot);
345 radix_tree_clear_tags(&tree, node, slot);
346 assert(item_tag_get(&tree, 0, 0) == 0);
347
348 for (index = 0; index < 1000; index++) {
349 item_insert(&tree, index);
350 item_tag_set(&tree, index, 0);
351 }
352
353 radix_tree_for_each_slot(slot, &tree, &iter, 0) {
354 radix_tree_clear_tags(&tree, iter.node, slot);
355 assert(item_tag_get(&tree, iter.index, 0) == 0);
356 }
357
358 item_kill_tree(&tree);
359}
360
333void tag_check(void) 361void tag_check(void)
334{ 362{
335 single_check(); 363 single_check();
@@ -347,4 +375,5 @@ void tag_check(void)
347 thrash_tags(); 375 thrash_tags();
348 rcu_barrier(); 376 rcu_barrier();
349 printv(2, "after thrash_tags: %d allocated\n", nr_allocated); 377 printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
378 radix_tree_clear_tags_test();
350} 379}
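radix_tree_clear_tags() operates on the (node, slot) pair that __radix_tree_lookup() or the iterator already produced, so the new test clears tags without a second tree walk. The same cursor-style API shape in miniature, with a flat array standing in for the tree (all names here are hypothetical):

    #include <stdio.h>

    struct entry { void *item; unsigned tags; };

    static struct entry table[8];           /* stand-in for the tree */

    /* lookup returns the value and hands back a cursor for later ops */
    static void *lookup(unsigned idx, struct entry **cursor)
    {
            *cursor = &table[idx];
            return table[idx].item;
    }

    static void clear_tags(struct entry *cursor)    /* no second lookup */
    {
            cursor->tags = 0;
    }

    int main(void)
    {
            struct entry *cur;
            int v = 7;

            table[3] = (struct entry){ .item = &v, .tags = 1 };
            lookup(3, &cur);
            clear_tags(cur);
            printf("tags after clear: %u\n", table[3].tags);
            return 0;
    }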
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index b30e11d9d271..0f8220cc6166 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -36,6 +36,7 @@ void iteration_test(unsigned order, unsigned duration);
36void benchmark(void); 36void benchmark(void);
37void idr_checks(void); 37void idr_checks(void);
38void ida_checks(void); 38void ida_checks(void);
39void ida_thread_tests(void);
39 40
40struct item * 41struct item *
41item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); 42item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 4b498265dae6..67531f47781b 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,12 +1,14 @@
1LIBDIR := ../../../lib 1LIBDIR := ../../../lib
2BPFOBJ := $(LIBDIR)/bpf/bpf.o 2BPFOBJ := $(LIBDIR)/bpf/bpf.o
3 3
4CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) 4CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ)
5 5
6TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map 6TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
7 7
8TEST_PROGS := test_kmod.sh 8TEST_PROGS := test_kmod.sh
9 9
10all: $(TEST_GEN_PROGS)
11
10.PHONY: all clean force 12.PHONY: all clean force
11 13
12# force a rebuild of BPFOBJ when its dependencies are updated 14# force a rebuild of BPFOBJ when its dependencies are updated
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index e1f5b9eea1e8..d1555e4240c0 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,8 @@
8 * License as published by the Free Software Foundation. 8 * License as published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <asm/types.h>
12#include <linux/types.h>
11#include <stdint.h> 13#include <stdint.h>
12#include <stdio.h> 14#include <stdio.h>
13#include <stdlib.h> 15#include <stdlib.h>
@@ -4583,10 +4585,12 @@ static bool is_admin(void)
4583 cap_flag_value_t sysadmin = CAP_CLEAR; 4585 cap_flag_value_t sysadmin = CAP_CLEAR;
4584 const cap_value_t cap_val = CAP_SYS_ADMIN; 4586 const cap_value_t cap_val = CAP_SYS_ADMIN;
4585 4587
4588#ifdef CAP_IS_SUPPORTED
4586 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) { 4589 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
4587 perror("cap_get_flag"); 4590 perror("cap_get_flag");
4588 return false; 4591 return false;
4589 } 4592 }
4593#endif
4590 caps = cap_get_proc(); 4594 caps = cap_get_proc();
4591 if (!caps) { 4595 if (!caps) {
4592 perror("cap_get_proc"); 4596 perror("cap_get_proc");
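CAP_IS_SUPPORTED is a libcap macro that older headers do not define, so the #ifdef keeps test_verifier building there at the cost of skipping the early probe. The guard shape in isolation (in the real test the macro comes from <sys/capability.h>; this stand-alone sketch therefore always takes the fallback branch):

    #include <stdio.h>

    int main(void)
    {
    #ifdef CAP_IS_SUPPORTED
            /* newer libcap: probe support before querying flags */
            puts("CAP_IS_SUPPORTED available");
    #else
            /* older libcap: fall through and let cap_get_proc() decide */
            puts("CAP_IS_SUPPORTED not defined, skipping probe");
    #endif
            return 0;
    }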
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 248a820048df..66d31de60b9a 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name)
114 114
115 rc = run_test(test_function, name); 115 rc = run_test(test_function, name);
116 116
117 if (rc == MAGIC_SKIP_RETURN_VALUE) 117 if (rc == MAGIC_SKIP_RETURN_VALUE) {
118 test_skip(name); 118 test_skip(name);
119 else 119 /* so that skipped test is not marked as failed */
120 rc = 0;
121 } else
120 test_finish(name, rc); 122 test_finish(name, rc);
121 123
122 return rc; 124 return rc;
diff --git a/tools/testing/selftests/powerpc/include/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h
index d828bfb6ef2d..54064ced9e95 100644
--- a/tools/testing/selftests/powerpc/include/vsx_asm.h
+++ b/tools/testing/selftests/powerpc/include/vsx_asm.h
@@ -16,56 +16,56 @@
16 */ 16 */
17FUNC_START(load_vsx) 17FUNC_START(load_vsx)
18 li r5,0 18 li r5,0
19 lxvx vs20,r5,r3 19 lxvd2x vs20,r5,r3
20 addi r5,r5,16 20 addi r5,r5,16
21 lxvx vs21,r5,r3 21 lxvd2x vs21,r5,r3
22 addi r5,r5,16 22 addi r5,r5,16
23 lxvx vs22,r5,r3 23 lxvd2x vs22,r5,r3
24 addi r5,r5,16 24 addi r5,r5,16
25 lxvx vs23,r5,r3 25 lxvd2x vs23,r5,r3
26 addi r5,r5,16 26 addi r5,r5,16
27 lxvx vs24,r5,r3 27 lxvd2x vs24,r5,r3
28 addi r5,r5,16 28 addi r5,r5,16
29 lxvx vs25,r5,r3 29 lxvd2x vs25,r5,r3
30 addi r5,r5,16 30 addi r5,r5,16
31 lxvx vs26,r5,r3 31 lxvd2x vs26,r5,r3
32 addi r5,r5,16 32 addi r5,r5,16
33 lxvx vs27,r5,r3 33 lxvd2x vs27,r5,r3
34 addi r5,r5,16 34 addi r5,r5,16
35 lxvx vs28,r5,r3 35 lxvd2x vs28,r5,r3
36 addi r5,r5,16 36 addi r5,r5,16
37 lxvx vs29,r5,r3 37 lxvd2x vs29,r5,r3
38 addi r5,r5,16 38 addi r5,r5,16
39 lxvx vs30,r5,r3 39 lxvd2x vs30,r5,r3
40 addi r5,r5,16 40 addi r5,r5,16
41 lxvx vs31,r5,r3 41 lxvd2x vs31,r5,r3
42 blr 42 blr
43FUNC_END(load_vsx) 43FUNC_END(load_vsx)
44 44
45FUNC_START(store_vsx) 45FUNC_START(store_vsx)
46 li r5,0 46 li r5,0
47 stxvx vs20,r5,r3 47 stxvd2x vs20,r5,r3
48 addi r5,r5,16 48 addi r5,r5,16
49 stxvx vs21,r5,r3 49 stxvd2x vs21,r5,r3
50 addi r5,r5,16 50 addi r5,r5,16
51 stxvx vs22,r5,r3 51 stxvd2x vs22,r5,r3
52 addi r5,r5,16 52 addi r5,r5,16
53 stxvx vs23,r5,r3 53 stxvd2x vs23,r5,r3
54 addi r5,r5,16 54 addi r5,r5,16
55 stxvx vs24,r5,r3 55 stxvd2x vs24,r5,r3
56 addi r5,r5,16 56 addi r5,r5,16
57 stxvx vs25,r5,r3 57 stxvd2x vs25,r5,r3
58 addi r5,r5,16 58 addi r5,r5,16
59 stxvx vs26,r5,r3 59 stxvd2x vs26,r5,r3
60 addi r5,r5,16 60 addi r5,r5,16
61 stxvx vs27,r5,r3 61 stxvd2x vs27,r5,r3
62 addi r5,r5,16 62 addi r5,r5,16
63 stxvx vs28,r5,r3 63 stxvd2x vs28,r5,r3
64 addi r5,r5,16 64 addi r5,r5,16
65 stxvx vs29,r5,r3 65 stxvd2x vs29,r5,r3
66 addi r5,r5,16 66 addi r5,r5,16
67 stxvx vs30,r5,r3 67 stxvd2x vs30,r5,r3
68 addi r5,r5,16 68 addi r5,r5,16
69 stxvx vs31,r5,r3 69 stxvd2x vs31,r5,r3
70 blr 70 blr
71FUNC_END(store_vsx) 71FUNC_END(store_vsx)
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 4cff7e7ddcc4..41642ba5e318 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,5 +1,9 @@
1# Makefile for vm selftests 1# Makefile for vm selftests
2 2
3ifndef OUTPUT
4 OUTPUT := $(shell pwd)
5endif
6
3CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS) 7CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
4LDLIBS = -lrt 8LDLIBS = -lrt
5TEST_GEN_FILES = compaction_test 9TEST_GEN_FILES = compaction_test
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index 5b2b4b3c634c..b4967d875236 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -245,7 +245,7 @@ void do_unexpected_base(void)
245 long ret; 245 long ret;
246 asm volatile ("int $0x80" 246 asm volatile ("int $0x80"
247 : "=a" (ret) : "a" (243), "b" (low_desc) 247 : "=a" (ret) : "a" (243), "b" (low_desc)
248 : "flags"); 248 : "r8", "r9", "r10", "r11");
249 memcpy(&desc, low_desc, sizeof(desc)); 249 memcpy(&desc, low_desc, sizeof(desc));
250 munmap(low_desc, sizeof(desc)); 250 munmap(low_desc, sizeof(desc));
251 251
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 4af47079cf04..f6121612e769 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -45,6 +45,12 @@
45#define AR_DB (1 << 22) 45#define AR_DB (1 << 22)
46#define AR_G (1 << 23) 46#define AR_G (1 << 23)
47 47
48#ifdef __x86_64__
49# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
50#else
51# define INT80_CLOBBERS
52#endif
53
48static int nerrs; 54static int nerrs;
49 55
50/* Points to an array of 1024 ints, each holding its own index. */ 56/* Points to an array of 1024 ints, each holding its own index. */
@@ -588,7 +594,7 @@ static int invoke_set_thread_area(void)
588 asm volatile ("int $0x80" 594 asm volatile ("int $0x80"
589 : "=a" (ret), "+m" (low_user_desc) : 595 : "=a" (ret), "+m" (low_user_desc) :
590 "a" (243), "b" (low_user_desc) 596 "a" (243), "b" (low_user_desc)
591 : "flags"); 597 : INT80_CLOBBERS);
592 return ret; 598 return ret;
593} 599}
594 600
@@ -657,7 +663,7 @@ static void test_gdt_invalidation(void)
657 "+a" (eax) 663 "+a" (eax)
658 : "m" (low_user_desc_clear), 664 : "m" (low_user_desc_clear),
659 [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) 665 [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
660 : "flags"); 666 : INT80_CLOBBERS);
661 667
662 if (sel != 0) { 668 if (sel != 0) {
663 result = "FAIL"; 669 result = "FAIL";
@@ -688,7 +694,7 @@ static void test_gdt_invalidation(void)
688 "+a" (eax) 694 "+a" (eax)
689 : "m" (low_user_desc_clear), 695 : "m" (low_user_desc_clear),
690 [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) 696 [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
691 : "flags"); 697 : INT80_CLOBBERS);
692 698
693 if (sel != 0) { 699 if (sel != 0) {
694 result = "FAIL"; 700 result = "FAIL";
@@ -721,7 +727,7 @@ static void test_gdt_invalidation(void)
721 "+a" (eax) 727 "+a" (eax)
722 : "m" (low_user_desc_clear), 728 : "m" (low_user_desc_clear),
723 [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) 729 [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
724 : "flags"); 730 : INT80_CLOBBERS);
725 731
726#ifdef __x86_64__ 732#ifdef __x86_64__
727 syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base); 733 syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
@@ -774,7 +780,7 @@ static void test_gdt_invalidation(void)
774 "+a" (eax) 780 "+a" (eax)
775 : "m" (low_user_desc_clear), 781 : "m" (low_user_desc_clear),
776 [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear) 782 [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
777 : "flags"); 783 : INT80_CLOBBERS);
778 784
779#ifdef __x86_64__ 785#ifdef __x86_64__
780 syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base); 786 syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index b037ce9cf116..eaea92439708 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -58,7 +58,8 @@ static void do_full_int80(struct syscall_args32 *args)
58 asm volatile ("int $0x80" 58 asm volatile ("int $0x80"
59 : "+a" (args->nr), 59 : "+a" (args->nr),
60 "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2), 60 "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
61 "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)); 61 "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
62 : : "r8", "r9", "r10", "r11");
62 args->arg5 = bp; 63 args->arg5 = bp;
63#else 64#else
64 sys32_helper(args, int80_and_ret); 65 sys32_helper(args, int80_and_ret);
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index 50c26358e8b7..a48da95c18fd 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -56,9 +56,11 @@ static volatile sig_atomic_t sig_traps;
56#ifdef __x86_64__ 56#ifdef __x86_64__
57# define REG_IP REG_RIP 57# define REG_IP REG_RIP
58# define WIDTH "q" 58# define WIDTH "q"
59# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
59#else 60#else
60# define REG_IP REG_EIP 61# define REG_IP REG_EIP
61# define WIDTH "l" 62# define WIDTH "l"
63# define INT80_CLOBBERS
62#endif 64#endif
63 65
64static unsigned long get_eflags(void) 66static unsigned long get_eflags(void)
@@ -140,7 +142,8 @@ int main()
140 142
141 printf("[RUN]\tSet TF and check int80\n"); 143 printf("[RUN]\tSet TF and check int80\n");
142 set_eflags(get_eflags() | X86_EFLAGS_TF); 144 set_eflags(get_eflags() | X86_EFLAGS_TF);
143 asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)); 145 asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
146 : INT80_CLOBBERS);
144 check_result(); 147 check_result();
145 148
146 /* 149 /*
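The common thread in the x86 selftest hunks above: in 64-bit mode the int $0x80 compat syscall path does not preserve r8-r11, so inline asm issuing it must list those registers as clobbered; 32-bit builds have no such registers, hence the empty macro. A stand-alone x86-only sketch of the convention (needs a kernel with the 32-bit syscall path enabled; 20 is getpid in the 32-bit table that int $0x80 uses):

    #include <stdio.h>

    #ifdef __x86_64__
    # define INT80_CLOBBERS "r8", "r9", "r10", "r11"
    #else
    # define INT80_CLOBBERS                 /* no r8-r11 on 32-bit */
    #endif

    int main(void)
    {
            long ret;

            asm volatile ("int $0x80"
                          : "=a" (ret)
                          : "a" (20L)       /* 32-bit getpid */
                          : INT80_CLOBBERS);

            printf("int $0x80 getpid -> %ld\n", ret);
            return 0;
    }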
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 571b64a01c50..8d1da1af4b09 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
360 return ret; 360 return ret;
361} 361}
362 362
363static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
364 struct vgic_its *its,
365 gpa_t addr, unsigned int len)
366{
367 u32 reg = 0;
368
369 mutex_lock(&its->cmd_lock);
370 if (its->creadr == its->cwriter)
371 reg |= GITS_CTLR_QUIESCENT;
372 if (its->enabled)
373 reg |= GITS_CTLR_ENABLE;
374 mutex_unlock(&its->cmd_lock);
375
376 return reg;
377}
378
379static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
380 gpa_t addr, unsigned int len,
381 unsigned long val)
382{
383 its->enabled = !!(val & GITS_CTLR_ENABLE);
384}
385
386static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, 363static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
387 struct vgic_its *its, 364 struct vgic_its *its,
388 gpa_t addr, unsigned int len) 365 gpa_t addr, unsigned int len)
@@ -1161,33 +1138,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
1161#define ITS_CMD_SIZE 32 1138#define ITS_CMD_SIZE 32
1162#define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5)) 1139#define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5))
1163 1140
1164/* 1141/* Must be called with the cmd_lock held. */
1165 * By writing to CWRITER the guest announces new commands to be processed. 1142static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
1166 * To avoid any races in the first place, we take the its_cmd lock, which
1167 * protects our ring buffer variables, so that there is only one user
1168 * per ITS handling commands at a given time.
1169 */
1170static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1171 gpa_t addr, unsigned int len,
1172 unsigned long val)
1173{ 1143{
1174 gpa_t cbaser; 1144 gpa_t cbaser;
1175 u64 cmd_buf[4]; 1145 u64 cmd_buf[4];
1176 u32 reg;
1177 1146
1178 if (!its) 1147 /* Commands are only processed when the ITS is enabled. */
1179 return; 1148 if (!its->enabled)
1180
1181 mutex_lock(&its->cmd_lock);
1182
1183 reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1184 reg = ITS_CMD_OFFSET(reg);
1185 if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1186 mutex_unlock(&its->cmd_lock);
1187 return; 1149 return;
1188 }
1189 1150
1190 its->cwriter = reg;
1191 cbaser = CBASER_ADDRESS(its->cbaser); 1151 cbaser = CBASER_ADDRESS(its->cbaser);
1192 1152
1193 while (its->cwriter != its->creadr) { 1153 while (its->cwriter != its->creadr) {
@@ -1207,6 +1167,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1207 if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser)) 1167 if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
1208 its->creadr = 0; 1168 its->creadr = 0;
1209 } 1169 }
1170}
1171
1172/*
1173 * By writing to CWRITER the guest announces new commands to be processed.
1174 * To avoid any races in the first place, we take the its_cmd lock, which
1175 * protects our ring buffer variables, so that there is only one user
1176 * per ITS handling commands at a given time.
1177 */
1178static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1179 gpa_t addr, unsigned int len,
1180 unsigned long val)
1181{
1182 u64 reg;
1183
1184 if (!its)
1185 return;
1186
1187 mutex_lock(&its->cmd_lock);
1188
1189 reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1190 reg = ITS_CMD_OFFSET(reg);
1191 if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1192 mutex_unlock(&its->cmd_lock);
1193 return;
1194 }
1195 its->cwriter = reg;
1196
1197 vgic_its_process_commands(kvm, its);
1210 1198
1211 mutex_unlock(&its->cmd_lock); 1199 mutex_unlock(&its->cmd_lock);
1212} 1200}
@@ -1287,6 +1275,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
1287 *regptr = reg; 1275 *regptr = reg;
1288} 1276}
1289 1277
1278static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
1279 struct vgic_its *its,
1280 gpa_t addr, unsigned int len)
1281{
1282 u32 reg = 0;
1283
1284 mutex_lock(&its->cmd_lock);
1285 if (its->creadr == its->cwriter)
1286 reg |= GITS_CTLR_QUIESCENT;
1287 if (its->enabled)
1288 reg |= GITS_CTLR_ENABLE;
1289 mutex_unlock(&its->cmd_lock);
1290
1291 return reg;
1292}
1293
1294static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1295 gpa_t addr, unsigned int len,
1296 unsigned long val)
1297{
1298 mutex_lock(&its->cmd_lock);
1299
1300 its->enabled = !!(val & GITS_CTLR_ENABLE);
1301
1302 /*
1303 * Try to process any pending commands. This function bails out early
1304 * if the ITS is disabled or no commands have been queued.
1305 */
1306 vgic_its_process_commands(kvm, its);
1307
1308 mutex_unlock(&its->cmd_lock);
1309}
1310
1290#define REGISTER_ITS_DESC(off, rd, wr, length, acc) \ 1311#define REGISTER_ITS_DESC(off, rd, wr, length, acc) \
1291{ \ 1312{ \
1292 .reg_offset = off, \ 1313 .reg_offset = off, \
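The vgic-its.c refactor splits "advance the write pointer" from "drain the command queue" so that both the CWRITER handler and the new CTLR handler can trigger draining under cmd_lock; since vgic_its_process_commands() bails out while the ITS is disabled, commands queued before the guest sets GITS_CTLR.Enable are picked up at enable time. A compressed user-space model of that shape (ring size, mutex, and printout are illustrative, not the KVM code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int creadr, cwriter;    /* ring read/write indices */
    static int enabled;

    /* must be called with cmd_lock held; no-op while disabled */
    static void process_commands(void)
    {
            if (!enabled)
                    return;
            while (creadr != cwriter) {
                    printf("handle cmd %u\n", creadr);
                    creadr = (creadr + 1) % 16;
            }
    }

    static void write_cwriter(unsigned int val)     /* guest queues commands */
    {
            pthread_mutex_lock(&cmd_lock);
            cwriter = val % 16;
            process_commands();
            pthread_mutex_unlock(&cmd_lock);
    }

    static void write_ctlr(int enable)      /* enabling drains the backlog */
    {
            pthread_mutex_lock(&cmd_lock);
            enabled = enable;
            process_commands();
            pthread_mutex_unlock(&cmd_lock);
    }

    int main(void)
    {
            write_cwriter(3);       /* queued while disabled: nothing runs */
            write_ctlr(1);          /* enable: backlog 0..2 is drained now */
            return 0;
    }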
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 3654b4c835ef..2a5db1352722 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -180,21 +180,37 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
180static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, 180static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
181 bool new_active_state) 181 bool new_active_state)
182{ 182{
183 struct kvm_vcpu *requester_vcpu;
183 spin_lock(&irq->irq_lock); 184 spin_lock(&irq->irq_lock);
185
186 /*
187 * The vcpu parameter here can mean multiple things depending on how
188 * this function is called; when handling a trap from the kernel it
189 * depends on the GIC version, and these functions are also called as
190 * part of save/restore from userspace.
191 *
192 * Therefore, we have to figure out the requester in a reliable way.
193 *
194 * When accessing VGIC state from user space, the requester_vcpu is
195 * NULL, which is fine, because we guarantee that no VCPUs are running
196 * when accessing VGIC state from user space so irq->vcpu->cpu is
197 * always -1.
198 */
199 requester_vcpu = kvm_arm_get_running_vcpu();
200
184 /* 201 /*
185 * If this virtual IRQ was written into a list register, we 202 * If this virtual IRQ was written into a list register, we
186 * have to make sure the CPU that runs the VCPU thread has 203 * have to make sure the CPU that runs the VCPU thread has
187 * synced back LR state to the struct vgic_irq. We can only 204 * synced back the LR state to the struct vgic_irq.
188 * know this for sure, when either this irq is not assigned to
189 * anyone's AP list anymore, or the VCPU thread is not
190 * running on any CPUs.
191 * 205 *
192 * In the opposite case, we know the VCPU thread may be on its 206 * As long as the conditions below are true, we know the VCPU thread
193 * way back from the guest and still has to sync back this 207 * may be on its way back from the guest (we kicked the VCPU thread in
194 * IRQ, so we release and re-acquire the spin_lock to let the 208 * vgic_change_active_prepare) and still has to sync back this IRQ,
195 * other thread sync back the IRQ. 209 * so we release and re-acquire the spin_lock to let the other thread
210 * sync back the IRQ.
196 */ 211 */
197 while (irq->vcpu && /* IRQ may have state in an LR somewhere */ 212 while (irq->vcpu && /* IRQ may have state in an LR somewhere */
213 irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
198 irq->vcpu->cpu != -1) /* VCPU thread is running */ 214 irq->vcpu->cpu != -1) /* VCPU thread is running */
199 cond_resched_lock(&irq->irq_lock); 215 cond_resched_lock(&irq->irq_lock);
200 216
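The extra irq->vcpu != requester_vcpu test avoids a livelock: a VCPU trapping on its own GIC state would otherwise wait here for itself to sync the LR back, and kvm_arm_get_running_vcpu() returning NULL for userspace accesses is safe because no VCPUs run during those. The shape of the check in isolation (types and the -1 "not running" sentinel mirror the code above; this is a model, not KVM):

    #include <stdbool.h>
    #include <stddef.h>

    struct vcpu { int cpu; };               /* cpu == -1: thread not running */
    struct irq  { struct vcpu *vcpu; };     /* owner with possible LR state */

    static bool must_wait_for_sync(const struct irq *irq,
                                   const struct vcpu *requester)
    {
            return irq->vcpu &&                     /* state may live in an LR */
                   irq->vcpu != requester &&        /* and we are not that VCPU */
                   irq->vcpu->cpu != -1;            /* and its thread is running */
    }

    int main(void)
    {
            struct vcpu owner = { .cpu = 2 }, self = { .cpu = 0 };
            struct irq irq = { .vcpu = &owner };

            /* another running VCPU owns the IRQ: we must wait */
            bool w1 = must_wait_for_sync(&irq, &self);
            /* we own it ourselves: no self-wait */
            bool w2 = must_wait_for_sync(&irq, &owner);

            return (w1 && !w2) ? 0 : 1;
    }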
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index edc6ee2dc852..be0f4c3e0142 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -229,10 +229,13 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
229 /* 229 /*
230 * If we are emulating a GICv3, we do it in an non-GICv2-compatible 230 * If we are emulating a GICv3, we do it in an non-GICv2-compatible
231 * way, so we force SRE to 1 to demonstrate this to the guest. 231 * way, so we force SRE to 1 to demonstrate this to the guest.
232 * Also, we don't support any form of IRQ/FIQ bypass.
232 * This goes with the spec allowing the value to be RAO/WI. 233 * This goes with the spec allowing the value to be RAO/WI.
233 */ 234 */
234 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { 235 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
235 vgic_v3->vgic_sre = ICC_SRE_EL1_SRE; 236 vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
237 ICC_SRE_EL1_DFB |
238 ICC_SRE_EL1_SRE);
236 vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE; 239 vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
237 } else { 240 } else {
238 vgic_v3->vgic_sre = 0; 241 vgic_v3->vgic_sre = 0;