-rw-r--r--  CREDITS | 5
-rw-r--r--  Documentation/ABI/testing/sysfs-class-cxl | 7
-rw-r--r--  Documentation/device-mapper/dm-raid.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/clock/uniphier-clock.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/ipmi/ipmi-smic.txt (renamed from Documentation/devicetree/bindings/ipmi.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/timer/jcore,pit.txt | 24
-rw-r--r--  Documentation/filesystems/proc.txt | 26
-rw-r--r--  Documentation/gpio/board.txt | 11
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/kernel/ptrace.c | 9
-rw-r--r--  arch/arc/Kconfig | 27
-rw-r--r--  arch/arc/Makefile | 3
-rw-r--r--  arch/arc/boot/Makefile | 16
-rw-r--r--  arch/arc/include/asm/arcregs.h | 3
-rw-r--r--  arch/arc/include/asm/cache.h | 2
-rw-r--r--  arch/arc/include/asm/elf.h | 2
-rw-r--r--  arch/arc/include/asm/mcip.h | 16
-rw-r--r--  arch/arc/include/asm/module.h | 1
-rw-r--r--  arch/arc/include/asm/setup.h | 6
-rw-r--r--  arch/arc/include/asm/syscalls.h | 1
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h | 9
-rw-r--r--  arch/arc/kernel/mcip.c | 31
-rw-r--r--  arch/arc/kernel/module.c | 53
-rw-r--r--  arch/arc/kernel/process.c | 33
-rw-r--r--  arch/arc/kernel/setup.c | 113
-rw-r--r--  arch/arc/kernel/troubleshoot.c | 110
-rw-r--r--  arch/arc/mm/cache.c | 19
-rw-r--r--  arch/arc/mm/dma.c | 4
-rw-r--r--  arch/arc/mm/tlb.c | 6
-rw-r--r--  arch/arc/mm/tlbex.S | 21
-rw-r--r--  arch/arm/kvm/arm.c | 7
-rw-r--r--  arch/arm64/Kconfig | 2
-rw-r--r--  arch/arm64/Makefile | 2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 2
-rw-r--r--  arch/arm64/include/asm/exec.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 11
-rw-r--r--  arch/arm64/include/asm/memory.h | 2
-rw-r--r--  arch/arm64/include/asm/module.h | 5
-rw-r--r--  arch/arm64/include/asm/percpu.h | 120
-rw-r--r--  arch/arm64/include/asm/processor.h | 6
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 2
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 8
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 36
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 3
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 10
-rw-r--r--  arch/arm64/kernel/head.S | 3
-rw-r--r--  arch/arm64/kernel/process.c | 18
-rw-r--r--  arch/arm64/kernel/sleep.S | 2
-rw-r--r--  arch/arm64/kernel/smp.c | 1
-rw-r--r--  arch/arm64/kernel/suspend.c | 11
-rw-r--r--  arch/arm64/kernel/traps.c | 30
-rw-r--r--  arch/arm64/mm/fault.c | 15
-rw-r--r--  arch/arm64/mm/init.c | 26
-rw-r--r--  arch/arm64/mm/numa.c | 9
-rw-r--r--  arch/blackfin/kernel/ptrace.c | 5
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 6
-rw-r--r--  arch/cris/arch-v32/kernel/ptrace.c | 4
-rw-r--r--  arch/h8300/include/asm/thread_info.h | 4
-rw-r--r--  arch/h8300/kernel/signal.c | 2
-rw-r--r--  arch/ia64/kernel/err_inject.c | 2
-rw-r--r--  arch/ia64/kernel/ptrace.c | 14
-rw-r--r--  arch/m32r/kernel/ptrace.c | 15
-rw-r--r--  arch/mips/kernel/ptrace32.c | 5
-rw-r--r--  arch/mips/kvm/mips.c | 1
-rw-r--r--  arch/mips/mm/gup.c | 2
-rw-r--r--  arch/powerpc/boot/main.c | 18
-rw-r--r--  arch/powerpc/include/asm/cpuidle.h | 2
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 16
-rw-r--r--  arch/powerpc/include/asm/tlb.h | 12
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 4
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 50
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S | 35
-rw-r--r--  arch/powerpc/kernel/process.c | 2
-rw-r--r--  arch/powerpc/kernel/ptrace32.c | 5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xics.c | 1
-rw-r--r--  arch/powerpc/mm/copro_fault.c | 2
-rw-r--r--  arch/powerpc/mm/numa.c | 46
-rw-r--r--  arch/powerpc/mm/tlb-radix.c | 8
-rw-r--r--  arch/s390/include/asm/ftrace.h | 4
-rw-r--r--  arch/s390/include/asm/processor.h | 2
-rw-r--r--  arch/s390/include/asm/unistd.h | 3
-rw-r--r--  arch/s390/kernel/dis.c | 4
-rw-r--r--  arch/s390/kernel/dumpstack.c | 63
-rw-r--r--  arch/s390/kernel/perf_event.c | 2
-rw-r--r--  arch/s390/kernel/stacktrace.c | 4
-rw-r--r--  arch/s390/kvm/intercept.c | 9
-rw-r--r--  arch/s390/mm/gup.c | 3
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 1
-rw-r--r--  arch/s390/mm/init.c | 38
-rw-r--r--  arch/s390/oprofile/init.c | 2
-rw-r--r--  arch/score/kernel/ptrace.c | 10
-rw-r--r--  arch/sh/Makefile | 2
-rw-r--r--  arch/sh/boards/Kconfig | 10
-rw-r--r--  arch/sh/configs/j2_defconfig | 2
-rw-r--r--  arch/sh/mm/gup.c | 3
-rw-r--r--  arch/sparc/kernel/ptrace_64.c | 24
-rw-r--r--  arch/sparc/mm/gup.c | 3
-rw-r--r--  arch/x86/entry/Makefile | 4
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl | 2
-rw-r--r--  arch/x86/entry/syscalls/syscall_64.tbl | 2
-rw-r--r--  arch/x86/events/intel/core.c | 13
-rw-r--r--  arch/x86/events/intel/cstate.c | 30
-rw-r--r--  arch/x86/events/intel/lbr.c | 4
-rw-r--r--  arch/x86/events/intel/rapl.c | 1
-rw-r--r--  arch/x86/events/intel/uncore.c | 1
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 2
-rw-r--r--  arch/x86/include/asm/intel-family.h | 1
-rw-r--r--  arch/x86/include/asm/io.h | 6
-rw-r--r--  arch/x86/include/asm/msr-index.h | 1
-rw-r--r--  arch/x86/include/asm/rwsem.h | 6
-rw-r--r--  arch/x86/include/asm/thread_info.h | 9
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 1
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 2
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 5
-rw-r--r--  arch/x86/kernel/e820.c | 2
-rw-r--r--  arch/x86/kernel/fpu/xstate.c | 2
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 11
-rw-r--r--  arch/x86/kernel/mcount_64.S | 3
-rw-r--r--  arch/x86/kernel/quirks.c | 3
-rw-r--r--  arch/x86/kernel/signal_compat.c | 3
-rw-r--r--  arch/x86/kernel/smp.c | 2
-rw-r--r--  arch/x86/kernel/smpboot.c | 16
-rw-r--r--  arch/x86/kernel/step.c | 3
-rw-r--r--  arch/x86/kernel/unwind_guess.c | 9
-rw-r--r--  arch/x86/kvm/ioapic.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/mm/gup.c | 2
-rw-r--r--  arch/x86/mm/kaslr.c | 6
-rw-r--r--  arch/x86/mm/mpx.c | 5
-rw-r--r--  arch/x86/mm/pat.c | 14
-rw-r--r--  arch/x86/platform/uv/bios_uv.c | 10
-rw-r--r--  arch/x86/um/ptrace_32.c | 3
-rw-r--r--  arch/x86/um/ptrace_64.c | 3
-rw-r--r--  arch/x86/xen/enlighten.c | 2
-rw-r--r--  block/badblocks.c | 29
-rw-r--r--  block/blk-flush.c | 28
-rw-r--r--  block/blk-mq.c | 6
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 11
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 50
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 2
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 3
-rw-r--r--  drivers/acpi/acpica/nsload.c | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 2
-rw-r--r--  drivers/acpi/pci_link.c | 38
-rw-r--r--  drivers/ata/ahci.c | 41
-rw-r--r--  drivers/block/DAC960.c | 4
-rw-r--r--  drivers/block/nbd.c | 2
-rw-r--r--  drivers/block/rbd.c | 50
-rw-r--r--  drivers/char/hw_random/core.c | 6
-rw-r--r--  drivers/char/ipmi/Kconfig | 8
-rw-r--r--  drivers/char/ipmi/Makefile | 1
-rw-r--r--  drivers/char/ipmi/bt-bmc.c | 505
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 7
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 2
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 11
-rw-r--r--  drivers/clk/clk-max77686.c | 1
-rw-r--r--  drivers/clk/hisilicon/clk-hi6220.c | 4
-rw-r--r--  drivers/clk/mediatek/Kconfig | 2
-rw-r--r--  drivers/clk/mvebu/armada-37xx-periph.c | 11
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 1
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-core.c | 20
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-mio.c | 2
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-mux.c | 2
-rw-r--r--  drivers/clk/uniphier/clk-uniphier.h | 2
-rw-r--r--  drivers/clocksource/Kconfig | 10
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/jcore-pit.c | 249
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 16
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 38
-rw-r--r--  drivers/dax/Kconfig | 2
-rw-r--r--  drivers/dax/pmem.c | 2
-rw-r--r--  drivers/firewire/nosy.c | 13
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 5
-rw-r--r--  drivers/gpio/Kconfig | 2
-rw-r--r--  drivers/gpio/gpio-ath79.c | 1
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 2
-rw-r--r--  drivers/gpio/gpio-mxs.c | 8
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 2
-rw-r--r--  drivers/gpio/gpio-ts4800.c | 1
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 7
-rw-r--r--  drivers/gpio/gpiolib.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 53
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 18
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/drm_info.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 24
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 23
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 5
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 6
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 1
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 145
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 56
-rw-r--r--  drivers/hid/hid-dr.c | 83
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-led.c | 23
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hwmon/adm9240.c | 6
-rw-r--r--  drivers/hwmon/max31790.c | 4
-rw-r--r--  drivers/i2c/busses/Kconfig | 12
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 17
-rw-r--r--  drivers/i2c/busses/i2c-digicolor.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-xlp9xx.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-xlr.c | 1
-rw-r--r--  drivers/i2c/i2c-core.c | 11
-rw-r--r--  drivers/infiniband/core/umem.c | 6
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 3
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 5
-rw-r--r--  drivers/ipack/ipack.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 4
-rw-r--r--  drivers/irqchip/irq-eznps.c | 6
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 10
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 2
-rw-r--r--  drivers/irqchip/irq-jcore-aic.c | 20
-rw-r--r--  drivers/md/dm-raid.c | 15
-rw-r--r--  drivers/md/dm-raid1.c | 22
-rw-r--r--  drivers/md/dm-rq.c | 7
-rw-r--r--  drivers/md/dm-table.c | 24
-rw-r--r--  drivers/md/dm.c | 4
-rw-r--r--  drivers/media/pci/ivtv/ivtv-udma.c | 4
-rw-r--r--  drivers/media/pci/ivtv/ivtv-yuv.c | 5
-rw-r--r--  drivers/media/platform/omap/omap_vout.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-sg.c | 7
-rw-r--r--  drivers/media/v4l2-core/videobuf2-memops.c | 6
-rw-r--r--  drivers/memstick/host/rtsx_usb_ms.c | 6
-rw-r--r--  drivers/misc/cxl/api.c | 11
-rw-r--r--  drivers/misc/cxl/context.c | 3
-rw-r--r--  drivers/misc/cxl/cxl.h | 24
-rw-r--r--  drivers/misc/cxl/file.c | 15
-rw-r--r--  drivers/misc/cxl/guest.c | 3
-rw-r--r--  drivers/misc/cxl/main.c | 42
-rw-r--r--  drivers/misc/cxl/pci.c | 2
-rw-r--r--  drivers/misc/cxl/sysfs.c | 27
-rw-r--r--  drivers/misc/mic/scif/scif_rma.c | 3
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 2
-rw-r--r--  drivers/misc/sgi-gru/grumain.c | 2
-rw-r--r--  drivers/mmc/card/block.c | 3
-rw-r--r--  drivers/mmc/card/queue.h | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 12
-rw-r--r--  drivers/mmc/host/rtsx_usb_sdmmc.c | 7
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 23
-rw-r--r--  drivers/mmc/host/sdhci-of-arasan.c | 26
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 54
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 2
-rw-r--r--  drivers/mmc/host/sdhci-pxav3.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.c | 42
-rw-r--r--  drivers/mmc/host/sdhci.h | 2
-rw-r--r--  drivers/mtd/ubi/eba.c | 1
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 2
-rw-r--r--  drivers/nvdimm/Kconfig | 2
-rw-r--r--  drivers/nvdimm/namespace_devs.c | 14
-rw-r--r--  drivers/nvdimm/pmem.c | 8
-rw-r--r--  drivers/nvme/host/core.c | 14
-rw-r--r--  drivers/nvme/host/pci.c | 77
-rw-r--r--  drivers/nvme/host/scsi.c | 4
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 8
-rw-r--r--  drivers/nvme/target/core.c | 2
-rw-r--r--  drivers/nvme/target/discovery.c | 4
-rw-r--r--  drivers/pci/host/pci-layerscape.c | 2
-rw-r--r--  drivers/pci/host/pcie-designware-plat.c | 2
-rw-r--r--  drivers/pci/msi.c | 2
-rw-r--r--  drivers/perf/xgene_pmu.c | 2
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 100
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.c | 12
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 3
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 25
-rw-r--r--  drivers/platform/goldfish/goldfish_pipe.c | 3
-rw-r--r--  drivers/platform/x86/Kconfig | 1
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 7
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c | 3
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 4
-rw-r--r--  drivers/s390/cio/chp.c | 6
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 2
-rw-r--r--  drivers/scsi/NCR5380.c | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 37
-rw-r--r--  drivers/scsi/ipr.c | 3
-rw-r--r--  drivers/scsi/libiscsi.c | 4
-rw-r--r--  drivers/scsi/scsi_dh.c | 6
-rw-r--r--  drivers/scsi/scsi_scan.c | 6
-rw-r--r--  drivers/scsi/st.c | 5
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | 3
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 39
-rw-r--r--  drivers/target/target_core_user.c | 50
-rw-r--r--  drivers/target/target_core_xcopy.c | 34
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 4
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 42
-rw-r--r--  drivers/thermal/intel_pch_thermal.c | 60
-rw-r--r--  drivers/thermal/intel_powerclamp.c | 14
-rw-r--r--  drivers/video/fbdev/pvr2fb.c | 4
-rw-r--r--  drivers/virt/fsl_hypervisor.c | 4
-rw-r--r--  drivers/watchdog/wdat_wdt.c | 4
-rw-r--r--  drivers/xen/manage.c | 45
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 4
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c | 4
-rw-r--r--  fs/btrfs/compression.c | 4
-rw-r--r--  fs/btrfs/send.c | 58
-rw-r--r--  fs/btrfs/tree-log.c | 20
-rw-r--r--  fs/ceph/file.c | 3
-rw-r--r--  fs/ceph/inode.c | 3
-rw-r--r--  fs/ceph/super.c | 2
-rw-r--r--  fs/ceph/xattr.c | 4
-rw-r--r--  fs/crypto/crypto.c | 15
-rw-r--r--  fs/crypto/policy.c | 4
-rw-r--r--  fs/exec.c | 9
-rw-r--r--  fs/exofs/dir.c | 2
-rw-r--r--  fs/ext2/inode.c | 7
-rw-r--r--  fs/ext4/block_validity.c | 4
-rw-r--r--  fs/ext4/mballoc.h | 17
-rw-r--r--  fs/ext4/namei.c | 18
-rw-r--r--  fs/ext4/super.c | 21
-rw-r--r--  fs/ext4/sysfs.c | 4
-rw-r--r--  fs/ext4/xattr.c | 20
-rw-r--r--  fs/f2fs/gc.c | 10
-rw-r--r--  fs/iomap.c | 5
-rw-r--r--  fs/isofs/inode.c | 8
-rw-r--r--  fs/jbd2/transaction.c | 3
-rw-r--r--  fs/locks.c | 6
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 3
-rw-r--r--  fs/nfs/nfs4proc.c | 2
-rw-r--r--  fs/orangefs/dcache.c | 5
-rw-r--r--  fs/orangefs/file.c | 14
-rw-r--r--  fs/orangefs/namei.c | 8
-rw-r--r--  fs/orangefs/orangefs-kernel.h | 7
-rw-r--r--  fs/proc/array.c | 9
-rw-r--r--  fs/proc/base.c | 14
-rw-r--r--  fs/proc/task_mmu.c | 29
-rw-r--r--  fs/proc/task_nommu.c | 28
-rw-r--r--  fs/ubifs/dir.c | 20
-rw-r--r--  fs/ubifs/xattr.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 418
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 8
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_dquot_buf.c | 3
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 1
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 13
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.h | 2
-rw-r--r--  fs/xfs/xfs_file.c | 232
-rw-r--r--  fs/xfs/xfs_icache.c | 8
-rw-r--r--  fs/xfs/xfs_iomap.c | 57
-rw-r--r--  fs/xfs/xfs_mount.c | 1
-rw-r--r--  fs/xfs/xfs_reflink.c | 499
-rw-r--r--  fs/xfs/xfs_reflink.h | 11
-rw-r--r--  fs/xfs/xfs_sysfs.c | 4
-rw-r--r--  fs/xfs/xfs_trace.h | 4
-rw-r--r--  include/acpi/pcc.h | 2
-rw-r--r--  include/asm-generic/export.h | 2
-rw-r--r--  include/linux/acpi.h | 1
-rw-r--r--  include/linux/clk-provider.h | 2
-rw-r--r--  include/linux/cpufreq.h | 4
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/io.h | 22
-rw-r--r--  include/linux/iomap.h | 17
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 2
-rw-r--r--  include/linux/kasan.h | 2
-rw-r--r--  include/linux/kconfig.h | 5
-rw-r--r--  include/linux/mm.h | 25
-rw-r--r--  include/linux/mmzone.h | 30
-rw-r--r--  include/linux/nvme.h | 49
-rw-r--r--  include/linux/perf_event.h | 1
-rw-r--r--  include/linux/syscalls.h | 3
-rw-r--r--  include/linux/thread_info.h | 11
-rw-r--r--  include/target/target_core_base.h | 1
-rw-r--r--  include/uapi/asm-generic/unistd.h | 4
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/bt-bmc.h | 18
-rw-r--r--  ipc/msgutil.c | 4
-rw-r--r--  kernel/cpu.c | 2
-rw-r--r--  kernel/events/core.c | 23
-rw-r--r--  kernel/events/uprobes.c | 6
-rw-r--r--  kernel/irq/manage.c | 1
-rw-r--r--  kernel/kcov.c | 9
-rw-r--r--  kernel/power/suspend.c | 4
-rw-r--r--  kernel/printk/printk.c | 4
-rw-r--r--  kernel/ptrace.c | 16
-rw-r--r--  kernel/sched/core.c | 16
-rw-r--r--  kernel/sched/fair.c | 23
-rw-r--r--  kernel/sched/wait.c | 10
-rw-r--r--  kernel/softirq.c | 2
-rw-r--r--  kernel/time/alarmtimer.c | 2
-rw-r--r--  kernel/time/timer.c | 74
-rw-r--r--  lib/Kconfig.debug | 1
-rw-r--r--  lib/genalloc.c | 3
-rw-r--r--  lib/stackdepot.c | 2
-rw-r--r--  mm/Kconfig | 2
-rw-r--r--  mm/filemap.c | 4
-rw-r--r--  mm/frame_vector.c | 9
-rw-r--r--  mm/gup.c | 67
-rw-r--r--  mm/kasan/kasan.c | 22
-rw-r--r--  mm/kmemleak.c | 7
-rw-r--r--  mm/list_lru.c | 2
-rw-r--r--  mm/memcontrol.c | 9
-rw-r--r--  mm/memory.c | 16
-rw-r--r--  mm/memory_hotplug.c | 29
-rw-r--r--  mm/mempolicy.c | 2
-rw-r--r--  mm/mprotect.c | 1
-rw-r--r--  mm/nommu.c | 40
-rw-r--r--  mm/page_alloc.c | 131
-rw-r--r--  mm/process_vm_access.c | 7
-rw-r--r--  mm/slab.c | 45
-rw-r--r--  mm/slab.h | 1
-rw-r--r--  mm/util.c | 12
-rw-r--r--  mm/vmscan.c | 2
-rw-r--r--  net/ceph/pagevec.c | 2
-rw-r--r--  security/keys/Kconfig | 2
-rw-r--r--  security/keys/big_key.c | 59
-rw-r--r--  security/keys/proc.c | 2
-rw-r--r--  security/selinux/hooks.c | 2
-rw-r--r--  security/tomoyo/domain.c | 2
-rw-r--r--  sound/core/seq/seq_timer.c | 4
-rw-r--r--  sound/pci/asihpi/hpioctl.c | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 7
-rw-r--r--  sound/pci/hda/patch_realtek.c | 30
-rw-r--r--  sound/usb/quirks-table.h | 17
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 2
-rw-r--r--  tools/objtool/arch/x86/decode.c | 9
-rw-r--r--  tools/objtool/builtin-check.c | 68
-rw-r--r--  tools/perf/jvmti/Makefile | 2
-rw-r--r--  tools/perf/ui/browsers/hists.c | 3
-rw-r--r--  tools/perf/util/header.c | 2
-rw-r--r--  tools/perf/util/parse-events.l | 4
-rw-r--r--  virt/kvm/async_pf.c | 3
-rw-r--r--  virt/kvm/kvm_main.c | 21
478 files changed, 4845 insertions(+), 2855 deletions(-)
diff --git a/CREDITS b/CREDITS
index 513aaa3546bf..837367624e45 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1864,10 +1864,11 @@ S: The Netherlands
 
 N: Martin Kepplinger
 E: martink@posteo.de
-E: martin.kepplinger@theobroma-systems.com
+E: martin.kepplinger@ginzinger.com
 W: http://www.martinkepplinger.com
 D: mma8452 accelerators iio driver
-D: Kernel cleanups
+D: pegasus_notetaker input driver
+D: Kernel fixes and cleanups
 S: Garnisonstraße 26
 S: 4020 Linz
 S: Austria
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 4ba0a2a61926..640f65e79ef1 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -220,8 +220,11 @@ What: /sys/class/cxl/<card>/reset
 Date:           October 2014
 Contact:        linuxppc-dev@lists.ozlabs.org
 Description:    write only
-                Writing 1 will issue a PERST to card which may cause the card
-                to reload the FPGA depending on load_image_on_perst.
+                Writing 1 will issue a PERST to card provided there are no
+                contexts active on any one of the card AFUs. This may cause
+                the card to reload the FPGA depending on load_image_on_perst.
+                Writing -1 will do a force PERST irrespective of any active
+                contexts on the card AFUs.
 Users:          https://github.com/ibm-capi/libcxl
 
 What:           /sys/class/cxl/<card>/perst_reloads_same_image (not in a guest)
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index e5b6497116f4..c75b64a85859 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -309,3 +309,4 @@ Version History
 	with a reshape in progress.
 1.9.0	Add support for RAID level takeover/reshape/region size
 	and set size reduction.
+1.9.1	Fix activation of existing RAID 4/10 mapped devices
diff --git a/Documentation/devicetree/bindings/clock/uniphier-clock.txt b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
index c7179d3b5c33..812163060fa3 100644
--- a/Documentation/devicetree/bindings/clock/uniphier-clock.txt
+++ b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
@@ -24,7 +24,7 @@ Example:
 		reg = <0x61840000 0x4000>;
 
 		clock {
-			compatible = "socionext,uniphier-ld20-clock";
+			compatible = "socionext,uniphier-ld11-clock";
 			#clock-cells = <1>;
 		};
 
@@ -43,8 +43,8 @@ Provided clocks:
 21: USB3 ch1 PHY1
 
 
-Media I/O (MIO) clock
----------------------
+Media I/O (MIO) clock, SD clock
+-------------------------------
 
 Required properties:
 - compatible: should be one of the following:
@@ -52,10 +52,10 @@ Required properties:
     "socionext,uniphier-ld4-mio-clock"  - for LD4 SoC.
     "socionext,uniphier-pro4-mio-clock" - for Pro4 SoC.
     "socionext,uniphier-sld8-mio-clock" - for sLD8 SoC.
-    "socionext,uniphier-pro5-mio-clock" - for Pro5 SoC.
-    "socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC.
+    "socionext,uniphier-pro5-sd-clock"  - for Pro5 SoC.
+    "socionext,uniphier-pxs2-sd-clock"  - for PXs2/LD6b SoC.
     "socionext,uniphier-ld11-mio-clock" - for LD11 SoC.
-    "socionext,uniphier-ld20-mio-clock" - for LD20 SoC.
+    "socionext,uniphier-ld20-sd-clock"  - for LD20 SoC.
 - #clock-cells: should be 1.
 
 Example:
@@ -66,7 +66,7 @@ Example:
 		reg = <0x59810000 0x800>;
 
 		clock {
-			compatible = "socionext,uniphier-ld20-mio-clock";
+			compatible = "socionext,uniphier-ld11-mio-clock";
 			#clock-cells = <1>;
 		};
 
@@ -112,7 +112,7 @@ Example:
 		reg = <0x59820000 0x200>;
 
 		clock {
-			compatible = "socionext,uniphier-ld20-peri-clock";
+			compatible = "socionext,uniphier-ld11-peri-clock";
 			#clock-cells = <1>;
 		};
 
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
new file mode 100644
index 000000000000..fbbacd958240
--- /dev/null
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
@@ -0,0 +1,23 @@
+* Aspeed BT (Block Transfer) IPMI interface
+
+The Aspeed SOCs (AST2400 and AST2500) are commonly used as BMCs
+(BaseBoard Management Controllers) and the BT interface can be used to
+perform in-band IPMI communication with their host.
+
+Required properties:
+
+- compatible : should be "aspeed,ast2400-bt-bmc"
+- reg: physical address and size of the registers
+
+Optional properties:
+
+- interrupts: interrupt generated by the BT interface. without an
+  interrupt, the driver will operate in poll mode.
+
+Example:
+
+	ibt@1e789140 {
+		compatible = "aspeed,ast2400-bt-bmc";
+		reg = <0x1e789140 0x18>;
+		interrupts = <8>;
+	};
diff --git a/Documentation/devicetree/bindings/ipmi.txt b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
index d5f1a877ed3e..d5f1a877ed3e 100644
--- a/Documentation/devicetree/bindings/ipmi.txt
+++ b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
index 5e60ad18f147..2ad18c4ea55c 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -43,7 +43,9 @@ aspeed,ast2500-pinctrl, aspeed,g5-pinctrl:
 
 GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8
 I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7
-RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8
+RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 SPI1DEBUG SPI1PASSTHRU TIMER4 TIMER5 TIMER6
+TIMER7 TIMER8 VGABIOSROM
+
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/timer/jcore,pit.txt b/Documentation/devicetree/bindings/timer/jcore,pit.txt
new file mode 100644
index 000000000000..af5dd35469d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/jcore,pit.txt
@@ -0,0 +1,24 @@
+J-Core Programmable Interval Timer and Clocksource
+
+Required properties:
+
+- compatible: Must be "jcore,pit".
+
+- reg: Memory region(s) for timer/clocksource registers. For SMP,
+  there should be one region per cpu, indexed by the sequential,
+  zero-based hardware cpu number.
+
+- interrupts: An interrupt to assign for the timer. The actual pit
+  core is integrated with the aic and allows the timer interrupt
+  assignment to be programmed by software, but this property is
+  required in order to reserve an interrupt number that doesn't
+  conflict with other devices.
+
+
+Example:
+
+timer@200 {
+	compatible = "jcore,pit";
+	reg = < 0x200 0x30 0x500 0x30 >;
+	interrupts = < 0x48 >;
+};
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 219ffd41a911..74329fd0add2 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -395,32 +395,6 @@ is not associated with a file:
395 395
396 or if empty, the mapping is anonymous. 396 or if empty, the mapping is anonymous.
397 397
398The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
399of the individual tasks of a process. In this file you will see a mapping marked
400as [stack] if that task sees it as a stack. Hence, for the example above, the
401task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
402
40308048000-08049000 r-xp 00000000 03:00 8312 /opt/test
40408049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
4050804a000-0806b000 rw-p 00000000 00:00 0 [heap]
406a7cb1000-a7cb2000 ---p 00000000 00:00 0
407a7cb2000-a7eb2000 rw-p 00000000 00:00 0
408a7eb2000-a7eb3000 ---p 00000000 00:00 0
409a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack]
410a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
411a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
412a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
413a800b000-a800e000 rw-p 00000000 00:00 0
414a800e000-a8022000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0
415a8022000-a8023000 r--p 00013000 03:00 14462 /lib/libpthread.so.0
416a8023000-a8024000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0
417a8024000-a8027000 rw-p 00000000 00:00 0
418a8027000-a8043000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2
419a8043000-a8044000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2
420a8044000-a8045000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2
421aff35000-aff4a000 rw-p 00000000 00:00 0
422ffffe000-fffff000 r-xp 00000000 00:00 0 [vdso]
423
424The /proc/PID/smaps is an extension based on maps, showing the memory 398The /proc/PID/smaps is an extension based on maps, showing the memory
425consumption for each of the process's mappings. For each of mappings there 399consumption for each of the process's mappings. For each of mappings there
426is a series of lines such as the following: 400is a series of lines such as the following:
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index 40884c4fe40c..a0f61898d493 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -6,7 +6,7 @@ Note that it only applies to the new descriptor-based interface. For a
 description of the deprecated integer-based GPIO interface please refer to
 gpio-legacy.txt (actually, there is no real mapping possible with the old
 interface; you just fetch an integer from somewhere and request the
-corresponding GPIO.
+corresponding GPIO).
 
 All platforms can enable the GPIO library, but if the platform strictly
 requires GPIO functionality to be present, it needs to select GPIOLIB from its
@@ -162,6 +162,9 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
 
 Since the "led" GPIOs are mapped as active-high, this example will switch their
 signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
-as active-low, its actual signal will be 0 after this code. Contrary to the legacy
-integer GPIO interface, the active-low property is handled during mapping and is
-thus transparent to GPIO consumers.
+as active-low, its actual signal will be 0 after this code. Contrary to the
+legacy integer GPIO interface, the active-low property is handled during
+mapping and is thus transparent to GPIO consumers.
+
+A set of functions such as gpiod_set_value() is available to work with
+the new descriptor-oriented interface.
diff --git a/MAINTAINERS b/MAINTAINERS
index 1cd38a7e0064..f30b8ea700fd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4620,8 +4620,9 @@ F: sound/usb/misc/ua101.c
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
 M:	Matt Fleming <matt@codeblueprint.co.uk>
+M:	Ard Biesheuvel <ard.biesheuvel@linaro.org>
 L:	linux-efi@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
 S:	Maintained
 F:	Documentation/efi-stub.txt
 F:	arch/ia64/kernel/efi.c
@@ -8099,6 +8100,7 @@ S: Maintained
 F:	drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
+M:	Jessica Yu <jeyu@redhat.com>
 M:	Rusty Russell <rusty@rustcorp.com.au>
 S:	Maintained
 F:	include/linux/module.h
@@ -8212,7 +8214,7 @@ F: include/linux/mfd/
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
 M:	Ulf Hansson <ulf.hansson@linaro.org>
 L:	linux-mmc@vger.kernel.org
-T:	git git://git.linaro.org/people/ulf.hansson/mmc.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
 S:	Maintained
 F:	Documentation/devicetree/bindings/mmc/
 F:	drivers/mmc/
@@ -9299,7 +9301,7 @@ S: Maintained
 F:	drivers/pci/host/*designware*
 
 PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
-M:	Joao Pinto <jpinto@synopsys.com>
+M:	Jose Abreu <Jose.Abreu@synopsys.com>
 L:	linux-pci@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/pci/designware-pcie.txt
diff --git a/Makefile b/Makefile
index 512e47a53e9a..93beca4312c4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index d9ee81769899..940dfb406591 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -157,14 +157,16 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
 static inline int
 read_int(struct task_struct *task, unsigned long addr, int * data)
 {
-	int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+	int copied = access_process_vm(task, addr, data, sizeof(int),
+			FOLL_FORCE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
 static inline int
 write_int(struct task_struct *task, unsigned long addr, int data)
 {
-	int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+	int copied = access_process_vm(task, addr, &data, sizeof(int),
+			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
@@ -281,7 +283,8 @@ long arch_ptrace(struct task_struct *child, long request,
 	/* When I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA:
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+				FOLL_FORCE);
 		ret = -EIO;
 		if (copied != sizeof(tmp))
 			break;
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ecd12379e2cd..bd204bfa29ed 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -41,6 +41,8 @@ config ARC
 	select PERF_USE_VMALLOC
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_GENERIC_DMA_COHERENT
+	select HAVE_KERNEL_GZIP
+	select HAVE_KERNEL_LZMA
 
 config MIGHT_HAVE_PCI
 	bool
@@ -186,14 +188,6 @@ if SMP
 config ARC_HAS_COH_CACHES
 	def_bool n
 
-config ARC_MCIP
-	bool "ARConnect Multicore IP (MCIP) Support "
-	depends on ISA_ARCV2
-	help
-	  This IP block enables SMP in ARC-HS38 cores.
-	  It provides for cross-core interrupts, multi-core debug
-	  hardware semaphores, shared memory,....
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-4096)"
 	range 2 4096
@@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET
 
 endif	#SMP
 
+config ARC_MCIP
+	bool "ARConnect Multicore IP (MCIP) Support "
+	depends on ISA_ARCV2
+	default y if SMP
+	help
+	  This IP block enables SMP in ARC-HS38 cores.
+	  It provides for cross-core interrupts, multi-core debug
+	  hardware semaphores, shared memory,....
+
 menuconfig ARC_CACHE
 	bool "Enable Cache Support"
 	default y
@@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
 	bool "Paranoia Checks in Low Level TLB Handlers"
 	default n
 
-config ARC_DBG_TLB_MISS_COUNT
-	bool "Profile TLB Misses"
-	default n
-	select DEBUG_FS
-	help
-	  Counts number of I and D TLB Misses and exports them via Debugfs
-	  The counters can be cleared via Debugfs as well
-
 endif
 
 config ARC_UBOOT_SUPPORT
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aa82d13d4213..864adad52280 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,9 +50,6 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
 
 cflags-$(atleast_gcc44)			+= -fsection-anchors
 
-cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
-cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
-
 ifdef CONFIG_ISA_ARCV2
 
 ifndef CONFIG_ARC_HAS_LL64
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index e597cb34c16a..f94cf151e06a 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
 
 suffix-y := bin
 suffix-$(CONFIG_KERNEL_GZIP)	:= gz
+suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
 
-targets += uImage uImage.bin uImage.gz
-extra-y += vmlinux.bin vmlinux.bin.gz
+targets += uImage
+targets += uImage.bin
+targets += uImage.gz
+targets += uImage.lzma
+extra-y += vmlinux.bin
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
@@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
+
 $(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,uimage,none)
 
 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 	$(call if_changed,uimage,gzip)
 
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+	$(call if_changed,uimage,lzma)
+
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)
 	@ln -sf $(notdir $<) $@
 	@echo '  Image $@ is ready'
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index db25c65155cb..7f3f9f63708c 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -349,10 +349,11 @@ struct cpuinfo_arc {
 	struct cpuinfo_arc_bpu bpu;
 	struct bcr_identity core;
 	struct bcr_isa isa;
+	const char *details, *name;
 	unsigned int vec_base;
 	struct cpuinfo_arc_ccm iccm, dccm;
 	struct {
-		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
+		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
 			     fpu_sp:1, fpu_dp:1, pad2:6,
 			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
 			     timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index fb781e34f322..b3410ff6a62d 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -53,7 +53,7 @@ extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
 
-extern int ioc_exists;
+extern int ioc_enable;
 extern unsigned long perip_base, perip_end;
 
 #endif	/* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index 7096f97a1434..aa2d6da9d187 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
  * the loader.  We need to make sure that it is out of the way of the program
  * that it will "exec", and that there is sufficient room for the brk.
  */
-#define ELF_ET_DYN_BASE		(2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE		(2UL * TASK_SIZE / 3)
 
 /*
  * When the program starts, a1 contains a pointer to a function to be
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 847e3bbe387f..c8fbe4114bad 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -55,6 +55,22 @@ struct mcip_cmd {
 #define IDU_M_DISTRI_DEST		0x2
 };
 
+struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		unsigned int pad3:8,
+			     idu:1, llm:1, num_cores:6,
+			     iocoh:1, gfrc:1, dbg:1, pad2:1,
+			     msg:1, sem:1, ipi:1, pad:1,
+			     ver:8;
+#else
+		unsigned int ver:8,
+			     pad:1, ipi:1, sem:1, msg:1,
+			     pad2:1, dbg:1, gfrc:1, iocoh:1,
+			     num_cores:6, llm:1, idu:1,
+			     pad3:8;
+#endif
+};
+
 /*
  * MCIP programming model
  *
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 518222bb3f8e..6e91d8b339c3 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -18,6 +18,7 @@
 struct mod_arch_specific {
 	void *unw_info;
 	int unw_sec_idx;
+	const char *secstr;
 };
 #endif
 
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 48b37c693db3..cb954cdab070 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -27,11 +27,6 @@ struct id_to_str {
 	const char *str;
 };
 
-struct cpuinfo_data {
-	struct id_to_str info;
-	int up_range;
-};
-
 extern int root_mountflags, end_mem;
 
 void setup_processor(void);
@@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
 #define IS_USED_RUN(v)		((v) ? "" : "(not used) ")
 #define IS_USED_CFG(cfg)	IS_USED_RUN(IS_ENABLED(cfg))
 #define IS_AVAIL2(v, s, cfg)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+#define IS_AVAIL3(v, v2, s)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
 
 #endif	/* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e56f9fcc5581..772b67ca56e7 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
 int sys_cacheflush(uint32_t, uint32_t uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
+int sys_arc_usr_cmpxchg(int *, int, int);
 
 #include <asm-generic/syscalls.h>
 
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 41fa2ec9e02c..9a34136d84b2 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -27,18 +27,19 @@
 
 #define NR_syscalls	__NR_syscalls
 
+/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
+#define __NR_sysfs		(__NR_arch_specific_syscall + 3)
+
 /* ARC specific syscall */
 #define __NR_cacheflush		(__NR_arch_specific_syscall + 0)
 #define __NR_arc_settls		(__NR_arch_specific_syscall + 1)
 #define __NR_arc_gettls		(__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg	(__NR_arch_specific_syscall + 4)
 
 __SYSCALL(__NR_cacheflush, sys_cacheflush)
 __SYSCALL(__NR_arc_settls, sys_arc_settls)
 __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs		(__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
 #undef __SYSCALL
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 72f9179b1a24..c424d5abc318 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -15,11 +15,12 @@
 #include <asm/mcip.h>
 #include <asm/setup.h>
 
-static char smp_cpuinfo_buf[128];
-static int idu_detected;
-
 static DEFINE_RAW_SPINLOCK(mcip_lock);
 
+#ifdef CONFIG_SMP
+
+static char smp_cpuinfo_buf[128];
+
 static void mcip_setup_per_cpu(int cpu)
 {
 	smp_ipi_irq_setup(cpu, IPI_IRQ);
@@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
 
 static void mcip_probe_n_setup(void)
 {
-	struct mcip_bcr {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-		unsigned int pad3:8,
-			     idu:1, llm:1, num_cores:6,
-			     iocoh:1, gfrc:1, dbg:1, pad2:1,
-			     msg:1, sem:1, ipi:1, pad:1,
-			     ver:8;
-#else
-		unsigned int ver:8,
-			     pad:1, ipi:1, sem:1, msg:1,
-			     pad2:1, dbg:1, gfrc:1, iocoh:1,
-			     num_cores:6, llm:1, idu:1,
-			     pad3:8;
-#endif
-	} mp;
+	struct mcip_bcr mp;
 
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 
@@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
 		     IS_AVAIL1(mp.gfrc, "GFRC"));
 
 	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-	idu_detected = mp.idu;
 
 	if (mp.dbg) {
 		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
@@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
 	.ipi_clear	= mcip_ipi_clear,
 };
 
+#endif
+
 /***************************************************************************
  * ARCv2 Interrupt Distribution Unit (IDU)
  *
@@ -295,8 +283,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
 	/* Read IDU BCR to confirm nr_irqs */
 	int nr_irqs = of_irq_count(intc);
 	int i, irq;
+	struct mcip_bcr mp;
+
+	READ_BCR(ARC_REG_MCIP_BCR, mp);
 
-	if (!idu_detected)
+	if (!mp.idu)
 		panic("IDU not detected, but DeviceTree using it");
 
 	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 9a2849756022..42e964db2967 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 			      char *secstr, struct module *mod)
 {
 #ifdef CONFIG_ARC_DW2_UNWIND
-	int i;
-
 	mod->arch.unw_sec_idx = 0;
 	mod->arch.unw_info = NULL;
-
-	for (i = 1; i < hdr->e_shnum; i++) {
-		if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
-			mod->arch.unw_sec_idx = i;
-			break;
-		}
-	}
+	mod->arch.secstr = secstr;
 #endif
 	return 0;
 }
@@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 		       unsigned int relsec,	/* sec index for relo sec */
 		       struct module *module)
 {
-	int i, n;
+	int i, n, relo_type;
 	Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
 	Elf32_Sym *sym_entry, *sym_sec;
-	Elf32_Addr relocation;
-	Elf32_Addr location;
-	Elf32_Addr sec_to_patch;
-	int relo_type;
-
-	sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+	Elf32_Addr relocation, location, tgt_addr;
+	unsigned int tgtsec;
+
+	/*
+	 * @relsec has relocations e.g. .rela.init.text
+	 * @tgtsec is section to patch e.g. .init.text
+	 */
+	tgtsec = sechdrs[relsec].sh_info;
+	tgt_addr = sechdrs[tgtsec].sh_addr;
 	sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
 	n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
 
-	pr_debug("\n========== Module Sym reloc ===========================\n");
-	pr_debug("Section to fixup %x\n", sec_to_patch);
+	pr_debug("\nSection to fixup %s @%x\n",
+		 module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
 	pr_debug("=========================================================\n");
-	pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+	pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
 	pr_debug("=========================================================\n");
 
 	/* Loop thru entries in relocation section */
 	for (i = 0; i < n; i++) {
+		const char *s;
 
 		/* This is where to make the change */
-		location = sec_to_patch + rel_entry[i].r_offset;
+		location = tgt_addr + rel_entry[i].r_offset;
 
 		/* This is the symbol it is referring to.  Note that all
 		   undefined symbols have been resolved.  */
@@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
 		relocation = sym_entry->st_value + rel_entry[i].r_addend;
 
-		pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
-			rel_entry[i].r_offset, rel_entry[i].r_addend,
-			sym_entry->st_value, location, relocation,
-			strtab + sym_entry->st_name);
+		if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
+			s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
+		} else {
+			s = strtab + sym_entry->st_name;
+		}
+
+		pr_debug("   %x\t%x\t%x %x %x [%s]\n",
+			 rel_entry[i].r_offset, rel_entry[i].r_addend,
+			 sym_entry->st_value, location, relocation, s);
 
 		/* This assumes modules are built with -mlong-calls
 		 * so any branches/jumps are absolute 32 bit jmps
@@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 			goto relo_err;
 
 	}
+
+	if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+		module->arch.unw_sec_idx = tgtsec;
+
 	return 0;
 
 relo_err:
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index be1972bd2729..59aa43cb146e 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,6 +41,39 @@ SYSCALL_DEFINE0(arc_gettls)
 	return task_thread_info(current)->thr_ptr;
 }
 
+SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
+{
+	int uval;
+	int ret;
+
+	/*
+	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
+	 * can't possibly be SMP. Thus doesn't need to be SMP safe.
+	 * And this also helps reduce the overhead for serializing in
+	 * the UP case
+	 */
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	preempt_disable();
+
+	ret = __get_user(uval, uaddr);
+	if (ret)
+		goto done;
+
+	if (uval != expected)
+		ret = -EAGAIN;
+	else
+		ret = __put_user(new, uaddr);
+
+done:
+	preempt_enable();
+
+	return ret;
+}
+
 void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3df7f9c72f42..0385df77a697 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
 
 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 
+static const struct id_to_str arc_cpu_rel[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+	{ 0x34, "R4.10"},
+	{ 0x35, "R4.11"},
+#else
+	{ 0x51, "R2.0" },
+	{ 0x52, "R2.1" },
+	{ 0x53, "R3.0" },
+#endif
+	{ 0x00, NULL }
+};
+
+static const struct id_to_str arc_cpu_nm[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+	{ 0x20, "ARC 600" },
+	{ 0x30, "ARC 770" },	/* 750 identified seperately */
+#else
+	{ 0x40, "ARC EM" },
+	{ 0x50, "ARC HS38" },
+#endif
+	{ 0x00, "Unknown" }
+};
+
 static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
 {
 	if (is_isa_arcompact()) {
@@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
 	struct bcr_timer timer;
 	struct bcr_generic bcr;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+	const struct id_to_str *tbl;
+
 	FIX_PTR(cpu);
 
 	READ_BCR(AUX_IDENTITY, cpu->core);
 	READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
+	for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
+		if (cpu->core.family == tbl->id) {
+			cpu->details = tbl->str;
+			break;
+		}
+	}
+
+	for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
+		if ((cpu->core.family & 0xF0) == tbl->id)
+			break;
+	}
+	cpu->name = tbl->str;
+
 	READ_BCR(ARC_REG_TIMERS_BCR, timer);
 	cpu->extn.timer0 = timer.t0;
 	cpu->extn.timer1 = timer.t1;
@@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
 	cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;	/* 1,3 */
 	cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
 	cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
+	cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
+				IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
+
 	READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
 
 	/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
@@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
 	cpu->extn.rtt = bcr.ver ? 1 : 0;
 
 	cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-}
 
-static const struct cpuinfo_data arc_cpu_tbl[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
-	{ {0x20, "ARC 600" }, 0x2F},
-	{ {0x30, "ARC 700" }, 0x33},
-	{ {0x34, "ARC 700 R4.10"}, 0x34},
-	{ {0x35, "ARC 700 R4.11"}, 0x35},
-#else
-	{ {0x50, "ARC HS38 R2.0"}, 0x51},
-	{ {0x52, "ARC HS38 R2.1"}, 0x52},
-	{ {0x53, "ARC HS38 R3.0"}, 0x53},
-#endif
-	{ {0x00, NULL } }
-};
+	/* some hacks for lack of feature BCR info in old ARC700 cores */
+	if (is_isa_arcompact()) {
+		if (!cpu->isa.ver)	/* ISA BCR absent, use Kconfig info */
+			cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+		else
+			cpu->isa.atomic = cpu->isa.atomic1;
 
+		cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+
+		/* there's no direct way to distinguish 750 vs. 770 */
+		if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
+			cpu->name = "ARC750";
+	}
+}
 
 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
 	struct bcr_identity *core = &cpu->core;
-	const struct cpuinfo_data *tbl;
-	char *isa_nm;
-	int i, be, atomic;
-	int n = 0;
+	int i, n = 0;
 
 	FIX_PTR(cpu);
 
-	if (is_isa_arcompact()) {
-		isa_nm = "ARCompact";
-		be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
-
-		atomic = cpu->isa.atomic1;
-		if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
-			atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
-	} else {
-		isa_nm = "ARCv2";
-		be = cpu->isa.be;
-		atomic = cpu->isa.atomic;
-	}
-
 	n += scnprintf(buf + n, len - n,
 		       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
 		       core->family, core->cpu_id, core->chip_id);
 
-	for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
-		if ((core->family >= tbl->info.id) &&
+	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+		       cpu_id, cpu->name, cpu->details,
210 (core->family <= tbl->up_range)) { 234 is_isa_arcompact() ? "ARCompact" : "ARCv2",
211 n += scnprintf(buf + n, len - n, 235 IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
212 "processor [%d]\t: %s (%s ISA) %s\n",
213 cpu_id, tbl->info.str, isa_nm,
214 IS_AVAIL1(be, "[Big-Endian]"));
215 break;
216 }
217 }
218
219 if (tbl->info.id == 0)
220 n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
221 236
222 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ", 237 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
223 IS_AVAIL1(cpu->extn.timer0, "Timer0 "), 238 IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
226 CONFIG_ARC_HAS_RTC)); 241 CONFIG_ARC_HAS_RTC));
227 242
228 n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", 243 n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
229 IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC), 244 IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
230 IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64), 245 IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
231 IS_AVAIL1(cpu->isa.unalign, "unalign (not used)")); 246 IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
232 247
@@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
253 IS_AVAIL1(cpu->extn.swap, "swap "), 268 IS_AVAIL1(cpu->extn.swap, "swap "),
254 IS_AVAIL1(cpu->extn.minmax, "minmax "), 269 IS_AVAIL1(cpu->extn.minmax, "minmax "),
255 IS_AVAIL1(cpu->extn.crc, "crc "), 270 IS_AVAIL1(cpu->extn.crc, "crc "),
256 IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE)); 271 IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
257 272
258 if (cpu->bpu.ver) 273 if (cpu->bpu.ver)
259 n += scnprintf(buf + n, len - n, 274 n += scnprintf(buf + n, len - n,
@@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
272 287
273 FIX_PTR(cpu); 288 FIX_PTR(cpu);
274 289
275 n += scnprintf(buf + n, len - n, 290 n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
276 "Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
277 cpu->vec_base, perip_base, perip_end);
278 291
279 if (cpu->extn.fpu_sp || cpu->extn.fpu_dp) 292 if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
280 n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n", 293 n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
@@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
507 * way to pass it w/o having to kmalloc/free a 2 byte string. 520 * way to pass it w/o having to kmalloc/free a 2 byte string.
508 * Encode cpu-id as 0xFFcccc, which is decoded by show routine. 521 * Encode cpu-id as 0xFFcccc, which is decoded by show routine.
509 */ 522 */
510 return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL; 523 return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
511} 524}
512 525
513static void *c_next(struct seq_file *m, void *v, loff_t *pos) 526static void *c_next(struct seq_file *m, void *v, loff_t *pos)
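
The two lookup loops added to read_arc_build_cfg_regs() share a sentinel-terminated table idiom, and the name lookup leans on the sentinel doubling as a fallback. A stand-alone sketch with the struct layout and values taken from the hunk; the helper name is invented for illustration.

struct id_to_str {
	int id;
	const char *str;
};

static const struct id_to_str arc_cpu_nm[] = {
	{ 0x40, "ARC EM" },
	{ 0x50, "ARC HS38" },
	{ 0x00, "Unknown" },	/* sentinel doubles as the fallback name */
};

static const char *arc_family_name(int family)
{
	const struct id_to_str *tbl;

	/* mask the minor revision so families 0x51..0x53 match the 0x50 row */
	for (tbl = arc_cpu_nm; tbl->id != 0; tbl++)
		if ((family & 0xF0) == tbl->id)
			break;
	return tbl->str;	/* the sentinel's "Unknown" if nothing matched */
}
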
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 934150e7ac48..82f9bc819f4a 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
237 if (!user_mode(regs)) 237 if (!user_mode(regs))
238 show_stacktrace(current, regs); 238 show_stacktrace(current, regs);
239} 239}
240
241#ifdef CONFIG_DEBUG_FS
242
243#include <linux/module.h>
244#include <linux/fs.h>
245#include <linux/mount.h>
246#include <linux/pagemap.h>
247#include <linux/init.h>
248#include <linux/namei.h>
249#include <linux/debugfs.h>
250
251static struct dentry *test_dentry;
252static struct dentry *test_dir;
253static struct dentry *test_u32_dentry;
254
255static u32 clr_on_read = 1;
256
257#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
258u32 numitlb, numdtlb, num_pte_not_present;
259
260static int fill_display_data(char *kbuf)
261{
262 size_t num = 0;
263 num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
264 num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
265 num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
266
267 if (clr_on_read)
268 numitlb = numdtlb = num_pte_not_present = 0;
269
270 return num;
271}
272
273static int tlb_stats_open(struct inode *inode, struct file *file)
274{
275 file->private_data = (void *)__get_free_page(GFP_KERNEL);
276 return 0;
277}
278
279/* called on user read(): display the counters */
280static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
281 char __user *user_buf, /* user buffer */
282 size_t len, /* length of buffer */
283 loff_t *offset) /* offset in the file */
284{
285 size_t num;
286 char *kbuf = (char *)file->private_data;
287
 288 /* All of the data can be shoved in one iteration */
289 if (*offset != 0)
290 return 0;
291
292 num = fill_display_data(kbuf);
293
 294 /* simple_read_from_buffer() is a helper for copying to user space.
295 It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
296 @3 (offset) into the user space address starting at @1 (user_buf).
297 @5 (len) is max size of user buffer
298 */
299 return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
300}
301
302/* called on user write : clears the counters */
303static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
304 size_t length, loff_t *offset)
305{
306 numitlb = numdtlb = num_pte_not_present = 0;
307 return length;
308}
309
310static int tlb_stats_close(struct inode *inode, struct file *file)
311{
312 free_page((unsigned long)(file->private_data));
313 return 0;
314}
315
316static const struct file_operations tlb_stats_file_ops = {
317 .read = tlb_stats_output,
318 .write = tlb_stats_clear,
319 .open = tlb_stats_open,
320 .release = tlb_stats_close
321};
322#endif
323
324static int __init arc_debugfs_init(void)
325{
326 test_dir = debugfs_create_dir("arc", NULL);
327
328#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
329 test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
330 &tlb_stats_file_ops);
331#endif
332
333 test_u32_dentry =
334 debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
335
336 return 0;
337}
338
339module_init(arc_debugfs_init);
340
341static void __exit arc_debugfs_exit(void)
342{
343 debugfs_remove(test_u32_dentry);
344 debugfs_remove(test_dentry);
345 debugfs_remove(test_dir);
346}
347module_exit(arc_debugfs_exit);
348
349#endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 97dddbefb86a..2b96cfc3be75 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -22,8 +22,8 @@
22#include <asm/setup.h> 22#include <asm/setup.h>
23 23
24static int l2_line_sz; 24static int l2_line_sz;
25int ioc_exists; 25static int ioc_exists;
26volatile int slc_enable = 1, ioc_enable = 1; 26int slc_enable = 1, ioc_enable = 1;
27unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ 27unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
28unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ 28unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
29 29
@@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
53 PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache"); 53 PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
54 PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache"); 54 PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
55 55
56 if (!is_isa_arcv2())
57 return buf;
58
59 p = &cpuinfo_arc700[c].slc; 56 p = &cpuinfo_arc700[c].slc;
60 if (p->ver) 57 if (p->ver)
61 n += scnprintf(buf + n, len - n, 58 n += scnprintf(buf + n, len - n,
62 "SLC\t\t: %uK, %uB Line%s\n", 59 "SLC\t\t: %uK, %uB Line%s\n",
63 p->sz_k, p->line_len, IS_USED_RUN(slc_enable)); 60 p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
64 61
65 if (ioc_exists) 62 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
66 n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n", 63 perip_base,
67 IS_DISABLED_RUN(ioc_enable)); 64 IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
68 65
69 return buf; 66 return buf;
70} 67}
@@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
113 } 110 }
114 111
115 READ_BCR(ARC_REG_CLUSTER_BCR, cbcr); 112 READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
116 if (cbcr.c && ioc_enable) 113 if (cbcr.c)
117 ioc_exists = 1; 114 ioc_exists = 1;
115 else
116 ioc_enable = 0;
118 117
119 /* HS 2.0 didn't have AUX_VOL */ 118 /* HS 2.0 didn't have AUX_VOL */
120 if (cpuinfo_arc700[cpu].core.family > 0x51) { 119 if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1002,7 +1001,7 @@ void arc_cache_init(void)
1002 read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE); 1001 read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
1003 } 1002 }
1004 1003
1005 if (is_isa_arcv2() && ioc_exists) { 1004 if (is_isa_arcv2() && ioc_enable) {
1006 /* IO coherency base - 0x8z */ 1005 /* IO coherency base - 0x8z */
1007 write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000); 1006 write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
1008 /* IO coherency aperture size - 512Mb: 0x8z-0xAz */ 1007 /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
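
These cache.c hunks split hardware presence from the runtime knob: ioc_exists now only records what the cluster BCR advertises, and ioc_enable is forced off when the hardware is absent, so consumers (the mm/dma.c hunks below) can test ioc_enable alone. A condensed sketch of that relationship, with cluster_has_ioc() standing in for the READ_BCR(ARC_REG_CLUSTER_BCR, ...) decode:

#include <stdbool.h>

static int ioc_exists;		/* hardware capability, for the boot log */
static int ioc_enable = 1;	/* runtime knob consulted by the DMA path */

static bool cluster_has_ioc(void)
{
	return true;	/* assume present; the real code decodes the BCR */
}

static void probe_ioc(void)
{
	if (cluster_has_ioc())
		ioc_exists = 1;	/* present: ioc_enable keeps its default */
	else
		ioc_enable = 0;	/* absent: force the knob off so callers
				 * need only check ioc_enable */
}
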
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 20afc65e22dc..60aab5a7522b 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
45 * -For coherent data, Read/Write to buffers terminate early in cache 45 * -For coherent data, Read/Write to buffers terminate early in cache
46 * (vs. always going to memory - thus are faster) 46 * (vs. always going to memory - thus are faster)
47 */ 47 */
48 if ((is_isa_arcv2() && ioc_exists) || 48 if ((is_isa_arcv2() && ioc_enable) ||
49 (attrs & DMA_ATTR_NON_CONSISTENT)) 49 (attrs & DMA_ATTR_NON_CONSISTENT))
50 need_coh = 0; 50 need_coh = 0;
51 51
@@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
97 int is_non_coh = 1; 97 int is_non_coh = 1;
98 98
99 is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || 99 is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
100 (is_isa_arcv2() && ioc_exists); 100 (is_isa_arcv2() && ioc_enable);
101 101
102 if (PageHighMem(page) || !is_non_coh) 102 if (PageHighMem(page) || !is_non_coh)
103 iounmap((void __force __iomem *)vaddr); 103 iounmap((void __force __iomem *)vaddr);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index ec868a9081a1..bdb295e09160 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
793 char super_pg[64] = ""; 793 char super_pg[64] = "";
794 794
795 if (p_mmu->s_pg_sz_m) 795 if (p_mmu->s_pg_sz_m)
796 scnprintf(super_pg, 64, "%dM Super Page%s, ", 796 scnprintf(super_pg, 64, "%dM Super Page %s",
797 p_mmu->s_pg_sz_m, 797 p_mmu->s_pg_sz_m,
798 IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE)); 798 IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
799 799
800 n += scnprintf(buf + n, len - n, 800 n += scnprintf(buf + n, len - n,
801 "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n", 801 "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
802 p_mmu->ver, p_mmu->pg_sz_k, super_pg, 802 p_mmu->ver, p_mmu->pg_sz_k, super_pg,
803 p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways, 803 p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
804 p_mmu->u_dtlb, p_mmu->u_itlb, 804 p_mmu->u_dtlb, p_mmu->u_itlb,
805 IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40)); 805 IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
806 806
807 return buf; 807 return buf;
808} 808}
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index f1967eeb32e7..b30e4e36bb00 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -237,15 +237,6 @@ ex_saved_reg1:
237 237
2382: 2382:
239 239
240#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
241 and.f 0, r0, _PAGE_PRESENT
242 bz 1f
243 ld r3, [num_pte_not_present]
244 add r3, r3, 1
245 st r3, [num_pte_not_present]
2461:
247#endif
248
249.endm 240.endm
250 241
251;----------------------------------------------------------------- 242;-----------------------------------------------------------------
@@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
309 300
310 TLBMISS_FREEUP_REGS 301 TLBMISS_FREEUP_REGS
311 302
312#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
313 ld r0, [@numitlb]
314 add r0, r0, 1
315 st r0, [@numitlb]
316#endif
317
318 ;---------------------------------------------------------------- 303 ;----------------------------------------------------------------
319 ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA 304 ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
320 LOAD_FAULT_PTE 305 LOAD_FAULT_PTE
@@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
349 334
350 TLBMISS_FREEUP_REGS 335 TLBMISS_FREEUP_REGS
351 336
352#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
353 ld r0, [@numdtlb]
354 add r0, r0, 1
355 st r0, [@numdtlb]
356#endif
357
358 ;---------------------------------------------------------------- 337 ;----------------------------------------------------------------
359 ; Get the PTE corresponding to V-addr accessed 338 ; Get the PTE corresponding to V-addr accessed
360 ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA 339 ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 03e9273f1876..08bb84f2ad58 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1312,6 +1312,13 @@ static int init_hyp_mode(void)
1312 goto out_err; 1312 goto out_err;
1313 } 1313 }
1314 1314
1315 err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
1316 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
1317 if (err) {
1318 kvm_err("Cannot map bss section\n");
1319 goto out_err;
1320 }
1321
1315 /* 1322 /*
1316 * Map the Hyp stack pages 1323 * Map the Hyp stack pages
1317 */ 1324 */
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 30398dbc940a..969ef880d234 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -915,7 +915,7 @@ config RANDOMIZE_BASE
915 915
916config RANDOMIZE_MODULE_REGION_FULL 916config RANDOMIZE_MODULE_REGION_FULL
917 bool "Randomize the module region independently from the core kernel" 917 bool "Randomize the module region independently from the core kernel"
918 depends on RANDOMIZE_BASE 918 depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
919 default y 919 default y
920 help 920 help
921 Randomizes the location of the module region without considering the 921 Randomizes the location of the module region without considering the
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index ab51aed6b6c1..3635b8662724 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
15GZFLAGS :=-9 15GZFLAGS :=-9
16 16
17ifneq ($(CONFIG_RELOCATABLE),) 17ifneq ($(CONFIG_RELOCATABLE),)
18LDFLAGS_vmlinux += -pie -Bsymbolic 18LDFLAGS_vmlinux += -pie -shared -Bsymbolic
19endif 19endif
20 20
21ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) 21ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 758d74fedfad..a27c3245ba21 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -94,7 +94,7 @@ struct arm64_cpu_capabilities {
94 u16 capability; 94 u16 capability;
95 int def_scope; /* default scope */ 95 int def_scope; /* default scope */
96 bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope); 96 bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
97 void (*enable)(void *); /* Called on all active CPUs */ 97 int (*enable)(void *); /* Called on all active CPUs */
98 union { 98 union {
99 struct { /* To be used for erratum handling only */ 99 struct { /* To be used for erratum handling only */
100 u32 midr_model; 100 u32 midr_model;
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
index db0563c23482..f7865dd9d868 100644
--- a/arch/arm64/include/asm/exec.h
+++ b/arch/arm64/include/asm/exec.h
@@ -18,6 +18,9 @@
18#ifndef __ASM_EXEC_H 18#ifndef __ASM_EXEC_H
19#define __ASM_EXEC_H 19#define __ASM_EXEC_H
20 20
21#include <linux/sched.h>
22
21extern unsigned long arch_align_stack(unsigned long sp); 23extern unsigned long arch_align_stack(unsigned long sp);
24void uao_thread_switch(struct task_struct *next);
22 25
23#endif /* __ASM_EXEC_H */ 26#endif /* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fd9d5fd788f5..f5ea0ba70f07 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -178,11 +178,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
178 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV); 178 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
179} 179}
180 180
181static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
182{
183 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
184}
185
186static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) 181static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
187{ 182{
188 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE); 183 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
@@ -203,6 +198,12 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
203 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); 198 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
204} 199}
205 200
201static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
202{
203 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
204 kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
205}
206
206static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) 207static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
207{ 208{
208 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM); 209 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
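
kvm_vcpu_dabt_iswrite() moves below kvm_vcpu_dabt_iss1tw() because it now depends on it: an abort taken during a stage-1 page-table walk (an Access-flag or DBM update) must be handled as a write even when WNR describes the original access as a read. A condensed, illustrative form of the predicate; the WNR bit position is quoted from the architecture rather than from this diff.

#include <stdbool.h>
#include <stdint.h>

#define ESR_ELx_WNR	(1ULL << 6)	/* Write-not-Read ISS bit */

static inline bool dabt_is_write(uint64_t hsr, bool s1ptw)
{
	return (hsr & ESR_ELx_WNR) || s1ptw;	/* s1ptw: AF/DBM update */
}
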
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ba62df8c6e35..b71086d25195 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
217#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 217#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
218#else 218#else
219#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) 219#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
220#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) 220#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
221 221
222#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) 222#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
223#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) 223#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
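
The memory.h change is a macro-hygiene fix: the parameter was declared as kaddr while the body used page, so the macro silently captured whatever identifier page named at each call site and only worked because every caller happened to pass an argument spelled page. A minimal reproduction of the failure mode (illustrative, not kernel code):

#include <stdio.h>

#define bad_square(x) ((val) * (val))	/* parameter unused; body grabs 'val' */

int main(void)
{
	int val = 3;
	/* "works" only because the argument happens to be spelled 'val' */
	printf("%d\n", bad_square(val));	/* prints 9 */
	return 0;
}
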
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..06ff7fd9e81f 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
17#define __ASM_MODULE_H 17#define __ASM_MODULE_H
18 18
19#include <asm-generic/module.h> 19#include <asm-generic/module.h>
20#include <asm/memory.h>
20 21
21#define MODULE_ARCH_VERMAGIC "aarch64" 22#define MODULE_ARCH_VERMAGIC "aarch64"
22 23
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
32 Elf64_Sym *sym); 33 Elf64_Sym *sym);
33 34
34#ifdef CONFIG_RANDOMIZE_BASE 35#ifdef CONFIG_RANDOMIZE_BASE
36#ifdef CONFIG_MODVERSIONS
37#define ARCH_RELOCATES_KCRCTAB
38#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
39#endif
35extern u64 module_alloc_base; 40extern u64 module_alloc_base;
36#else 41#else
37#define module_alloc_base ((u64)_etext - MODULES_VSIZE) 42#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 2fee2f59288c..5394c8405e66 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
44 \ 44 \
45 switch (size) { \ 45 switch (size) { \
46 case 1: \ 46 case 1: \
47 do { \ 47 asm ("//__per_cpu_" #op "_1\n" \
48 asm ("//__per_cpu_" #op "_1\n" \ 48 "1: ldxrb %w[ret], %[ptr]\n" \
49 "ldxrb %w[ret], %[ptr]\n" \
50 #asm_op " %w[ret], %w[ret], %w[val]\n" \ 49 #asm_op " %w[ret], %w[ret], %w[val]\n" \
51 "stxrb %w[loop], %w[ret], %[ptr]\n" \ 50 " stxrb %w[loop], %w[ret], %[ptr]\n" \
52 : [loop] "=&r" (loop), [ret] "=&r" (ret), \ 51 " cbnz %w[loop], 1b" \
53 [ptr] "+Q"(*(u8 *)ptr) \ 52 : [loop] "=&r" (loop), [ret] "=&r" (ret), \
54 : [val] "Ir" (val)); \ 53 [ptr] "+Q"(*(u8 *)ptr) \
55 } while (loop); \ 54 : [val] "Ir" (val)); \
56 break; \ 55 break; \
57 case 2: \ 56 case 2: \
58 do { \ 57 asm ("//__per_cpu_" #op "_2\n" \
59 asm ("//__per_cpu_" #op "_2\n" \ 58 "1: ldxrh %w[ret], %[ptr]\n" \
60 "ldxrh %w[ret], %[ptr]\n" \
61 #asm_op " %w[ret], %w[ret], %w[val]\n" \ 59 #asm_op " %w[ret], %w[ret], %w[val]\n" \
62 "stxrh %w[loop], %w[ret], %[ptr]\n" \ 60 " stxrh %w[loop], %w[ret], %[ptr]\n" \
63 : [loop] "=&r" (loop), [ret] "=&r" (ret), \ 61 " cbnz %w[loop], 1b" \
64 [ptr] "+Q"(*(u16 *)ptr) \ 62 : [loop] "=&r" (loop), [ret] "=&r" (ret), \
65 : [val] "Ir" (val)); \ 63 [ptr] "+Q"(*(u16 *)ptr) \
66 } while (loop); \ 64 : [val] "Ir" (val)); \
67 break; \ 65 break; \
68 case 4: \ 66 case 4: \
69 do { \ 67 asm ("//__per_cpu_" #op "_4\n" \
70 asm ("//__per_cpu_" #op "_4\n" \ 68 "1: ldxr %w[ret], %[ptr]\n" \
71 "ldxr %w[ret], %[ptr]\n" \
72 #asm_op " %w[ret], %w[ret], %w[val]\n" \ 69 #asm_op " %w[ret], %w[ret], %w[val]\n" \
73 "stxr %w[loop], %w[ret], %[ptr]\n" \ 70 " stxr %w[loop], %w[ret], %[ptr]\n" \
74 : [loop] "=&r" (loop), [ret] "=&r" (ret), \ 71 " cbnz %w[loop], 1b" \
75 [ptr] "+Q"(*(u32 *)ptr) \ 72 : [loop] "=&r" (loop), [ret] "=&r" (ret), \
76 : [val] "Ir" (val)); \ 73 [ptr] "+Q"(*(u32 *)ptr) \
77 } while (loop); \ 74 : [val] "Ir" (val)); \
78 break; \ 75 break; \
79 case 8: \ 76 case 8: \
80 do { \ 77 asm ("//__per_cpu_" #op "_8\n" \
81 asm ("//__per_cpu_" #op "_8\n" \ 78 "1: ldxr %[ret], %[ptr]\n" \
82 "ldxr %[ret], %[ptr]\n" \
83 #asm_op " %[ret], %[ret], %[val]\n" \ 79 #asm_op " %[ret], %[ret], %[val]\n" \
84 "stxr %w[loop], %[ret], %[ptr]\n" \ 80 " stxr %w[loop], %[ret], %[ptr]\n" \
85 : [loop] "=&r" (loop), [ret] "=&r" (ret), \ 81 " cbnz %w[loop], 1b" \
86 [ptr] "+Q"(*(u64 *)ptr) \ 82 : [loop] "=&r" (loop), [ret] "=&r" (ret), \
87 : [val] "Ir" (val)); \ 83 [ptr] "+Q"(*(u64 *)ptr) \
88 } while (loop); \ 84 : [val] "Ir" (val)); \
89 break; \ 85 break; \
90 default: \ 86 default: \
91 BUILD_BUG(); \ 87 BUILD_BUG(); \
@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
150 146
151 switch (size) { 147 switch (size) {
152 case 1: 148 case 1:
153 do { 149 asm ("//__percpu_xchg_1\n"
154 asm ("//__percpu_xchg_1\n" 150 "1: ldxrb %w[ret], %[ptr]\n"
155 "ldxrb %w[ret], %[ptr]\n" 151 " stxrb %w[loop], %w[val], %[ptr]\n"
156 "stxrb %w[loop], %w[val], %[ptr]\n" 152 " cbnz %w[loop], 1b"
157 : [loop] "=&r"(loop), [ret] "=&r"(ret), 153 : [loop] "=&r"(loop), [ret] "=&r"(ret),
158 [ptr] "+Q"(*(u8 *)ptr) 154 [ptr] "+Q"(*(u8 *)ptr)
159 : [val] "r" (val)); 155 : [val] "r" (val));
160 } while (loop);
161 break; 156 break;
162 case 2: 157 case 2:
163 do { 158 asm ("//__percpu_xchg_2\n"
164 asm ("//__percpu_xchg_2\n" 159 "1: ldxrh %w[ret], %[ptr]\n"
165 "ldxrh %w[ret], %[ptr]\n" 160 " stxrh %w[loop], %w[val], %[ptr]\n"
166 "stxrh %w[loop], %w[val], %[ptr]\n" 161 " cbnz %w[loop], 1b"
167 : [loop] "=&r"(loop), [ret] "=&r"(ret), 162 : [loop] "=&r"(loop), [ret] "=&r"(ret),
168 [ptr] "+Q"(*(u16 *)ptr) 163 [ptr] "+Q"(*(u16 *)ptr)
169 : [val] "r" (val)); 164 : [val] "r" (val));
170 } while (loop);
171 break; 165 break;
172 case 4: 166 case 4:
173 do { 167 asm ("//__percpu_xchg_4\n"
174 asm ("//__percpu_xchg_4\n" 168 "1: ldxr %w[ret], %[ptr]\n"
175 "ldxr %w[ret], %[ptr]\n" 169 " stxr %w[loop], %w[val], %[ptr]\n"
176 "stxr %w[loop], %w[val], %[ptr]\n" 170 " cbnz %w[loop], 1b"
177 : [loop] "=&r"(loop), [ret] "=&r"(ret), 171 : [loop] "=&r"(loop), [ret] "=&r"(ret),
178 [ptr] "+Q"(*(u32 *)ptr) 172 [ptr] "+Q"(*(u32 *)ptr)
179 : [val] "r" (val)); 173 : [val] "r" (val));
180 } while (loop);
181 break; 174 break;
182 case 8: 175 case 8:
183 do { 176 asm ("//__percpu_xchg_8\n"
184 asm ("//__percpu_xchg_8\n" 177 "1: ldxr %[ret], %[ptr]\n"
185 "ldxr %[ret], %[ptr]\n" 178 " stxr %w[loop], %[val], %[ptr]\n"
186 "stxr %w[loop], %[val], %[ptr]\n" 179 " cbnz %w[loop], 1b"
187 : [loop] "=&r"(loop), [ret] "=&r"(ret), 180 : [loop] "=&r"(loop), [ret] "=&r"(ret),
188 [ptr] "+Q"(*(u64 *)ptr) 181 [ptr] "+Q"(*(u64 *)ptr)
189 : [val] "r" (val)); 182 : [val] "r" (val));
190 } while (loop);
191 break; 183 break;
192 default: 184 default:
193 BUILD_BUG(); 185 BUILD_BUG();
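
The percpu.h rewrite pulls the retry loop inside the asm: previously the store-exclusive status was tested by a C do/while wrapped around the asm block, leaving the compiler free to schedule spills or reloads between the ldxr and the stxr and thereby disturb the exclusive monitor. With the cbnz branch inside the asm, nothing can intervene. A stand-alone, aarch64-only sketch of the same pattern for a 32-bit add:

#include <stdint.h>

static inline void atomic_add_u32(uint32_t *ptr, uint32_t val)
{
	unsigned long loop;
	uint32_t ret;

	asm volatile(
	"1:	ldxr	%w[ret], %[ptr]\n"
	"	add	%w[ret], %w[ret], %w[val]\n"
	"	stxr	%w[loop], %w[ret], %[ptr]\n"
	"	cbnz	%w[loop], 1b"
	: [loop] "=&r" (loop), [ret] "=&r" (ret), [ptr] "+Q" (*ptr)
	: [val] "Ir" (val));
}
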
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index df2e53d3a969..60e34824e18c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
188 188
189#endif 189#endif
190 190
191void cpu_enable_pan(void *__unused); 191int cpu_enable_pan(void *__unused);
192void cpu_enable_uao(void *__unused); 192int cpu_enable_uao(void *__unused);
193void cpu_enable_cache_maint_trap(void *__unused); 193int cpu_enable_cache_maint_trap(void *__unused);
194 194
195#endif /* __ASM_PROCESSOR_H */ 195#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e8d46e8e6079..6c80b3699cb8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,7 +286,7 @@ asm(
286 286
287#define write_sysreg_s(v, r) do { \ 287#define write_sysreg_s(v, r) do { \
288 u64 __val = (u64)v; \ 288 u64 __val = (u64)v; \
289 asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val)); \ 289 asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
290} while (0) 290} while (0)
291 291
292static inline void config_sctlr_el1(u32 clear, u32 set) 292static inline void config_sctlr_el1(u32 clear, u32 set)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index bcaf6fba1b65..55d0adbf6509 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
21/* 21/*
22 * User space memory access functions 22 * User space memory access functions
23 */ 23 */
24#include <linux/bitops.h>
24#include <linux/kasan-checks.h> 25#include <linux/kasan-checks.h>
25#include <linux/string.h> 26#include <linux/string.h>
26#include <linux/thread_info.h> 27#include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
102 flag; \ 103 flag; \
103}) 104})
104 105
106/*
107 * When dealing with data aborts or instruction traps we may end up with
108 * a tagged userland pointer. Clear the tag to get a sane pointer to pass
109 * on to access_ok(), for instance.
110 */
111#define untagged_addr(addr) sign_extend64(addr, 55)
112
105#define access_ok(type, addr, size) __range_ok(addr, size) 113#define access_ok(type, addr, size) __range_ok(addr, size)
106#define user_addr_max get_fs 114#define user_addr_max get_fs
107 115
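
untagged_addr() is a one-line sign extension: under the top-byte-ignore ABI bit 55 still distinguishes the user and kernel halves of the address space even when bits 63:56 carry a tag, so sign-extending from bit 55 replaces the tag byte with its canonical value and leaves untagged pointers unchanged. A stand-alone equivalent, assuming the usual sign_extend64() semantics:

#include <stdint.h>

static inline int64_t untagged_addr(uint64_t addr)
{
	/* sign_extend64(addr, 55): 0xAB007FFFDEADBEEF -> 0x00007FFFDEADBEEF */
	return (int64_t)(addr << 8) >> 8;
}
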
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 42ffdb54e162..b0988bb1bf64 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
280/* 280/*
281 * Error-checking SWP macros implemented using ldxr{b}/stxr{b} 281 * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
282 */ 282 */
283#define __user_swpX_asm(data, addr, res, temp, B) \ 283
284/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
285#define __SWP_LL_SC_LOOPS 4
286
287#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
284 __asm__ __volatile__( \ 288 __asm__ __volatile__( \
289 " mov %w3, %w7\n" \
285 ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ 290 ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
286 CONFIG_ARM64_PAN) \ 291 CONFIG_ARM64_PAN) \
287 "0: ldxr"B" %w2, [%3]\n" \ 292 "0: ldxr"B" %w2, [%4]\n" \
288 "1: stxr"B" %w0, %w1, [%3]\n" \ 293 "1: stxr"B" %w0, %w1, [%4]\n" \
289 " cbz %w0, 2f\n" \ 294 " cbz %w0, 2f\n" \
290 " mov %w0, %w4\n" \ 295 " sub %w3, %w3, #1\n" \
296 " cbnz %w3, 0b\n" \
297 " mov %w0, %w5\n" \
291 " b 3f\n" \ 298 " b 3f\n" \
292 "2:\n" \ 299 "2:\n" \
293 " mov %w1, %w2\n" \ 300 " mov %w1, %w2\n" \
294 "3:\n" \ 301 "3:\n" \
295 " .pushsection .fixup,\"ax\"\n" \ 302 " .pushsection .fixup,\"ax\"\n" \
296 " .align 2\n" \ 303 " .align 2\n" \
297 "4: mov %w0, %w5\n" \ 304 "4: mov %w0, %w6\n" \
298 " b 3b\n" \ 305 " b 3b\n" \
299 " .popsection" \ 306 " .popsection" \
300 _ASM_EXTABLE(0b, 4b) \ 307 _ASM_EXTABLE(0b, 4b) \
301 _ASM_EXTABLE(1b, 4b) \ 308 _ASM_EXTABLE(1b, 4b) \
302 ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ 309 ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
303 CONFIG_ARM64_PAN) \ 310 CONFIG_ARM64_PAN) \
304 : "=&r" (res), "+r" (data), "=&r" (temp) \ 311 : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
305 : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ 312 : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
313 "i" (__SWP_LL_SC_LOOPS) \
306 : "memory") 314 : "memory")
307 315
308#define __user_swp_asm(data, addr, res, temp) \ 316#define __user_swp_asm(data, addr, res, temp, temp2) \
309 __user_swpX_asm(data, addr, res, temp, "") 317 __user_swpX_asm(data, addr, res, temp, temp2, "")
310#define __user_swpb_asm(data, addr, res, temp) \ 318#define __user_swpb_asm(data, addr, res, temp, temp2) \
311 __user_swpX_asm(data, addr, res, temp, "b") 319 __user_swpX_asm(data, addr, res, temp, temp2, "b")
312 320
313/* 321/*
314 * Bit 22 of the instruction encoding distinguishes between 322 * Bit 22 of the instruction encoding distinguishes between
@@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
328 } 336 }
329 337
330 while (1) { 338 while (1) {
331 unsigned long temp; 339 unsigned long temp, temp2;
332 340
333 if (type == TYPE_SWPB) 341 if (type == TYPE_SWPB)
334 __user_swpb_asm(*data, address, res, temp); 342 __user_swpb_asm(*data, address, res, temp, temp2);
335 else 343 else
336 __user_swp_asm(*data, address, res, temp); 344 __user_swp_asm(*data, address, res, temp, temp2);
337 345
338 if (likely(res != -EAGAIN) || signal_pending(current)) 346 if (likely(res != -EAGAIN) || signal_pending(current))
339 break; 347 break;
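
__SWP_LL_SC_LOOPS bounds the retries taken inside the asm: after four failed store-exclusives the sequence bails out with -EAGAIN, so the C loop in emulate_swpX() gets a chance to notice pending signals instead of spinning in kernel mode with no forward-progress guarantee. A user-space analogue of the same bounded-retry shape, using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

#define SWP_LL_SC_LOOPS 4	/* arbitrary bound, as in the hunk above */

/* Try to exchange *val into *ptr; give up after a fixed number of failed
 * attempts so the caller can check for signals and retry (its -EAGAIN). */
static bool bounded_xchg(_Atomic unsigned *ptr, unsigned *val)
{
	for (int i = 0; i < SWP_LL_SC_LOOPS; i++) {
		unsigned old = atomic_load_explicit(ptr, memory_order_relaxed);
		if (atomic_compare_exchange_weak(ptr, &old, *val)) {
			*val = old;	/* hand back the previous value */
			return true;
		}
	}
	return false;
}
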
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0150394f4cab..b75e917aac46 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
39 (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask); 39 (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
40} 40}
41 41
42static void cpu_enable_trap_ctr_access(void *__unused) 42static int cpu_enable_trap_ctr_access(void *__unused)
43{ 43{
44 /* Clear SCTLR_EL1.UCT */ 44 /* Clear SCTLR_EL1.UCT */
45 config_sctlr_el1(SCTLR_EL1_UCT, 0); 45 config_sctlr_el1(SCTLR_EL1_UCT, 0);
46 return 0;
46} 47}
47 48
48#define MIDR_RANGE(model, min, max) \ 49#define MIDR_RANGE(model, min, max) \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d577f263cc4a..c02504ea304b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -19,7 +19,9 @@
19#define pr_fmt(fmt) "CPU features: " fmt 19#define pr_fmt(fmt) "CPU features: " fmt
20 20
21#include <linux/bsearch.h> 21#include <linux/bsearch.h>
22#include <linux/cpumask.h>
22#include <linux/sort.h> 23#include <linux/sort.h>
24#include <linux/stop_machine.h>
23#include <linux/types.h> 25#include <linux/types.h>
24#include <asm/cpu.h> 26#include <asm/cpu.h>
25#include <asm/cpufeature.h> 27#include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
941{ 943{
942 for (; caps->matches; caps++) 944 for (; caps->matches; caps++)
943 if (caps->enable && cpus_have_cap(caps->capability)) 945 if (caps->enable && cpus_have_cap(caps->capability))
944 on_each_cpu(caps->enable, NULL, true); 946 /*
947 * Use stop_machine() as it schedules the work allowing
948 * us to modify PSTATE, instead of on_each_cpu() which
949 * uses an IPI, giving us a PSTATE that disappears when
950 * we return.
951 */
952 stop_machine(caps->enable, NULL, cpu_online_mask);
945} 953}
946 954
947/* 955/*
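
stop_machine() is why the enable hooks across this series (cpu_enable_pan() and friends) grow an int return value: its callback type is cpu_stop_fn_t, i.e. int (*)(void *), unlike the void callbacks that on_each_cpu() took. A sketch of the calling shape, in kernel context and for illustration only:

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

static int enable_feature(void *unused)
{
	/* runs in the stopper thread on each CPU in the mask, not in an IPI
	 * handler, so PSTATE changes made here survive the return */
	return 0;
}

static void enable_everywhere(void)
{
	stop_machine(enable_feature, NULL, cpu_online_mask);
}
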
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 427f6d3f084c..332e33193ccf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -586,8 +586,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
586 b.lt 4f // Skip if no PMU present 586 b.lt 4f // Skip if no PMU present
587 mrs x0, pmcr_el0 // Disable debug access traps 587 mrs x0, pmcr_el0 // Disable debug access traps
588 ubfx x0, x0, #11, #5 // to EL2 and allow access to 588 ubfx x0, x0, #11, #5 // to EL2 and allow access to
589 msr mdcr_el2, x0 // all PMU counters from EL1
5904: 5894:
590 csel x0, xzr, x0, lt // all PMU counters from EL1
591 msr mdcr_el2, x0 // (if they exist)
591 592
592 /* Stage-2 translation */ 593 /* Stage-2 translation */
593 msr vttbr_el2, xzr 594 msr vttbr_el2, xzr
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 27b2f1387df4..01753cd7d3f0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
49#include <asm/alternative.h> 49#include <asm/alternative.h>
50#include <asm/compat.h> 50#include <asm/compat.h>
51#include <asm/cacheflush.h> 51#include <asm/cacheflush.h>
52#include <asm/exec.h>
52#include <asm/fpsimd.h> 53#include <asm/fpsimd.h>
53#include <asm/mmu_context.h> 54#include <asm/mmu_context.h>
54#include <asm/processor.h> 55#include <asm/processor.h>
@@ -186,10 +187,19 @@ void __show_regs(struct pt_regs *regs)
186 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", 187 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
187 regs->pc, lr, regs->pstate); 188 regs->pc, lr, regs->pstate);
188 printk("sp : %016llx\n", sp); 189 printk("sp : %016llx\n", sp);
189 for (i = top_reg; i >= 0; i--) { 190
191 i = top_reg;
192
193 while (i >= 0) {
190 printk("x%-2d: %016llx ", i, regs->regs[i]); 194 printk("x%-2d: %016llx ", i, regs->regs[i]);
191 if (i % 2 == 0) 195 i--;
192 printk("\n"); 196
197 if (i % 2 == 0) {
198 pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
199 i--;
200 }
201
202 pr_cont("\n");
193 } 203 }
194 printk("\n"); 204 printk("\n");
195} 205}
@@ -301,7 +311,7 @@ static void tls_thread_switch(struct task_struct *next)
301} 311}
302 312
303/* Restore the UAO state depending on next's addr_limit */ 313/* Restore the UAO state depending on next's addr_limit */
304static void uao_thread_switch(struct task_struct *next) 314void uao_thread_switch(struct task_struct *next)
305{ 315{
306 if (IS_ENABLED(CONFIG_ARM64_UAO)) { 316 if (IS_ENABLED(CONFIG_ARM64_UAO)) {
307 if (task_thread_info(next)->addr_limit == KERNEL_DS) 317 if (task_thread_info(next)->addr_limit == KERNEL_DS)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index b8799e7c79de..1bec41b5fda3 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -135,7 +135,7 @@ ENTRY(_cpu_resume)
135 135
136#ifdef CONFIG_KASAN 136#ifdef CONFIG_KASAN
137 mov x0, sp 137 mov x0, sp
138 bl kasan_unpoison_remaining_stack 138 bl kasan_unpoison_task_stack_below
139#endif 139#endif
140 140
141 ldp x19, x20, [x29, #16] 141 ldp x19, x20, [x29, #16]
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d3f151cfd4a1..8507703dabe4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -544,6 +544,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
544 return; 544 return;
545 } 545 }
546 bootcpu_valid = true; 546 bootcpu_valid = true;
547 early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
547 return; 548 return;
548 } 549 }
549 550
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index ad734142070d..bb0cd787a9d3 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,8 +1,11 @@
1#include <linux/ftrace.h> 1#include <linux/ftrace.h>
2#include <linux/percpu.h> 2#include <linux/percpu.h>
3#include <linux/slab.h> 3#include <linux/slab.h>
4#include <asm/alternative.h>
4#include <asm/cacheflush.h> 5#include <asm/cacheflush.h>
6#include <asm/cpufeature.h>
5#include <asm/debug-monitors.h> 7#include <asm/debug-monitors.h>
8#include <asm/exec.h>
6#include <asm/pgtable.h> 9#include <asm/pgtable.h>
7#include <asm/memory.h> 10#include <asm/memory.h>
8#include <asm/mmu_context.h> 11#include <asm/mmu_context.h>
@@ -50,6 +53,14 @@ void notrace __cpu_suspend_exit(void)
50 set_my_cpu_offset(per_cpu_offset(cpu)); 53 set_my_cpu_offset(per_cpu_offset(cpu));
51 54
52 /* 55 /*
56 * PSTATE was not saved over suspend/resume, re-enable any detected
57 * features that might not have been set correctly.
58 */
59 asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
60 CONFIG_ARM64_PAN));
61 uao_thread_switch(current);
62
63 /*
53 * Restore HW breakpoint registers to sane values 64 * Restore HW breakpoint registers to sane values
54 * before debug exceptions are possibly reenabled 65 * before debug exceptions are possibly reenabled
55 * through local_dbg_restore. 66 * through local_dbg_restore.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5ff020f8fb7f..c9986b3e0a96 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -428,24 +428,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
428 force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); 428 force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
429} 429}
430 430
431void cpu_enable_cache_maint_trap(void *__unused) 431int cpu_enable_cache_maint_trap(void *__unused)
432{ 432{
433 config_sctlr_el1(SCTLR_EL1_UCI, 0); 433 config_sctlr_el1(SCTLR_EL1_UCI, 0);
434 return 0;
434} 435}
435 436
436#define __user_cache_maint(insn, address, res) \ 437#define __user_cache_maint(insn, address, res) \
437 asm volatile ( \ 438 if (untagged_addr(address) >= user_addr_max()) \
438 "1: " insn ", %1\n" \ 439 res = -EFAULT; \
439 " mov %w0, #0\n" \ 440 else \
440 "2:\n" \ 441 asm volatile ( \
441 " .pushsection .fixup,\"ax\"\n" \ 442 "1: " insn ", %1\n" \
442 " .align 2\n" \ 443 " mov %w0, #0\n" \
443 "3: mov %w0, %w2\n" \ 444 "2:\n" \
444 " b 2b\n" \ 445 " .pushsection .fixup,\"ax\"\n" \
445 " .popsection\n" \ 446 " .align 2\n" \
446 _ASM_EXTABLE(1b, 3b) \ 447 "3: mov %w0, %w2\n" \
447 : "=r" (res) \ 448 " b 2b\n" \
448 : "r" (address), "i" (-EFAULT) ) 449 " .popsection\n" \
450 _ASM_EXTABLE(1b, 3b) \
451 : "=r" (res) \
452 : "r" (address), "i" (-EFAULT) )
449 453
450static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) 454static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
451{ 455{
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 53d9159662fe..0f8788374815 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -29,7 +29,9 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31#include <linux/perf_event.h> 31#include <linux/perf_event.h>
32#include <linux/preempt.h>
32 33
34#include <asm/bug.h>
33#include <asm/cpufeature.h> 35#include <asm/cpufeature.h>
34#include <asm/exception.h> 36#include <asm/exception.h>
35#include <asm/debug-monitors.h> 37#include <asm/debug-monitors.h>
@@ -670,9 +672,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
670NOKPROBE_SYMBOL(do_debug_exception); 672NOKPROBE_SYMBOL(do_debug_exception);
671 673
672#ifdef CONFIG_ARM64_PAN 674#ifdef CONFIG_ARM64_PAN
673void cpu_enable_pan(void *__unused) 675int cpu_enable_pan(void *__unused)
674{ 676{
677 /*
678 * We modify PSTATE. This won't work from irq context as the PSTATE
679 * is discarded once we return from the exception.
680 */
681 WARN_ON_ONCE(in_interrupt());
682
675 config_sctlr_el1(SCTLR_EL1_SPAN, 0); 683 config_sctlr_el1(SCTLR_EL1_SPAN, 0);
684 asm(SET_PSTATE_PAN(1));
685 return 0;
676} 686}
677#endif /* CONFIG_ARM64_PAN */ 687#endif /* CONFIG_ARM64_PAN */
678 688
@@ -683,8 +693,9 @@ void cpu_enable_pan(void *__unused)
683 * We need to enable the feature at runtime (instead of adding it to 693 * We need to enable the feature at runtime (instead of adding it to
684 * PSR_MODE_EL1h) as the feature may not be implemented by the cpu. 694 * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
685 */ 695 */
686void cpu_enable_uao(void *__unused) 696int cpu_enable_uao(void *__unused)
687{ 697{
688 asm(SET_PSTATE_UAO(1)); 698 asm(SET_PSTATE_UAO(1));
699 return 0;
689} 700}
690#endif /* CONFIG_ARM64_UAO */ 701#endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 21c489bdeb4e..212c4d1e2f26 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -421,35 +421,35 @@ void __init mem_init(void)
421 421
422 pr_notice("Virtual kernel memory layout:\n"); 422 pr_notice("Virtual kernel memory layout:\n");
423#ifdef CONFIG_KASAN 423#ifdef CONFIG_KASAN
424 pr_cont(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n", 424 pr_notice(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
425 MLG(KASAN_SHADOW_START, KASAN_SHADOW_END)); 425 MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
426#endif 426#endif
427 pr_cont(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n", 427 pr_notice(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
428 MLM(MODULES_VADDR, MODULES_END)); 428 MLM(MODULES_VADDR, MODULES_END));
429 pr_cont(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n", 429 pr_notice(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
430 MLG(VMALLOC_START, VMALLOC_END)); 430 MLG(VMALLOC_START, VMALLOC_END));
431 pr_cont(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n", 431 pr_notice(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
432 MLK_ROUNDUP(_text, _etext)); 432 MLK_ROUNDUP(_text, _etext));
433 pr_cont(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n", 433 pr_notice(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
434 MLK_ROUNDUP(__start_rodata, __init_begin)); 434 MLK_ROUNDUP(__start_rodata, __init_begin));
435 pr_cont(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n", 435 pr_notice(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
436 MLK_ROUNDUP(__init_begin, __init_end)); 436 MLK_ROUNDUP(__init_begin, __init_end));
437 pr_cont(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n", 437 pr_notice(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
438 MLK_ROUNDUP(_sdata, _edata)); 438 MLK_ROUNDUP(_sdata, _edata));
439 pr_cont(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n", 439 pr_notice(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
440 MLK_ROUNDUP(__bss_start, __bss_stop)); 440 MLK_ROUNDUP(__bss_start, __bss_stop));
441 pr_cont(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n", 441 pr_notice(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
442 MLK(FIXADDR_START, FIXADDR_TOP)); 442 MLK(FIXADDR_START, FIXADDR_TOP));
443 pr_cont(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n", 443 pr_notice(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
444 MLM(PCI_IO_START, PCI_IO_END)); 444 MLM(PCI_IO_START, PCI_IO_END));
445#ifdef CONFIG_SPARSEMEM_VMEMMAP 445#ifdef CONFIG_SPARSEMEM_VMEMMAP
446 pr_cont(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n", 446 pr_notice(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
447 MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE)); 447 MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
448 pr_cont(" 0x%16lx - 0x%16lx (%6ld MB actual)\n", 448 pr_notice(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
449 MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()), 449 MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
450 (unsigned long)virt_to_page(high_memory))); 450 (unsigned long)virt_to_page(high_memory)));
451#endif 451#endif
452 pr_cont(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n", 452 pr_notice(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
453 MLM(__phys_to_virt(memblock_start_of_DRAM()), 453 MLM(__phys_to_virt(memblock_start_of_DRAM()),
454 (unsigned long)high_memory)); 454 (unsigned long)high_memory));
455 455
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 778a985c8a70..4b32168cf91a 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
147 147
148static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) 148static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
149{ 149{
150 return node_distance(from, to); 150 return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
151} 151}
152 152
153static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, 153static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
@@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
223 void *nd; 223 void *nd;
224 int tnid; 224 int tnid;
225 225
226 pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", 226 if (start_pfn < end_pfn)
227 nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); 227 pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
228 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
229 else
230 pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
228 231
229 nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); 232 nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
230 nd = __va(nd_pa); 233 nd = __va(nd_pa);
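
The pcpu_cpu_distance() fix above corrects a unit mismatch: the callback receives CPU numbers, but node_distance() is defined on NUMA node ids, so each CPU must first be mapped to its node. A toy model of why the untranslated version returns nonsense; names and values here are illustrative.

#include <assert.h>

static int node_of[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };	/* cpu -> node */
static int dist[2][2] = { { 10, 20 }, { 20, 10 } };	/* node x node */

static int pcpu_cpu_distance(unsigned from, unsigned to)
{
	return dist[node_of[from]][node_of[to]];	/* not dist[from][to] */
}

int main(void)
{
	assert(pcpu_cpu_distance(4, 5) == 10);	/* same node: near */
	assert(pcpu_cpu_distance(0, 5) == 20);	/* cross node: far */
	return 0;
}
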
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 8b8fe671b1a6..8d79286ee4e8 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -271,7 +271,7 @@ long arch_ptrace(struct task_struct *child, long request,
271 case BFIN_MEM_ACCESS_CORE: 271 case BFIN_MEM_ACCESS_CORE:
272 case BFIN_MEM_ACCESS_CORE_ONLY: 272 case BFIN_MEM_ACCESS_CORE_ONLY:
273 copied = access_process_vm(child, addr, &tmp, 273 copied = access_process_vm(child, addr, &tmp,
274 to_copy, 0); 274 to_copy, FOLL_FORCE);
275 if (copied) 275 if (copied)
276 break; 276 break;
277 277
@@ -324,7 +324,8 @@ long arch_ptrace(struct task_struct *child, long request,
324 case BFIN_MEM_ACCESS_CORE: 324 case BFIN_MEM_ACCESS_CORE:
325 case BFIN_MEM_ACCESS_CORE_ONLY: 325 case BFIN_MEM_ACCESS_CORE_ONLY:
326 copied = access_process_vm(child, addr, &data, 326 copied = access_process_vm(child, addr, &data,
327 to_copy, 1); 327 to_copy,
328 FOLL_FORCE | FOLL_WRITE);
328 break; 329 break;
329 case BFIN_MEM_ACCESS_DMA: 330 case BFIN_MEM_ACCESS_DMA:
330 if (safe_dma_memcpy(paddr, &data, to_copy)) 331 if (safe_dma_memcpy(paddr, &data, to_copy))
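
This blackfin hunk is the first of several identical conversions (cris, ia64, and m32r below): access_process_vm() now takes a gup_flags word instead of a write boolean, so a read passes FOLL_FORCE and a write passes FOLL_FORCE | FOLL_WRITE. A hypothetical helper capturing the mechanical translation; the series itself open-codes the flags at each call site.

#include <linux/mm.h>		/* FOLL_FORCE, FOLL_WRITE */
#include <linux/types.h>	/* bool */

static inline unsigned int ptrace_gup_flags(bool write)
{
	return FOLL_FORCE | (write ? FOLL_WRITE : 0);
}
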
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index b5698c876fcc..0068fd411a84 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
2722 err = get_user_pages((unsigned long int)(oper.indata + prev_ix), 2722 err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
2723 noinpages, 2723 noinpages,
2724 0, /* read access only for in data */ 2724 0, /* read access only for in data */
2725 0, /* no force */
2726 inpages, 2725 inpages,
2727 NULL); 2726 NULL);
2728 2727
@@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
2736 if (oper.do_cipher){ 2735 if (oper.do_cipher){
2737 err = get_user_pages((unsigned long int)oper.cipher_outdata, 2736 err = get_user_pages((unsigned long int)oper.cipher_outdata,
2738 nooutpages, 2737 nooutpages,
2739 1, /* write access for out data */ 2738 FOLL_WRITE, /* write access for out data */
2740 0, /* no force */
2741 outpages, 2739 outpages,
2742 NULL); 2740 NULL);
2743 up_read(&current->mm->mmap_sem); 2741 up_read(&current->mm->mmap_sem);
@@ -3151,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
3151 printk("print_dma_descriptors start\n"); 3149 printk("print_dma_descriptors start\n");
3152 3150
3153 printk("iop:\n"); 3151 printk("iop:\n");
3154 printk("\tsid: 0x%lld\n", iop->sid); 3152 printk("\tsid: 0x%llx\n", iop->sid);
3155 3153
3156 printk("\tcdesc_out: 0x%p\n", iop->cdesc_out); 3154 printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
3157 printk("\tcdesc_in: 0x%p\n", iop->cdesc_in); 3155 printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f085229cf870..f0df654ac6fc 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
147 /* The trampoline page is globally mapped, no page table to traverse.*/ 147 /* The trampoline page is globally mapped, no page table to traverse.*/
148 tmp = *(unsigned long*)addr; 148 tmp = *(unsigned long*)addr;
149 } else { 149 } else {
150 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 150 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
151 151
152 if (copied != sizeof(tmp)) 152 if (copied != sizeof(tmp))
153 break; 153 break;
@@ -279,7 +279,7 @@ static int insn_size(struct task_struct *child, unsigned long pc)
279 int opsize = 0; 279 int opsize = 0;
280 280
281 /* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */ 281 /* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
282 copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0); 282 copied = access_process_vm(child, pc, &opcode, sizeof(opcode), FOLL_FORCE);
283 if (copied != sizeof(opcode)) 283 if (copied != sizeof(opcode))
284 return 0; 284 return 0;
285 285
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index b408fe660cf8..3cef06875f5c 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -31,7 +31,6 @@ struct thread_info {
31 int cpu; /* cpu we're on */ 31 int cpu; /* cpu we're on */
32 int preempt_count; /* 0 => preemptable, <0 => BUG */ 32 int preempt_count; /* 0 => preemptable, <0 => BUG */
33 mm_segment_t addr_limit; 33 mm_segment_t addr_limit;
34 struct restart_block restart_block;
35}; 34};
36 35
37/* 36/*
@@ -44,9 +43,6 @@ struct thread_info {
44 .cpu = 0, \ 43 .cpu = 0, \
45 .preempt_count = INIT_PREEMPT_COUNT, \ 44 .preempt_count = INIT_PREEMPT_COUNT, \
46 .addr_limit = KERNEL_DS, \ 45 .addr_limit = KERNEL_DS, \
47 .restart_block = { \
48 .fn = do_no_restart_syscall, \
49 }, \
50} 46}
51 47
52#define init_thread_info (init_thread_union.thread_info) 48#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index ad1f81f574e5..7138303cbbf2 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
79 unsigned int er0; 79 unsigned int er0;
80 80
81 /* Always make any pending restarted system calls return -EINTR */ 81 /* Always make any pending restarted system calls return -EINTR */
82 current_thread_info()->restart_block.fn = do_no_restart_syscall; 82 current->restart_block.fn = do_no_restart_syscall;
83 83
84 /* restore passed registers */ 84 /* restore passed registers */
85#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0) 85#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 09f845793d12..5ed0ea92c5bf 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
142 u64 virt_addr=simple_strtoull(buf, NULL, 16); 142 u64 virt_addr=simple_strtoull(buf, NULL, 16);
143 int ret; 143 int ret;
144 144
145 ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL); 145 ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
146 if (ret<=0) { 146 if (ret<=0) {
147#ifdef ERR_INJ_DEBUG 147#ifdef ERR_INJ_DEBUG
148 printk("Virtual address %lx is not existing.\n",virt_addr); 148 printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 6f54d511cc50..31aa8c0f68e1 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -453,7 +453,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
453 return 0; 453 return 0;
454 } 454 }
455 } 455 }
456 copied = access_process_vm(child, addr, &ret, sizeof(ret), 0); 456 copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
457 if (copied != sizeof(ret)) 457 if (copied != sizeof(ret))
458 return -EIO; 458 return -EIO;
459 *val = ret; 459 *val = ret;
@@ -489,7 +489,8 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
489 *ia64_rse_skip_regs(krbs, regnum) = val; 489 *ia64_rse_skip_regs(krbs, regnum) = val;
490 } 490 }
491 } 491 }
492 } else if (access_process_vm(child, addr, &val, sizeof(val), 1) 492 } else if (access_process_vm(child, addr, &val, sizeof(val),
493 FOLL_FORCE | FOLL_WRITE)
493 != sizeof(val)) 494 != sizeof(val))
494 return -EIO; 495 return -EIO;
495 return 0; 496 return 0;
@@ -543,7 +544,8 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
543 ret = ia64_peek(child, sw, user_rbs_end, addr, &val); 544 ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
544 if (ret < 0) 545 if (ret < 0)
545 return ret; 546 return ret;
546 if (access_process_vm(child, addr, &val, sizeof(val), 1) 547 if (access_process_vm(child, addr, &val, sizeof(val),
548 FOLL_FORCE | FOLL_WRITE)
547 != sizeof(val)) 549 != sizeof(val))
548 return -EIO; 550 return -EIO;
549 } 551 }
@@ -559,7 +561,8 @@ ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
559 561
560 /* now copy word for word from user rbs to kernel rbs: */ 562 /* now copy word for word from user rbs to kernel rbs: */
561 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { 563 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
562 if (access_process_vm(child, addr, &val, sizeof(val), 0) 564 if (access_process_vm(child, addr, &val, sizeof(val),
565 FOLL_FORCE)
563 != sizeof(val)) 566 != sizeof(val))
564 return -EIO; 567 return -EIO;
565 568
@@ -1156,7 +1159,8 @@ arch_ptrace (struct task_struct *child, long request,
1156 case PTRACE_PEEKTEXT: 1159 case PTRACE_PEEKTEXT:
1157 case PTRACE_PEEKDATA: 1160 case PTRACE_PEEKDATA:
1158 /* read word at location addr */ 1161 /* read word at location addr */
1159 if (access_process_vm(child, addr, &data, sizeof(data), 0) 1162 if (access_process_vm(child, addr, &data, sizeof(data),
1163 FOLL_FORCE)
1160 != sizeof(data)) 1164 != sizeof(data))
1161 return -EIO; 1165 return -EIO;
1162 /* ensure return value is not mistaken for error code */ 1166 /* ensure return value is not mistaken for error code */
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 51f5e9aa4901..c145605a981f 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -493,7 +493,8 @@ unregister_all_debug_traps(struct task_struct *child)
493 int i; 493 int i;
494 494
495 for (i = 0; i < p->nr_trap; i++) 495 for (i = 0; i < p->nr_trap; i++)
496 access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1); 496 access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
497 FOLL_FORCE | FOLL_WRITE);
497 p->nr_trap = 0; 498 p->nr_trap = 0;
498} 499}
499 500
@@ -537,7 +538,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
537 unsigned long next_insn, code; 538 unsigned long next_insn, code;
538 unsigned long addr = next_pc & ~3; 539 unsigned long addr = next_pc & ~3;
539 540
540 if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0) 541 if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
542 FOLL_FORCE)
541 != sizeof(next_insn)) { 543 != sizeof(next_insn)) {
542 return -1; /* error */ 544 return -1; /* error */
543 } 545 }
@@ -546,7 +548,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
546 if (register_debug_trap(child, next_pc, next_insn, &code)) { 548 if (register_debug_trap(child, next_pc, next_insn, &code)) {
547 return -1; /* error */ 549 return -1; /* error */
548 } 550 }
549 if (access_process_vm(child, addr, &code, sizeof(code), 1) 551 if (access_process_vm(child, addr, &code, sizeof(code),
552 FOLL_FORCE | FOLL_WRITE)
550 != sizeof(code)) { 553 != sizeof(code)) {
551 return -1; /* error */ 554 return -1; /* error */
552 } 555 }
@@ -562,7 +565,8 @@ withdraw_debug_trap(struct pt_regs *regs)
562 addr = (regs->bpc - 2) & ~3; 565 addr = (regs->bpc - 2) & ~3;
563 regs->bpc -= 2; 566 regs->bpc -= 2;
564 if (unregister_debug_trap(current, addr, &code)) { 567 if (unregister_debug_trap(current, addr, &code)) {
565 access_process_vm(current, addr, &code, sizeof(code), 1); 568 access_process_vm(current, addr, &code, sizeof(code),
569 FOLL_FORCE | FOLL_WRITE);
566 invalidate_cache(); 570 invalidate_cache();
567 } 571 }
568} 572}
@@ -589,7 +593,8 @@ void user_enable_single_step(struct task_struct *child)
589 /* Compute next pc. */ 593 /* Compute next pc. */
590 pc = get_stack_long(child, PT_BPC); 594 pc = get_stack_long(child, PT_BPC);
591 595
592 if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) 596 if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
597 FOLL_FORCE)
593 != sizeof(insn)) 598 != sizeof(insn))
594 return; 599 return;
595 600
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 283b5a1967d1..7e71a4e0281b 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -70,7 +70,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
70 break; 70 break;
71 71
72 copied = access_process_vm(child, (u64)addrOthers, &tmp, 72 copied = access_process_vm(child, (u64)addrOthers, &tmp,
73 sizeof(tmp), 0); 73 sizeof(tmp), FOLL_FORCE);
74 if (copied != sizeof(tmp)) 74 if (copied != sizeof(tmp))
75 break; 75 break;
76 ret = put_user(tmp, (u32 __user *) (unsigned long) data); 76 ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
179 break; 179 break;
180 ret = 0; 180 ret = 0;
181 if (access_process_vm(child, (u64)addrOthers, &data, 181 if (access_process_vm(child, (u64)addrOthers, &data,
182 sizeof(data), 1) == sizeof(data)) 182 sizeof(data),
183 FOLL_FORCE | FOLL_WRITE) == sizeof(data))
183 break; 184 break;
184 ret = -EIO; 185 ret = -EIO;
185 break; 186 break;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index ce961495b5e1..622037d851a3 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -14,6 +14,7 @@
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/kdebug.h> 15#include <linux/kdebug.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/uaccess.h>
17#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
18#include <linux/fs.h> 19#include <linux/fs.h>
19#include <linux/bootmem.h> 20#include <linux/bootmem.h>
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 42d124fb6474..d8c3c159289a 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -287,7 +287,7 @@ slow_irqon:
287 pages += nr; 287 pages += nr;
288 288
289 ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT, 289 ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
290 write, 0, pages); 290 pages, write ? FOLL_WRITE : 0);
291 291
292 /* Have to be a bit careful with return values */ 292 /* Have to be a bit careful with return values */
293 if (nr > 0) { 293 if (nr > 0) {
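
The arch-specific fast-GUP slow paths get the same treatment: get_user_pages_unlocked() drops its separate write/force ints in favour of a gup_flags argument, and each caller maps its local write flag to FOLL_WRITE. A sketch of the fallback pattern shared by the mips, sh, sparc and s390 hunks, assuming the post-series prototype:

#include <linux/mm.h>

static long gup_slow_fallback(unsigned long start, unsigned long nr_pages,
			      int write, struct page **pages)
{
	/* force is gone; readers pass 0, writers pass FOLL_WRITE */
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
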
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index f7a184b6c35b..57d42d129033 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -32,9 +32,16 @@ static struct addr_range prep_kernel(void)
32 void *addr = 0; 32 void *addr = 0;
33 struct elf_info ei; 33 struct elf_info ei;
34 long len; 34 long len;
35 int uncompressed_image = 0;
35 36
36 partial_decompress(vmlinuz_addr, vmlinuz_size, 37 len = partial_decompress(vmlinuz_addr, vmlinuz_size,
37 elfheader, sizeof(elfheader), 0); 38 elfheader, sizeof(elfheader), 0);
39 /* assume uncompressed data if -1 is returned */
40 if (len == -1) {
41 uncompressed_image = 1;
42 memcpy(elfheader, vmlinuz_addr, sizeof(elfheader));
43 printf("No valid compressed data found, assume uncompressed data\n\r");
44 }
38 45
39 if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei)) 46 if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei))
40 fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r"); 47 fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
@@ -67,6 +74,13 @@ static struct addr_range prep_kernel(void)
67 "device tree\n\r"); 74 "device tree\n\r");
68 } 75 }
69 76
77 if (uncompressed_image) {
78 memcpy(addr, vmlinuz_addr + ei.elfoffset, ei.loadsize);
79 printf("0x%lx bytes of uncompressed data copied\n\r",
80 ei.loadsize);
81 goto out;
82 }
83
70 /* Finally, decompress the kernel */ 84 /* Finally, decompress the kernel */
71 printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr, 85 printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr,
72 vmlinuz_addr, vmlinuz_addr+vmlinuz_size); 86 vmlinuz_addr, vmlinuz_addr+vmlinuz_size);
@@ -82,7 +96,7 @@ static struct addr_range prep_kernel(void)
82 len, ei.loadsize); 96 len, ei.loadsize);
83 97
84 printf("Done! Decompressed 0x%lx bytes\n\r", len); 98 printf("Done! Decompressed 0x%lx bytes\n\r", len);
85 99out:
86 flush_cache(addr, ei.loadsize); 100 flush_cache(addr, ei.loadsize);
87 101
88 return (struct addr_range){addr, ei.memsize}; 102 return (struct addr_range){addr, ei.memsize};
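
The wrapper change above lets prep_kernel() tolerate a raw payload: partial_decompress() returning -1 is taken to mean there is no valid compressed stream, so the ELF header and load segments are copied straight out of the image instead of being decompressed. Condensed to its core, a sketch assuming the bootwrapper's own freestanding helpers:

/* bootwrapper context: partial_decompress() and memcpy() come from
 * the wrapper's own headers, not the kernel proper */
static int probe_uncompressed(void *vmlinuz_addr, unsigned long vmlinuz_size,
			      void *elfheader, unsigned long hdr_size)
{
	long len = partial_decompress(vmlinuz_addr, vmlinuz_size,
				      elfheader, hdr_size, 0);

	if (len == -1) {	/* no valid compressed data */
		memcpy(elfheader, vmlinuz_addr, hdr_size);
		return 1;	/* caller copies instead of decompressing */
	}
	return 0;
}
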
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 01b8a13f0224..3919332965af 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
26 std r0,0(r1); \ 26 std r0,0(r1); \
27 ptesync; \ 27 ptesync; \
28 ld r0,0(r1); \ 28 ld r0,0(r1); \
291: cmp cr0,r0,r0; \ 291: cmpd cr0,r0,r0; \
30 bne 1b; \ 30 bne 1b; \
31 IDLE_INST; \ 31 IDLE_INST; \
32 b . 32 b .
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 2e4e7d878c8e..84d49b197c32 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -93,6 +93,10 @@
93 ld reg,PACAKBASE(r13); /* get high part of &label */ \ 93 ld reg,PACAKBASE(r13); /* get high part of &label */ \
94 ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l; 94 ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
95 95
96#define __LOAD_HANDLER(reg, label) \
97 ld reg,PACAKBASE(r13); \
98 ori reg,reg,(ABS_ADDR(label))@l;
99
96/* Exception register prefixes */ 100/* Exception register prefixes */
97#define EXC_HV H 101#define EXC_HV H
98#define EXC_STD 102#define EXC_STD
@@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
208#define kvmppc_interrupt kvmppc_interrupt_pr 212#define kvmppc_interrupt kvmppc_interrupt_pr
209#endif 213#endif
210 214
215#ifdef CONFIG_RELOCATABLE
216#define BRANCH_TO_COMMON(reg, label) \
217 __LOAD_HANDLER(reg, label); \
218 mtctr reg; \
219 bctr
220
221#else
222#define BRANCH_TO_COMMON(reg, label) \
223 b label
224
225#endif
226
211#define __KVM_HANDLER_PROLOG(area, n) \ 227#define __KVM_HANDLER_PROLOG(area, n) \
212 BEGIN_FTR_SECTION_NESTED(947) \ 228 BEGIN_FTR_SECTION_NESTED(947) \
213 ld r10,area+EX_CFAR(r13); \ 229 ld r10,area+EX_CFAR(r13); \
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index f6f68f73e858..99e1397b71da 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
52 return cpumask_subset(mm_cpumask(mm), 52 return cpumask_subset(mm_cpumask(mm),
53 topology_sibling_cpumask(smp_processor_id())); 53 topology_sibling_cpumask(smp_processor_id()));
54} 54}
55
56static inline int mm_is_thread_local(struct mm_struct *mm)
57{
58 return cpumask_equal(mm_cpumask(mm),
59 cpumask_of(smp_processor_id()));
60}
61
55#else 62#else
56static inline int mm_is_core_local(struct mm_struct *mm) 63static inline int mm_is_core_local(struct mm_struct *mm)
57{ 64{
58 return 1; 65 return 1;
59} 66}
67
68static inline int mm_is_thread_local(struct mm_struct *mm)
69{
70 return 1;
71}
60#endif 72#endif
61 73
62#endif /* __KERNEL__ */ 74#endif /* __KERNEL__ */
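
mm_is_thread_local() is a stricter variant of mm_is_core_local(): the mm must have run on the current CPU alone, not merely within the current core's siblings, which is the correct condition for a radix tlbiel local flush. The tlb-radix hunks later in this diff switch to it. A sketch of the intended use, with flush_local()/flush_global() standing in as hypothetical placeholders for the real tlbiel/tlbie paths:

#include <asm/tlb.h>	/* mm_is_thread_local() */

static void flush_local(struct mm_struct *mm);	/* hypothetical: this CPU only */
static void flush_global(struct mm_struct *mm);	/* hypothetical: broadcast */

static void flush_mm_tlb(struct mm_struct *mm)
{
	if (mm_is_thread_local(mm))
		flush_local(mm);	/* cheap, no tlbie broadcast */
	else
		flush_global(mm);
}
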
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index cf12c580f6b2..e8cdfec8d512 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -16,6 +16,10 @@
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18 18
19#define __IGNORE_pkey_mprotect
20#define __IGNORE_pkey_alloc
21#define __IGNORE_pkey_free
22
19#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
20 24
21#include <linux/types.h> 25#include <linux/types.h>
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f129408c6022..08ba447a4b3d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -95,19 +95,35 @@ __start_interrupts:
95/* No virt vectors corresponding with 0x0..0x100 */ 95/* No virt vectors corresponding with 0x0..0x100 */
96EXC_VIRT_NONE(0x4000, 0x4100) 96EXC_VIRT_NONE(0x4000, 0x4100)
97 97
98EXC_REAL_BEGIN(system_reset, 0x100, 0x200) 98
99 SET_SCRATCH0(r13)
100#ifdef CONFIG_PPC_P7_NAP 99#ifdef CONFIG_PPC_P7_NAP
101BEGIN_FTR_SECTION 100 /*
102 /* Running native on arch 2.06 or later, check if we are 101 * If running native on arch 2.06 or later, check if we are waking up
103 * waking up from nap/sleep/winkle. 102 * from nap/sleep/winkle, and branch to idle handler.
104 */ 103 */
105 mfspr r13,SPRN_SRR1 104#define IDLETEST(n) \
106 rlwinm. r13,r13,47-31,30,31 105 BEGIN_FTR_SECTION ; \
107 beq 9f 106 mfspr r10,SPRN_SRR1 ; \
107 rlwinm. r10,r10,47-31,30,31 ; \
108 beq- 1f ; \
109 cmpwi cr3,r10,2 ; \
110 BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
1111: \
112 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
113#else
114#define IDLETEST NOTEST
115#endif
108 116
109 cmpwi cr3,r13,2 117EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
110 GET_PACA(r13) 118 SET_SCRATCH0(r13)
119 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
120 IDLETEST, 0x100)
121
122EXC_REAL_END(system_reset, 0x100, 0x200)
123EXC_VIRT_NONE(0x4100, 0x4200)
124
125#ifdef CONFIG_PPC_P7_NAP
126EXC_COMMON_BEGIN(system_reset_idle_common)
111 bl pnv_restore_hyp_resource 127 bl pnv_restore_hyp_resource
112 128
113 li r0,PNV_THREAD_RUNNING 129 li r0,PNV_THREAD_RUNNING
@@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
130 blt cr3,2f 146 blt cr3,2f
131 b pnv_wakeup_loss 147 b pnv_wakeup_loss
1322: b pnv_wakeup_noloss 1482: b pnv_wakeup_noloss
149#endif
133 150
1349:
135END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
136#endif /* CONFIG_PPC_P7_NAP */
137 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
138 NOTEST, 0x100)
139EXC_REAL_END(system_reset, 0x100, 0x200)
140EXC_VIRT_NONE(0x4100, 0x4200)
141EXC_COMMON(system_reset_common, 0x100, system_reset_exception) 151EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
142 152
143#ifdef CONFIG_PPC_PSERIES 153#ifdef CONFIG_PPC_PSERIES
@@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
817TRAMP_KVM(PACA_EXGEN, 0xb00) 827TRAMP_KVM(PACA_EXGEN, 0xb00)
818EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) 828EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
819 829
820 830#define LOAD_SYSCALL_HANDLER(reg) \
821#define LOAD_SYSCALL_HANDLER(reg) \ 831 __LOAD_HANDLER(reg, system_call_common)
822 ld reg,PACAKBASE(r13); \
823 ori reg,reg,(ABS_ADDR(system_call_common))@l;
824 832
825/* Syscall routine is used twice, in reloc-off and reloc-on paths */ 833/* Syscall routine is used twice, in reloc-off and reloc-on paths */
826#define SYSCALL_PSERIES_1 \ 834#define SYSCALL_PSERIES_1 \
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 9781c69eae57..03d089b3ed72 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
275 if (!stepped) { 275 if (!stepped) {
276 WARN(1, "Unable to handle hardware breakpoint. Breakpoint at " 276 WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
277 "0x%lx will be disabled.", info->address); 277 "0x%lx will be disabled.", info->address);
278 perf_event_disable(bp); 278 perf_event_disable_inatomic(bp);
279 goto out; 279 goto out;
280 } 280 }
281 /* 281 /*
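
perf_event_disable() takes the event-context mutex and can sleep, which is unsafe from the hw-breakpoint exception path; perf_event_disable_inatomic(), added in this same cycle, defers the actual disable instead. A one-line sketch of the rule, assuming the helper's declaration in <linux/perf_event.h>:

#include <linux/perf_event.h>

static void disable_bp_in_handler(struct perf_event *bp)
{
	/* Safe in atomic/exception context: the disable is deferred. */
	perf_event_disable_inatomic(bp);
}
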
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index bd739fed26e3..72dac0b58061 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
90 * Threads will spin in HMT_LOW until the lock bit is cleared. 90 * Threads will spin in HMT_LOW until the lock bit is cleared.
91 * r14 - pointer to core_idle_state 91 * r14 - pointer to core_idle_state
92 * r15 - used to load contents of core_idle_state 92 * r15 - used to load contents of core_idle_state
93 * r9 - used as a temporary variable
93 */ 94 */
94 95
95core_idle_lock_held: 96core_idle_lock_held:
@@ -99,6 +100,8 @@ core_idle_lock_held:
99 bne 3b 100 bne 3b
100 HMT_MEDIUM 101 HMT_MEDIUM
101 lwarx r15,0,r14 102 lwarx r15,0,r14
103 andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
104 bne core_idle_lock_held
102 blr 105 blr
103 106
104/* 107/*
@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
163 std r9,_MSR(r1) 166 std r9,_MSR(r1)
164 std r1,PACAR1(r13) 167 std r1,PACAR1(r13)
165 168
166#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
167 /* Tell KVM we're entering idle */
168 li r4,KVM_HWTHREAD_IN_IDLE
169 stb r4,HSTATE_HWTHREAD_STATE(r13)
170#endif
171
172 /* 169 /*
173 * Go to real mode to do the nap, as required by the architecture. 170 * Go to real mode to do the nap, as required by the architecture.
174 * Also, we need to be in real mode before setting hwthread_state, 171 * Also, we need to be in real mode before setting hwthread_state,
@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
185 182
186 .globl pnv_enter_arch207_idle_mode 183 .globl pnv_enter_arch207_idle_mode
187pnv_enter_arch207_idle_mode: 184pnv_enter_arch207_idle_mode:
185#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
186 /* Tell KVM we're entering idle */
187 li r4,KVM_HWTHREAD_IN_IDLE
188 /******************************************************/
189 /* N O T E W E L L ! ! ! N O T E W E L L */
190 /* The following store to HSTATE_HWTHREAD_STATE(r13) */
191 /* MUST occur in real mode, i.e. with the MMU off, */
192 /* and the MMU must stay off until we clear this flag */
193 /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
194 /* reset interrupt vector in exceptions-64s.S. */
195 /* The reason is that another thread can switch the */
196 /* MMU to a guest context whenever this flag is set */
197 /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
198 /* that would potentially cause this thread to start */
199 /* executing instructions from guest memory in */
200 /* hypervisor mode, leading to a host crash or data */
201 /* corruption, or worse. */
202 /******************************************************/
203 stb r4,HSTATE_HWTHREAD_STATE(r13)
204#endif
188 stb r3,PACA_THREAD_IDLE_STATE(r13) 205 stb r3,PACA_THREAD_IDLE_STATE(r13)
189 cmpwi cr3,r3,PNV_THREAD_SLEEP 206 cmpwi cr3,r3,PNV_THREAD_SLEEP
190 bge cr3,2f 207 bge cr3,2f
@@ -250,6 +267,12 @@ enter_winkle:
250 * r3 - requested stop state 267 * r3 - requested stop state
251 */ 268 */
252power_enter_stop: 269power_enter_stop:
270#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
271 /* Tell KVM we're entering idle */
272 li r4,KVM_HWTHREAD_IN_IDLE
273 /* DO THIS IN REAL MODE! See comment above. */
274 stb r4,HSTATE_HWTHREAD_STATE(r13)
275#endif
253/* 276/*
254 * Check if the requested state is a deep idle state. 277 * Check if the requested state is a deep idle state.
255 */ 278 */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9e7c10fe205f..ce6dc61b15b2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
1012 /* Ensure that restore_math() will restore */ 1012 /* Ensure that restore_math() will restore */
1013 if (msr_diff & MSR_FP) 1013 if (msr_diff & MSR_FP)
1014 current->thread.load_fp = 1; 1014 current->thread.load_fp = 1;
1015#ifdef CONFIG_ALIVEC 1015#ifdef CONFIG_ALTIVEC
1016 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC) 1016 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1017 current->thread.load_vec = 1; 1017 current->thread.load_vec = 1;
1018#endif 1018#endif
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f52b7db327c8..010b7b310237 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -74,7 +74,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
74 break; 74 break;
75 75
76 copied = access_process_vm(child, (u64)addrOthers, &tmp, 76 copied = access_process_vm(child, (u64)addrOthers, &tmp,
77 sizeof(tmp), 0); 77 sizeof(tmp), FOLL_FORCE);
78 if (copied != sizeof(tmp)) 78 if (copied != sizeof(tmp))
79 break; 79 break;
80 ret = put_user(tmp, (u32 __user *)data); 80 ret = put_user(tmp, (u32 __user *)data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
179 break; 179 break;
180 ret = 0; 180 ret = 0;
181 if (access_process_vm(child, (u64)addrOthers, &tmp, 181 if (access_process_vm(child, (u64)addrOthers, &tmp,
182 sizeof(tmp), 1) == sizeof(tmp)) 182 sizeof(tmp),
183 FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
183 break; 184 break;
184 ret = -EIO; 185 ret = -EIO;
185 break; 186 break;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 82ff5de8b1e7..a0ea63ac2b52 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -23,6 +23,7 @@
23#include <asm/ppc-opcode.h> 23#include <asm/ppc-opcode.h>
24#include <asm/pnv-pci.h> 24#include <asm/pnv-pci.h>
25#include <asm/opal.h> 25#include <asm/opal.h>
26#include <asm/smp.h>
26 27
27#include "book3s_xics.h" 28#include "book3s_xics.h"
28 29
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index bb0354222b11..362954f98029 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
106 switch (REGION_ID(ea)) { 106 switch (REGION_ID(ea)) {
107 case USER_REGION_ID: 107 case USER_REGION_ID:
108 pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); 108 pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
109 if (mm == NULL)
110 return 1;
109 psize = get_slice_psize(mm, ea); 111 psize = get_slice_psize(mm, ea);
110 ssize = user_segment_size(ea); 112 ssize = user_segment_size(ea);
111 vsid = get_vsid(mm->context.id, ea, ssize); 113 vsid = get_vsid(mm->context.id, ea, ssize);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 75b9cd6150cc..a51c188b81f3 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -845,7 +845,7 @@ void __init dump_numa_cpu_topology(void)
845 return; 845 return;
846 846
847 for_each_online_node(node) { 847 for_each_online_node(node) {
848 printk(KERN_DEBUG "Node %d CPUs:", node); 848 pr_info("Node %d CPUs:", node);
849 849
850 count = 0; 850 count = 0;
851 /* 851 /*
@@ -856,52 +856,18 @@ void __init dump_numa_cpu_topology(void)
856 if (cpumask_test_cpu(cpu, 856 if (cpumask_test_cpu(cpu,
857 node_to_cpumask_map[node])) { 857 node_to_cpumask_map[node])) {
858 if (count == 0) 858 if (count == 0)
859 printk(" %u", cpu); 859 pr_cont(" %u", cpu);
860 ++count; 860 ++count;
861 } else { 861 } else {
862 if (count > 1) 862 if (count > 1)
863 printk("-%u", cpu - 1); 863 pr_cont("-%u", cpu - 1);
864 count = 0; 864 count = 0;
865 } 865 }
866 } 866 }
867 867
868 if (count > 1) 868 if (count > 1)
869 printk("-%u", nr_cpu_ids - 1); 869 pr_cont("-%u", nr_cpu_ids - 1);
870 printk("\n"); 870 pr_cont("\n");
871 }
872}
873
874static void __init dump_numa_memory_topology(void)
875{
876 unsigned int node;
877 unsigned int count;
878
879 if (min_common_depth == -1 || !numa_enabled)
880 return;
881
882 for_each_online_node(node) {
883 unsigned long i;
884
885 printk(KERN_DEBUG "Node %d Memory:", node);
886
887 count = 0;
888
889 for (i = 0; i < memblock_end_of_DRAM();
890 i += (1 << SECTION_SIZE_BITS)) {
891 if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
892 if (count == 0)
893 printk(" 0x%lx", i);
894 ++count;
895 } else {
896 if (count > 0)
897 printk("-0x%lx", i);
898 count = 0;
899 }
900 }
901
902 if (count > 0)
903 printk("-0x%lx", i);
904 printk("\n");
905 } 871 }
906} 872}
907 873
@@ -947,8 +913,6 @@ void __init initmem_init(void)
947 913
948 if (parse_numa_properties()) 914 if (parse_numa_properties())
949 setup_nonnuma(); 915 setup_nonnuma();
950 else
951 dump_numa_memory_topology();
952 916
953 memblock_dump_all(); 917 memblock_dump_all();
954 918
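
The printk() rework in this release terminates a record whenever KERN_CONT is absent, so lines assembled piecewise, like the CPU map above, must use pr_cont() for every fragment after the first. A minimal sketch of the convention:

#include <linux/printk.h>

static void print_cpu_range(int node, int first, int last)
{
	pr_info("Node %d CPUs:", node);	/* opens the record */
	pr_cont(" %d-%d", first, last);	/* continuation fragment */
	pr_cont("\n");			/* terminates the line */
}
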
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 0e49ec541ab5..bda8c43be78a 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
175 if (unlikely(pid == MMU_NO_CONTEXT)) 175 if (unlikely(pid == MMU_NO_CONTEXT))
176 goto no_context; 176 goto no_context;
177 177
178 if (!mm_is_core_local(mm)) { 178 if (!mm_is_thread_local(mm)) {
179 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 179 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
180 180
181 if (lock_tlbie) 181 if (lock_tlbie)
@@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
201 if (unlikely(pid == MMU_NO_CONTEXT)) 201 if (unlikely(pid == MMU_NO_CONTEXT))
202 goto no_context; 202 goto no_context;
203 203
204 if (!mm_is_core_local(mm)) { 204 if (!mm_is_thread_local(mm)) {
205 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 205 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
206 206
207 if (lock_tlbie) 207 if (lock_tlbie)
@@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
226 pid = mm ? mm->context.id : 0; 226 pid = mm ? mm->context.id : 0;
227 if (unlikely(pid == MMU_NO_CONTEXT)) 227 if (unlikely(pid == MMU_NO_CONTEXT))
228 goto bail; 228 goto bail;
229 if (!mm_is_core_local(mm)) { 229 if (!mm_is_thread_local(mm)) {
230 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 230 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
231 231
232 if (lock_tlbie) 232 if (lock_tlbie)
@@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
321{ 321{
322 unsigned long pid; 322 unsigned long pid;
323 unsigned long addr; 323 unsigned long addr;
324 int local = mm_is_core_local(mm); 324 int local = mm_is_thread_local(mm);
325 unsigned long ap = mmu_get_ap(psize); 325 unsigned long ap = mmu_get_ap(psize);
326 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 326 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
327 unsigned long page_size = 1UL << mmu_psize_defs[psize].shift; 327 unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 64053d9ac3f2..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -12,9 +12,7 @@
12 12
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
14 14
15unsigned long return_address(int depth); 15#define ftrace_return_address(n) __builtin_return_address(n)
16
17#define ftrace_return_address(n) return_address(n)
18 16
19void _mcount(void); 17void _mcount(void);
20void ftrace_caller(void); 18void ftrace_caller(void);
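
With the dump_trace() callback signature changing in the following hunks, the old dump_trace()-based return_address() goes away and s390 falls back to the compiler builtin, as most architectures already do. A sketch of what ftrace_return_address(n) now expands to; depths greater than 0 rely on the stack backchain being maintained:

static inline unsigned long caller_ip(void)
{
	/* depth 0: the address our immediate caller will return to */
	return (unsigned long)__builtin_return_address(0);
}
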
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 03323175de30..602af692efdc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -192,7 +192,7 @@ struct task_struct;
192struct mm_struct; 192struct mm_struct;
193struct seq_file; 193struct seq_file;
194 194
195typedef int (*dump_trace_func_t)(void *data, unsigned long address); 195typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
196void dump_trace(dump_trace_func_t func, void *data, 196void dump_trace(dump_trace_func_t func, void *data,
197 struct task_struct *task, unsigned long sp); 197 struct task_struct *task, unsigned long sp);
198 198
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 02613bad8bbb..3066031a73fe 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,6 +9,9 @@
9#include <uapi/asm/unistd.h> 9#include <uapi/asm/unistd.h>
10 10
11#define __IGNORE_time 11#define __IGNORE_time
12#define __IGNORE_pkey_mprotect
13#define __IGNORE_pkey_alloc
14#define __IGNORE_pkey_free
12 15
13#define __ARCH_WANT_OLD_READDIR 16#define __ARCH_WANT_OLD_READDIR
14#define __ARCH_WANT_SYS_ALARM 17#define __ARCH_WANT_SYS_ALARM
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 43446fa2a4e5..c74c59236f44 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -2014,12 +2014,12 @@ void show_code(struct pt_regs *regs)
2014 *ptr++ = '\t'; 2014 *ptr++ = '\t';
2015 ptr += print_insn(ptr, code + start, addr); 2015 ptr += print_insn(ptr, code + start, addr);
2016 start += opsize; 2016 start += opsize;
2017 printk("%s", buffer); 2017 pr_cont("%s", buffer);
2018 ptr = buffer; 2018 ptr = buffer;
2019 ptr += sprintf(ptr, "\n "); 2019 ptr += sprintf(ptr, "\n ");
2020 hops++; 2020 hops++;
2021 } 2021 }
2022 printk("\n"); 2022 pr_cont("\n");
2023} 2023}
2024 2024
2025void print_fn_code(unsigned char *code, unsigned long len) 2025void print_fn_code(unsigned char *code, unsigned long len)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 6693383bc01b..55d4fe174fd9 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
38 if (sp < low || sp > high - sizeof(*sf)) 38 if (sp < low || sp > high - sizeof(*sf))
39 return sp; 39 return sp;
40 sf = (struct stack_frame *) sp; 40 sf = (struct stack_frame *) sp;
41 if (func(data, sf->gprs[8], 0))
42 return sp;
41 /* Follow the backchain. */ 43 /* Follow the backchain. */
42 while (1) { 44 while (1) {
43 if (func(data, sf->gprs[8]))
44 return sp;
45 low = sp; 45 low = sp;
46 sp = sf->back_chain; 46 sp = sf->back_chain;
47 if (!sp) 47 if (!sp)
@@ -49,6 +49,8 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
49 if (sp <= low || sp > high - sizeof(*sf)) 49 if (sp <= low || sp > high - sizeof(*sf))
50 return sp; 50 return sp;
51 sf = (struct stack_frame *) sp; 51 sf = (struct stack_frame *) sp;
52 if (func(data, sf->gprs[8], 1))
53 return sp;
52 } 54 }
53 /* Zero backchain detected, check for interrupt frame. */ 55 /* Zero backchain detected, check for interrupt frame. */
54 sp = (unsigned long) (sf + 1); 56 sp = (unsigned long) (sf + 1);
@@ -56,7 +58,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
56 return sp; 58 return sp;
57 regs = (struct pt_regs *) sp; 59 regs = (struct pt_regs *) sp;
58 if (!user_mode(regs)) { 60 if (!user_mode(regs)) {
59 if (func(data, regs->psw.addr)) 61 if (func(data, regs->psw.addr, 1))
60 return sp; 62 return sp;
61 } 63 }
62 low = sp; 64 low = sp;
@@ -85,33 +87,12 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
85} 87}
86EXPORT_SYMBOL_GPL(dump_trace); 88EXPORT_SYMBOL_GPL(dump_trace);
87 89
88struct return_address_data { 90static int show_address(void *data, unsigned long address, int reliable)
89 unsigned long address;
90 int depth;
91};
92
93static int __return_address(void *data, unsigned long address)
94{
95 struct return_address_data *rd = data;
96
97 if (rd->depth--)
98 return 0;
99 rd->address = address;
100 return 1;
101}
102
103unsigned long return_address(int depth)
104{
105 struct return_address_data rd = { .depth = depth + 2 };
106
107 dump_trace(__return_address, &rd, NULL, current_stack_pointer());
108 return rd.address;
109}
110EXPORT_SYMBOL_GPL(return_address);
111
112static int show_address(void *data, unsigned long address)
113{ 91{
114 printk("([<%016lx>] %pSR)\n", address, (void *)address); 92 if (reliable)
93 printk(" [<%016lx>] %pSR \n", address, (void *)address);
94 else
95 printk("([<%016lx>] %pSR)\n", address, (void *)address);
115 return 0; 96 return 0;
116} 97}
117 98
@@ -138,14 +119,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
138 else 119 else
139 stack = (unsigned long *)task->thread.ksp; 120 stack = (unsigned long *)task->thread.ksp;
140 } 121 }
122 printk(KERN_DEFAULT "Stack:\n");
141 for (i = 0; i < 20; i++) { 123 for (i = 0; i < 20; i++) {
142 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 124 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
143 break; 125 break;
144 if ((i * sizeof(long) % 32) == 0) 126 if (i % 4 == 0)
145 printk("%s ", i == 0 ? "" : "\n"); 127 printk(KERN_DEFAULT " ");
146 printk("%016lx ", *stack++); 128 pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
147 } 129 }
148 printk("\n");
149 show_trace(task, (unsigned long)sp); 130 show_trace(task, (unsigned long)sp);
150} 131}
151 132
@@ -163,13 +144,13 @@ void show_registers(struct pt_regs *regs)
163 mode = user_mode(regs) ? "User" : "Krnl"; 144 mode = user_mode(regs) ? "User" : "Krnl";
164 printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); 145 printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
165 if (!user_mode(regs)) 146 if (!user_mode(regs))
166 printk(" (%pSR)", (void *)regs->psw.addr); 147 pr_cont(" (%pSR)", (void *)regs->psw.addr);
167 printk("\n"); 148 pr_cont("\n");
168 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 149 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
169 "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e, 150 "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
170 psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm); 151 psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
171 printk(" RI:%x EA:%x", psw->ri, psw->eaba); 152 pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
172 printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode, 153 printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
173 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 154 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
174 printk(" %016lx %016lx %016lx %016lx\n", 155 printk(" %016lx %016lx %016lx %016lx\n",
175 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 156 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@@ -205,14 +186,14 @@ void die(struct pt_regs *regs, const char *str)
205 printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff, 186 printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
206 regs->int_code >> 17, ++die_counter); 187 regs->int_code >> 17, ++die_counter);
207#ifdef CONFIG_PREEMPT 188#ifdef CONFIG_PREEMPT
208 printk("PREEMPT "); 189 pr_cont("PREEMPT ");
209#endif 190#endif
210#ifdef CONFIG_SMP 191#ifdef CONFIG_SMP
211 printk("SMP "); 192 pr_cont("SMP ");
212#endif 193#endif
213 if (debug_pagealloc_enabled()) 194 if (debug_pagealloc_enabled())
214 printk("DEBUG_PAGEALLOC"); 195 pr_cont("DEBUG_PAGEALLOC");
215 printk("\n"); 196 pr_cont("\n");
216 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); 197 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
217 print_modules(); 198 print_modules();
218 show_regs(regs); 199 show_regs(regs);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 17431f63de00..955a7b6fa0a4 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
222} 222}
223arch_initcall(service_level_perf_register); 223arch_initcall(service_level_perf_register);
224 224
225static int __perf_callchain_kernel(void *data, unsigned long address) 225static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
226{ 226{
227 struct perf_callchain_entry_ctx *entry = data; 227 struct perf_callchain_entry_ctx *entry = data;
228 228
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 44f84b23d4e5..355db9db8210 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
27 return 1; 27 return 1;
28} 28}
29 29
30static int save_address(void *data, unsigned long address) 30static int save_address(void *data, unsigned long address, int reliable)
31{ 31{
32 return __save_address(data, address, 0); 32 return __save_address(data, address, 0);
33} 33}
34 34
35static int save_address_nosched(void *data, unsigned long address) 35static int save_address_nosched(void *data, unsigned long address, int reliable)
36{ 36{
37 return __save_address(data, address, 1); 37 return __save_address(data, address, 1);
38} 38}
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 1cab8a177d0e..7a27eebab28a 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -119,8 +119,13 @@ static int handle_validity(struct kvm_vcpu *vcpu)
119 119
120 vcpu->stat.exit_validity++; 120 vcpu->stat.exit_validity++;
121 trace_kvm_s390_intercept_validity(vcpu, viwhy); 121 trace_kvm_s390_intercept_validity(vcpu, viwhy);
122 WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy); 122 KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
123 return -EOPNOTSUPP; 123 current->pid, vcpu->kvm);
124
125 /* do not warn on invalid runtime instrumentation mode */
126 WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
127 viwhy);
128 return -EINVAL;
124} 129}
125 130
126static int handle_instruction(struct kvm_vcpu *vcpu) 131static int handle_instruction(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index adb0c34bf431..18d4107e10ee 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -266,7 +266,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
266 /* Try to get the remaining pages with get_user_pages */ 266 /* Try to get the remaining pages with get_user_pages */
267 start += nr << PAGE_SHIFT; 267 start += nr << PAGE_SHIFT;
268 pages += nr; 268 pages += nr;
269 ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); 269 ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
270 write ? FOLL_WRITE : 0);
270 /* Have to be a bit careful with return values */ 271 /* Have to be a bit careful with return values */
271 if (nr > 0) 272 if (nr > 0)
272 ret = (ret < 0) ? nr : ret + nr; 273 ret = (ret < 0) ? nr : ret + nr;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index cd404aa3931c..4a0c5bce3552 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -217,6 +217,7 @@ static __init int setup_hugepagesz(char *opt)
217 } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) { 217 } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
218 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); 218 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
219 } else { 219 } else {
220 hugetlb_bad_size();
220 pr_err("hugepagesz= specifies an unsupported page size %s\n", 221 pr_err("hugepagesz= specifies an unsupported page size %s\n",
221 string); 222 string);
222 return 0; 223 return 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f56a39bd8ba6..b3e9d18f2ec6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
151#ifdef CONFIG_MEMORY_HOTPLUG 151#ifdef CONFIG_MEMORY_HOTPLUG
152int arch_add_memory(int nid, u64 start, u64 size, bool for_device) 152int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
153{ 153{
154 unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM()); 154 unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
155 unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
156 unsigned long start_pfn = PFN_DOWN(start); 155 unsigned long start_pfn = PFN_DOWN(start);
157 unsigned long size_pages = PFN_DOWN(size); 156 unsigned long size_pages = PFN_DOWN(size);
158 unsigned long nr_pages; 157 pg_data_t *pgdat = NODE_DATA(nid);
159 int rc, zone_enum; 158 struct zone *zone;
159 int rc, i;
160 160
161 rc = vmem_add_mapping(start, size); 161 rc = vmem_add_mapping(start, size);
162 if (rc) 162 if (rc)
163 return rc; 163 return rc;
164 164
165 while (size_pages > 0) { 165 for (i = 0; i < MAX_NR_ZONES; i++) {
166 if (start_pfn < dma_end_pfn) { 166 zone = pgdat->node_zones + i;
167 nr_pages = (start_pfn + size_pages > dma_end_pfn) ? 167 if (zone_idx(zone) != ZONE_MOVABLE) {
168 dma_end_pfn - start_pfn : size_pages; 168 /* Add range within existing zone limits, if possible */
169 zone_enum = ZONE_DMA; 169 zone_start_pfn = zone->zone_start_pfn;
170 } else if (start_pfn < normal_end_pfn) { 170 zone_end_pfn = zone->zone_start_pfn +
171 nr_pages = (start_pfn + size_pages > normal_end_pfn) ? 171 zone->spanned_pages;
172 normal_end_pfn - start_pfn : size_pages;
173 zone_enum = ZONE_NORMAL;
174 } else { 172 } else {
175 nr_pages = size_pages; 173 /* Add remaining range to ZONE_MOVABLE */
176 zone_enum = ZONE_MOVABLE; 174 zone_start_pfn = start_pfn;
175 zone_end_pfn = start_pfn + size_pages;
177 } 176 }
178 rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum, 177 if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
179 start_pfn, size_pages); 178 continue;
179 nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
180 zone_end_pfn - start_pfn : size_pages;
181 rc = __add_pages(nid, zone, start_pfn, nr_pages);
180 if (rc) 182 if (rc)
181 break; 183 break;
182 start_pfn += nr_pages; 184 start_pfn += nr_pages;
183 size_pages -= nr_pages; 185 size_pages -= nr_pages;
186 if (!size_pages)
187 break;
184 } 188 }
185 if (rc) 189 if (rc)
186 vmem_remove_mapping(start, size); 190 vmem_remove_mapping(start, size);
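
For illustration, suppose the node's ZONE_NORMAL currently spans pfns 0x80000-0x100000 and a block covering pfns 0xe0000-0x120000 is hotplugged. The loop adds 0xe0000-0x100000 to ZONE_NORMAL, because start_pfn falls inside that zone's existing span, then advances start_pfn and places the leftover 0x100000-0x120000 in ZONE_MOVABLE, whose range is defined as whatever remains. The old code instead derived the zone from the fixed MAX_DMA_ADDRESS and end-of-DRAM boundaries, ignoring the zones' actual current spans.
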
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 16f4c3960b87..9a4de4599c7b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -13,7 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15 15
16static int __s390_backtrace(void *data, unsigned long address) 16static int __s390_backtrace(void *data, unsigned long address, int reliable)
17{ 17{
18 unsigned int *depth = data; 18 unsigned int *depth = data;
19 19
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 55836188b217..4f7314d5f334 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -131,7 +131,7 @@ read_tsk_long(struct task_struct *child,
131{ 131{
132 int copied; 132 int copied;
133 133
134 copied = access_process_vm(child, addr, res, sizeof(*res), 0); 134 copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
135 135
136 return copied != sizeof(*res) ? -EIO : 0; 136 return copied != sizeof(*res) ? -EIO : 0;
137} 137}
@@ -142,7 +142,7 @@ read_tsk_short(struct task_struct *child,
142{ 142{
143 int copied; 143 int copied;
144 144
145 copied = access_process_vm(child, addr, res, sizeof(*res), 0); 145 copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
146 146
147 return copied != sizeof(*res) ? -EIO : 0; 147 return copied != sizeof(*res) ? -EIO : 0;
148} 148}
@@ -153,7 +153,8 @@ write_tsk_short(struct task_struct *child,
153{ 153{
154 int copied; 154 int copied;
155 155
156 copied = access_process_vm(child, addr, &val, sizeof(val), 1); 156 copied = access_process_vm(child, addr, &val, sizeof(val),
157 FOLL_FORCE | FOLL_WRITE);
157 158
158 return copied != sizeof(val) ? -EIO : 0; 159 return copied != sizeof(val) ? -EIO : 0;
159} 160}
@@ -164,7 +165,8 @@ write_tsk_long(struct task_struct *child,
164{ 165{
165 int copied; 166 int copied;
166 167
167 copied = access_process_vm(child, addr, &val, sizeof(val), 1); 168 copied = access_process_vm(child, addr, &val, sizeof(val),
169 FOLL_FORCE | FOLL_WRITE);
168 170
169 return copied != sizeof(val) ? -EIO : 0; 171 return copied != sizeof(val) ? -EIO : 0;
170} 172}
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 00476662ac2c..336f33a419d9 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -31,7 +31,7 @@ isa-y := $(isa-y)-up
31endif 31endif
32 32
33cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) 33cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
34cflags-$(CONFIG_CPU_J2) := $(call cc-option,-mj2,) 34cflags-$(CONFIG_CPU_J2) += $(call cc-option,-mj2,)
35cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ 35cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
36 $(call cc-option,-m2a-nofpu,) \ 36 $(call cc-option,-m2a-nofpu,) \
37 $(call cc-option,-m4-nofpu,) 37 $(call cc-option,-m4-nofpu,)
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index e9c2c42031fe..4e21949593cf 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -22,6 +22,16 @@ config SH_DEVICE_TREE
22 have sufficient driver coverage to use this option; do not 22 have sufficient driver coverage to use this option; do not
23 select it if you are using original SuperH hardware. 23 select it if you are using original SuperH hardware.
24 24
25config SH_JCORE_SOC
26 bool "J-Core SoC"
27 depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
28 select CLKSRC_JCORE_PIT
29 select JCORE_AIC
30 default y if CPU_J2
31 help
32 Select this option to include drivers core components of the
33 J-Core SoC, including interrupt controllers and timers.
34
25config SH_SOLUTION_ENGINE 35config SH_SOLUTION_ENGINE
26 bool "SolutionEngine" 36 bool "SolutionEngine"
27 select SOLUTION_ENGINE 37 select SOLUTION_ENGINE
diff --git a/arch/sh/configs/j2_defconfig b/arch/sh/configs/j2_defconfig
index 94d1eca52f72..2eb81ebe3888 100644
--- a/arch/sh/configs/j2_defconfig
+++ b/arch/sh/configs/j2_defconfig
@@ -8,6 +8,7 @@ CONFIG_MEMORY_START=0x10000000
8CONFIG_MEMORY_SIZE=0x04000000 8CONFIG_MEMORY_SIZE=0x04000000
9CONFIG_CPU_BIG_ENDIAN=y 9CONFIG_CPU_BIG_ENDIAN=y
10CONFIG_SH_DEVICE_TREE=y 10CONFIG_SH_DEVICE_TREE=y
11CONFIG_SH_JCORE_SOC=y
11CONFIG_HZ_100=y 12CONFIG_HZ_100=y
12CONFIG_CMDLINE_OVERWRITE=y 13CONFIG_CMDLINE_OVERWRITE=y
13CONFIG_CMDLINE="console=ttyUL0 earlycon" 14CONFIG_CMDLINE="console=ttyUL0 earlycon"
@@ -20,6 +21,7 @@ CONFIG_INET=y
20CONFIG_DEVTMPFS=y 21CONFIG_DEVTMPFS=y
21CONFIG_DEVTMPFS_MOUNT=y 22CONFIG_DEVTMPFS_MOUNT=y
22CONFIG_NETDEVICES=y 23CONFIG_NETDEVICES=y
24CONFIG_SERIAL_EARLYCON=y
23CONFIG_SERIAL_UARTLITE=y 25CONFIG_SERIAL_UARTLITE=y
24CONFIG_SERIAL_UARTLITE_CONSOLE=y 26CONFIG_SERIAL_UARTLITE_CONSOLE=y
25CONFIG_I2C=y 27CONFIG_I2C=y
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 40fa6c8adc43..063c298ba56c 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -258,7 +258,8 @@ slow_irqon:
258 pages += nr; 258 pages += nr;
259 259
260 ret = get_user_pages_unlocked(start, 260 ret = get_user_pages_unlocked(start,
261 (end - start) >> PAGE_SHIFT, write, 0, pages); 261 (end - start) >> PAGE_SHIFT, pages,
262 write ? FOLL_WRITE : 0);
262 263
263 /* Have to be a bit careful with return values */ 264 /* Have to be a bit careful with return values */
264 if (nr > 0) { 265 if (nr > 0) {
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ddc4928a089..ac082dd8c67d 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
127 if (copy_from_user(kbuf, (void __user *) uaddr, len)) 127 if (copy_from_user(kbuf, (void __user *) uaddr, len))
128 return -EFAULT; 128 return -EFAULT;
129 } else { 129 } else {
130 int len2 = access_process_vm(target, uaddr, kbuf, len, 0); 130 int len2 = access_process_vm(target, uaddr, kbuf, len,
131 FOLL_FORCE);
131 if (len2 != len) 132 if (len2 != len)
132 return -EFAULT; 133 return -EFAULT;
133 } 134 }
@@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
141 if (copy_to_user((void __user *) uaddr, kbuf, len)) 142 if (copy_to_user((void __user *) uaddr, kbuf, len))
142 return -EFAULT; 143 return -EFAULT;
143 } else { 144 } else {
144 int len2 = access_process_vm(target, uaddr, kbuf, len, 1); 145 int len2 = access_process_vm(target, uaddr, kbuf, len,
146 FOLL_FORCE | FOLL_WRITE);
145 if (len2 != len) 147 if (len2 != len)
146 return -EFAULT; 148 return -EFAULT;
147 } 149 }
@@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target,
505 if (access_process_vm(target, 507 if (access_process_vm(target,
506 (unsigned long) 508 (unsigned long)
507 &reg_window[pos], 509 &reg_window[pos],
508 k, sizeof(*k), 0) 510 k, sizeof(*k),
511 FOLL_FORCE)
509 != sizeof(*k)) 512 != sizeof(*k))
510 return -EFAULT; 513 return -EFAULT;
511 k++; 514 k++;
@@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target,
531 if (access_process_vm(target, 534 if (access_process_vm(target,
532 (unsigned long) 535 (unsigned long)
533 &reg_window[pos], 536 &reg_window[pos],
534 &reg, sizeof(reg), 0) 537 &reg, sizeof(reg),
538 FOLL_FORCE)
535 != sizeof(reg)) 539 != sizeof(reg))
536 return -EFAULT; 540 return -EFAULT;
537 if (access_process_vm(target, 541 if (access_process_vm(target,
538 (unsigned long) u, 542 (unsigned long) u,
539 &reg, sizeof(reg), 1) 543 &reg, sizeof(reg),
544 FOLL_FORCE | FOLL_WRITE)
540 != sizeof(reg)) 545 != sizeof(reg))
541 return -EFAULT; 546 return -EFAULT;
542 pos++; 547 pos++;
@@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target,
615 (unsigned long) 620 (unsigned long)
616 &reg_window[pos], 621 &reg_window[pos],
617 (void *) k, 622 (void *) k,
618 sizeof(*k), 1) 623 sizeof(*k),
624 FOLL_FORCE | FOLL_WRITE)
619 != sizeof(*k)) 625 != sizeof(*k))
620 return -EFAULT; 626 return -EFAULT;
621 k++; 627 k++;
@@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target,
642 if (access_process_vm(target, 648 if (access_process_vm(target,
643 (unsigned long) 649 (unsigned long)
644 u, 650 u,
645 &reg, sizeof(reg), 0) 651 &reg, sizeof(reg),
652 FOLL_FORCE)
646 != sizeof(reg)) 653 != sizeof(reg))
647 return -EFAULT; 654 return -EFAULT;
648 if (access_process_vm(target, 655 if (access_process_vm(target,
649 (unsigned long) 656 (unsigned long)
650 &reg_window[pos], 657 &reg_window[pos],
651 &reg, sizeof(reg), 1) 658 &reg, sizeof(reg),
659 FOLL_FORCE | FOLL_WRITE)
652 != sizeof(reg)) 660 != sizeof(reg))
653 return -EFAULT; 661 return -EFAULT;
654 pos++; 662 pos++;
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 4e06750a5d29..cd0e32bbcb1d 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -238,7 +238,8 @@ slow:
238 pages += nr; 238 pages += nr;
239 239
240 ret = get_user_pages_unlocked(start, 240 ret = get_user_pages_unlocked(start,
241 (end - start) >> PAGE_SHIFT, write, 0, pages); 241 (end - start) >> PAGE_SHIFT, pages,
242 write ? FOLL_WRITE : 0);
242 243
243 /* Have to be a bit careful with return values */ 244 /* Have to be a bit careful with return values */
244 if (nr > 0) { 245 if (nr > 0) {
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 77f28ce9c646..9976fcecd17e 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -5,8 +5,8 @@
5OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y 5OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
6OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y 6OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
7 7
8CFLAGS_syscall_64.o += -Wno-override-init 8CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
9CFLAGS_syscall_32.o += -Wno-override-init 9CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
10obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o 10obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
11obj-y += common.o 11obj-y += common.o
12 12
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index ff6ef7b30822..2b3618542544 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -389,5 +389,3 @@
389380 i386 pkey_mprotect sys_pkey_mprotect 389380 i386 pkey_mprotect sys_pkey_mprotect
390381 i386 pkey_alloc sys_pkey_alloc 390381 i386 pkey_alloc sys_pkey_alloc
391382 i386 pkey_free sys_pkey_free 391382 i386 pkey_free sys_pkey_free
392#383 i386 pkey_get sys_pkey_get
393#384 i386 pkey_set sys_pkey_set
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 2f024d02511d..e93ef0b38db8 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -338,8 +338,6 @@
338329 common pkey_mprotect sys_pkey_mprotect 338329 common pkey_mprotect sys_pkey_mprotect
339330 common pkey_alloc sys_pkey_alloc 339330 common pkey_alloc sys_pkey_alloc
340331 common pkey_free sys_pkey_free 340331 common pkey_free sys_pkey_free
341#332 common pkey_get sys_pkey_get
342#333 common pkey_set sys_pkey_set
343 341
344# 342#
345# x32-specific system call numbers start at 512 to avoid cache impact 343# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a3a9eb84b5cf..a74a2dbc0180 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3607,10 +3607,14 @@ __init int intel_pmu_init(void)
3607 3607
3608 /* 3608 /*
3609 * Quirk: v2 perfmon does not report fixed-purpose events, so 3609 * Quirk: v2 perfmon does not report fixed-purpose events, so
3610 * assume at least 3 events: 3610 * assume at least 3 events, when not running in a hypervisor:
3611 */ 3611 */
3612 if (version > 1) 3612 if (version > 1) {
3613 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); 3613 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
3614
3615 x86_pmu.num_counters_fixed =
3616 max((int)edx.split.num_counters_fixed, assume);
3617 }
3614 3618
3615 if (boot_cpu_has(X86_FEATURE_PDCM)) { 3619 if (boot_cpu_has(X86_FEATURE_PDCM)) {
3616 u64 capabilities; 3620 u64 capabilities;
@@ -3898,6 +3902,7 @@ __init int intel_pmu_init(void)
3898 break; 3902 break;
3899 3903
3900 case INTEL_FAM6_XEON_PHI_KNL: 3904 case INTEL_FAM6_XEON_PHI_KNL:
3905 case INTEL_FAM6_XEON_PHI_KNM:
3901 memcpy(hw_cache_event_ids, 3906 memcpy(hw_cache_event_ids,
3902 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 3907 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
3903 memcpy(hw_cache_extra_regs, 3908 memcpy(hw_cache_extra_regs,
@@ -3912,7 +3917,7 @@ __init int intel_pmu_init(void)
3912 x86_pmu.flags |= PMU_FL_HAS_RSP_1; 3917 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
3913 x86_pmu.flags |= PMU_FL_NO_HT_SHARING; 3918 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
3914 3919
3915 pr_cont("Knights Landing events, "); 3920 pr_cont("Knights Landing/Mill events, ");
3916 break; 3921 break;
3917 3922
3918 case INTEL_FAM6_SKYLAKE_MOBILE: 3923 case INTEL_FAM6_SKYLAKE_MOBILE:
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 3ca87b5a8677..4f5ac726335f 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -48,7 +48,8 @@
48 * Scope: Core 48 * Scope: Core
49 * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter 49 * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
50 * perf code: 0x02 50 * perf code: 0x02
51 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL 51 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
52 * SKL,KNL
52 * Scope: Core 53 * Scope: Core
53 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter 54 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
54 * perf code: 0x03 55 * perf code: 0x03
@@ -56,15 +57,16 @@
56 * Scope: Core 57 * Scope: Core
57 * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter. 58 * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
58 * perf code: 0x00 59 * perf code: 0x00
59 * Available model: SNB,IVB,HSW,BDW,SKL 60 * Available model: SNB,IVB,HSW,BDW,SKL,KNL
60 * Scope: Package (physical package) 61 * Scope: Package (physical package)
61 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. 62 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
62 * perf code: 0x01 63 * perf code: 0x01
63 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL 64 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
64 * Scope: Package (physical package) 65 * Scope: Package (physical package)
65 * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter. 66 * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
66 * perf code: 0x02 67 * perf code: 0x02
67 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL 68 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
69 * SKL,KNL
68 * Scope: Package (physical package) 70 * Scope: Package (physical package)
69 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. 71 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
70 * perf code: 0x03 72 * perf code: 0x03
@@ -118,6 +120,7 @@ struct cstate_model {
118 120
119/* Quirk flags */ 121/* Quirk flags */
120#define SLM_PKG_C6_USE_C7_MSR (1UL << 0) 122#define SLM_PKG_C6_USE_C7_MSR (1UL << 0)
123#define KNL_CORE_C6_MSR (1UL << 1)
121 124
122struct perf_cstate_msr { 125struct perf_cstate_msr {
123 u64 msr; 126 u64 msr;
@@ -488,6 +491,18 @@ static const struct cstate_model slm_cstates __initconst = {
488 .quirks = SLM_PKG_C6_USE_C7_MSR, 491 .quirks = SLM_PKG_C6_USE_C7_MSR,
489}; 492};
490 493
494
495static const struct cstate_model knl_cstates __initconst = {
496 .core_events = BIT(PERF_CSTATE_CORE_C6_RES),
497
498 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
499 BIT(PERF_CSTATE_PKG_C3_RES) |
500 BIT(PERF_CSTATE_PKG_C6_RES),
501 .quirks = KNL_CORE_C6_MSR,
502};
503
504
505
491#define X86_CSTATES_MODEL(model, states) \ 506#define X86_CSTATES_MODEL(model, states) \
492 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) } 507 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
493 508
@@ -523,6 +538,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
523 538
524 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), 539 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
525 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), 540 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
541
542 X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
526 { }, 543 { },
527}; 544};
528MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); 545MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -558,6 +575,11 @@ static int __init cstate_probe(const struct cstate_model *cm)
558 if (cm->quirks & SLM_PKG_C6_USE_C7_MSR) 575 if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
559 pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY; 576 pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
560 577
578 /* KNL has different MSR for CORE C6 */
579 if (cm->quirks & KNL_CORE_C6_MSR)
580 core_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
581
582
561 has_cstate_core = cstate_probe_msr(cm->core_events, 583 has_cstate_core = cstate_probe_msr(cm->core_events,
562 PERF_CSTATE_CORE_EVENT_MAX, 584 PERF_CSTATE_CORE_EVENT_MAX,
563 core_msr, core_events_attrs); 585 core_msr, core_events_attrs);
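
The KNL support above hinges on one quirk: Knights Landing reports core C6 residency through its own MSR, so before probing, the core C6 event's MSR address is redirected to MSR_KNL_CORE_C6_RESIDENCY (note that PERF_CSTATE_CORE_C6_RES indexes the core table, not the package one). A trimmed, stand-alone sketch of the quirk-flag pattern; the MSR numbers match msr-index.h, the structs are simplified stand-ins:

#include <stdio.h>

#define KNL_CORE_C6_MSR            (1UL << 1)
#define MSR_CORE_C6_RESIDENCY      0x3fd
#define MSR_KNL_CORE_C6_RESIDENCY  0x3ff

enum { PERF_CSTATE_CORE_C6_RES = 0, PERF_CSTATE_CORE_EVENT_MAX };

struct cstate_model { unsigned long quirks; };
struct perf_cstate_msr { unsigned int msr; };

static struct perf_cstate_msr core_msr[PERF_CSTATE_CORE_EVENT_MAX] = {
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY },
};

static void cstate_probe(const struct cstate_model *cm)
{
	/* KNL reports core C6 residency through its own MSR */
	if (cm->quirks & KNL_CORE_C6_MSR)
		core_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
}

int main(void)
{
	struct cstate_model knl = { .quirks = KNL_CORE_C6_MSR };

	cstate_probe(&knl);
	printf("core C6 MSR: %#x\n", core_msr[PERF_CSTATE_CORE_C6_RES].msr);
	return 0;
}
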
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index fc6cf21c535e..81b321ace8e0 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -458,8 +458,8 @@ void intel_pmu_lbr_del(struct perf_event *event)
458 if (!x86_pmu.lbr_nr) 458 if (!x86_pmu.lbr_nr)
459 return; 459 return;
460 460
461 if (branch_user_callstack(cpuc->br_sel) && event->ctx && 461 if (branch_user_callstack(cpuc->br_sel) &&
462 event->ctx->task_ctx_data) { 462 event->ctx->task_ctx_data) {
463 task_ctx = event->ctx->task_ctx_data; 463 task_ctx = event->ctx->task_ctx_data;
464 task_ctx->lbr_callstack_users--; 464 task_ctx->lbr_callstack_users--;
465 } 465 }
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index b0f0e835a770..0a535cea8ff3 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -763,6 +763,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
763 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init), 763 X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
764 764
765 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init), 765 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
766 X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
766 767
767 X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_rapl_init), 768 X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_rapl_init),
768 X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init), 769 X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d9844cc74486..efca2685d876 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1349,6 +1349,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1349 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init), 1349 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
1350 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init), 1350 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
1351 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init), 1351 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
1352 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
1352 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init), 1353 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
1353 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init), 1354 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
1354 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init), 1355 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 1188bc849ee3..a39629206864 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -194,6 +194,8 @@
194#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 194#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
195 195
196#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ 196#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
197#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
198#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
197 199
198/* Virtualization flags: Linux defined, word 8 */ 200/* Virtualization flags: Linux defined, word 8 */
199#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 201#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 9ae5ab80a497..34a46dc076d3 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -64,5 +64,6 @@
64/* Xeon Phi */ 64/* Xeon Phi */
65 65
66#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ 66#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
67#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
67 68
68#endif /* _ASM_X86_INTEL_FAMILY_H */ 69#endif /* _ASM_X86_INTEL_FAMILY_H */
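
Model 0x85 only matters once decoded from CPUID leaf 1, where family-6 parts combine the base and extended model fields. A user-space sketch of the decode, assuming GCC's <cpuid.h> (extended-family handling omitted since it does not apply to family 6):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family = (eax >> 8) & 0xf;
	model  = (eax >> 4) & 0xf;
	if (family == 0x6 || family == 0xf)
		model |= ((eax >> 16) & 0xf) << 4;	/* extended model */

	if (family == 6 && model == 0x57)
		puts("Xeon Phi: Knights Landing");
	else if (family == 6 && model == 0x85)
		puts("Xeon Phi: Knights Mill");
	else
		printf("family %#x, model %#x\n", family, model);
	return 0;
}
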
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index de25aad07853..d34bd370074b 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
351#define arch_phys_wc_add arch_phys_wc_add 351#define arch_phys_wc_add arch_phys_wc_add
352#endif 352#endif
353 353
354#ifdef CONFIG_X86_PAT
355extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
356extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
357#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
358#endif
359
354#endif /* _ASM_X86_IO_H */ 360#endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 56f4c6676b29..78f3760ca1f2 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -88,7 +88,6 @@
88 88
89#define MSR_IA32_RTIT_CTL 0x00000570 89#define MSR_IA32_RTIT_CTL 0x00000570
90#define MSR_IA32_RTIT_STATUS 0x00000571 90#define MSR_IA32_RTIT_STATUS 0x00000571
91#define MSR_IA32_RTIT_STATUS 0x00000571
92#define MSR_IA32_RTIT_ADDR0_A 0x00000580 91#define MSR_IA32_RTIT_ADDR0_A 0x00000580
93#define MSR_IA32_RTIT_ADDR0_B 0x00000581 92#define MSR_IA32_RTIT_ADDR0_B 0x00000581
94#define MSR_IA32_RTIT_ADDR1_A 0x00000582 93#define MSR_IA32_RTIT_ADDR1_A 0x00000582
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 3d33a719f5c1..a34e0d4b957d 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -103,8 +103,10 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
103({ \ 103({ \
104 long tmp; \ 104 long tmp; \
105 struct rw_semaphore* ret; \ 105 struct rw_semaphore* ret; \
106 register void *__sp asm(_ASM_SP); \
107 \
106 asm volatile("# beginning down_write\n\t" \ 108 asm volatile("# beginning down_write\n\t" \
107 LOCK_PREFIX " xadd %1,(%3)\n\t" \ 109 LOCK_PREFIX " xadd %1,(%4)\n\t" \
108 /* adds 0xffff0001, returns the old value */ \ 110 /* adds 0xffff0001, returns the old value */ \
109 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \ 111 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
110 /* was the active mask 0 before? */\ 112 /* was the active mask 0 before? */\
@@ -112,7 +114,7 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
112 " call " slow_path "\n" \ 114 " call " slow_path "\n" \
113 "1:\n" \ 115 "1:\n" \
114 "# ending down_write" \ 116 "# ending down_write" \
115 : "+m" (sem->count), "=d" (tmp), "=a" (ret) \ 117 : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
116 : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \ 118 : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
117 : "memory", "cc"); \ 119 : "memory", "cc"); \
118 ret; \ 120 ret; \
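
The two-line rwsem change adds a dummy stack-pointer operand to the down_write asm. Because the asm body contains a call, the compiler has to be told that the stack is used; otherwise it may, for example, keep live data in the 128-byte red zone that the call would clobber. A stand-alone x86-64 sketch of the same pattern; the helper name and clobber list are illustrative, not taken from the kernel:

#include <stdio.h>

__attribute__((noinline)) long my_helper(long x)
{
	return x + 1;
}

static long call_via_asm(long x)
{
	long ret;
	register long arg asm("rdi") = x;
	/* Dummy SP operand: ties the asm to the stack pointer so the
	 * compiler treats the block as using the stack (it contains a
	 * call), mirroring the __sp operand added above. */
	register void *sp asm("rsp");

	asm volatile("call my_helper"
		     : "=a" (ret), "+r" (sp), "+r" (arg)
		     :
		     : "rsi", "rdx", "rcx", "r8", "r9", "r10", "r11",
		       "memory", "cc");
	return ret;
}

int main(void)
{
	printf("%ld\n", call_via_asm(41));	/* prints 42 */
	return 0;
}
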
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2aaca53c0974..ad6f5eb07a95 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -52,6 +52,15 @@ struct task_struct;
52#include <asm/cpufeature.h> 52#include <asm/cpufeature.h>
53#include <linux/atomic.h> 53#include <linux/atomic.h>
54 54
55struct thread_info {
56 unsigned long flags; /* low level flags */
57};
58
59#define INIT_THREAD_INFO(tsk) \
60{ \
61 .flags = 0, \
62}
63
55#define init_stack (init_thread_union.stack) 64#define init_stack (init_thread_union.stack)
56 65
57#else /* !__ASSEMBLY__ */ 66#else /* !__ASSEMBLY__ */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8a5abaa7d453..931ced8ca345 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -454,6 +454,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
454 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; 454 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
455 455
456 mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); 456 mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
457 acpi_penalize_sci_irq(bus_irq, trigger, polarity);
457 458
458 /* 459 /*
459 * stash over-ride to indicate we've been here 460 * stash over-ride to indicate we've been here
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 620ab06bcf45..017bda12caae 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
429 * We need the physical address of the container for both bitness since 429 * We need the physical address of the container for both bitness since
430 * boot_params.hdr.ramdisk_image is a physical address. 430 * boot_params.hdr.ramdisk_image is a physical address.
431 */ 431 */
432 cont = __pa(container); 432 cont = __pa_nodebug(container);
433 cont_va = container; 433 cont_va = container;
434#endif 434#endif
435 435
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 8cb57df9398d..1db8dc490b66 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -32,6 +32,8 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
32 32
33 static const struct cpuid_bit cpuid_bits[] = { 33 static const struct cpuid_bit cpuid_bits[] = {
34 { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 }, 34 { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
35 { X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
36 { X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
35 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, 37 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
36 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, 38 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
37 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, 39 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
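
The two scattered bits live in CPUID leaf 7, subleaf 0, EDX bits 2 and 3, exactly as the table entries state. A user-space probe of the same bits, assuming GCC's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid(0, eax, ebx, ecx, edx);		/* eax = highest basic leaf */
	if (eax < 7)
		return 1;

	/* leaf 7, subleaf 0: EDX bit 2 = 4VNNIW, bit 3 = 4FMAPS */
	__cpuid_count(7, 0, eax, ebx, ecx, edx);
	printf("AVX512_4VNNIW: %s\n", edx & (1u << 2) ? "yes" : "no");
	printf("AVX512_4FMAPS: %s\n", edx & (1u << 3) ? "yes" : "no");
	return 0;
}
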
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 81160578b91a..5130985b758b 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -27,6 +27,7 @@
27#include <asm/div64.h> 27#include <asm/div64.h>
28#include <asm/x86_init.h> 28#include <asm/x86_init.h>
29#include <asm/hypervisor.h> 29#include <asm/hypervisor.h>
30#include <asm/timer.h>
30#include <asm/apic.h> 31#include <asm/apic.h>
31 32
32#define CPUID_VMWARE_INFO_LEAF 0x40000000 33#define CPUID_VMWARE_INFO_LEAF 0x40000000
@@ -94,6 +95,10 @@ static void __init vmware_platform_setup(void)
94 } else { 95 } else {
95 pr_warn("Failed to get TSC freq from the hypervisor\n"); 96 pr_warn("Failed to get TSC freq from the hypervisor\n");
96 } 97 }
98
99#ifdef CONFIG_X86_IO_APIC
100 no_timer_check = 1;
101#endif
97} 102}
98 103
99/* 104/*
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index b85fe5f91c3f..90e8dde3ec26 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -350,7 +350,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
350 * continue building up new bios map based on this 350 * continue building up new bios map based on this
351 * information 351 * information
352 */ 352 */
353 if (current_type != last_type) { 353 if (current_type != last_type || current_type == E820_PRAM) {
354 if (last_type != 0) { 354 if (last_type != 0) {
355 new_bios[new_bios_entry].size = 355 new_bios[new_bios_entry].size =
356 change_point[chgidx]->addr - last_addr; 356 change_point[chgidx]->addr - last_addr;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 124aa5c593f8..095ef7ddd6ae 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -74,6 +74,8 @@ void fpu__xstate_clear_all_cpu_caps(void)
74 setup_clear_cpu_cap(X86_FEATURE_MPX); 74 setup_clear_cpu_cap(X86_FEATURE_MPX);
75 setup_clear_cpu_cap(X86_FEATURE_XGETBV1); 75 setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
76 setup_clear_cpu_cap(X86_FEATURE_PKU); 76 setup_clear_cpu_cap(X86_FEATURE_PKU);
77 setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
78 setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
77} 79}
78 80
79/* 81/*
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 28cee019209c..d9d8d16b69db 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -50,6 +50,7 @@
50#include <linux/kallsyms.h> 50#include <linux/kallsyms.h>
51#include <linux/ftrace.h> 51#include <linux/ftrace.h>
52#include <linux/frame.h> 52#include <linux/frame.h>
53#include <linux/kasan.h>
53 54
54#include <asm/text-patching.h> 55#include <asm/text-patching.h>
55#include <asm/cacheflush.h> 56#include <asm/cacheflush.h>
@@ -1057,9 +1058,10 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
1057 * tailcall optimization. So, to be absolutely safe 1058 * tailcall optimization. So, to be absolutely safe
1058 * we also save and restore enough stack bytes to cover 1059 * we also save and restore enough stack bytes to cover
1059 * the argument area. 1060 * the argument area.
1061 * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
1062 * raw stack chunk with redzones:
1060 */ 1063 */
1061 memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, 1064 __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
1062 MIN_STACK_SIZE(addr));
1063 regs->flags &= ~X86_EFLAGS_IF; 1065 regs->flags &= ~X86_EFLAGS_IF;
1064 trace_hardirqs_off(); 1066 trace_hardirqs_off();
1065 regs->ip = (unsigned long)(jp->entry); 1067 regs->ip = (unsigned long)(jp->entry);
@@ -1080,6 +1082,9 @@ void jprobe_return(void)
1080{ 1082{
1081 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1083 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1082 1084
1085 /* Unpoison stack redzones in the frames we are going to jump over. */
1086 kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
1087
1083 asm volatile ( 1088 asm volatile (
1084#ifdef CONFIG_X86_64 1089#ifdef CONFIG_X86_64
1085 " xchg %%rbx,%%rsp \n" 1090 " xchg %%rbx,%%rsp \n"
@@ -1118,7 +1123,7 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1118 /* It's OK to start function graph tracing again */ 1123 /* It's OK to start function graph tracing again */
1119 unpause_graph_tracing(); 1124 unpause_graph_tracing();
1120 *regs = kcb->jprobe_saved_regs; 1125 *regs = kcb->jprobe_saved_regs;
1121 memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp)); 1126 __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
1122 preempt_enable_no_resched(); 1127 preempt_enable_no_resched();
1123 return 1; 1128 return 1;
1124 } 1129 }
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index efe73aacf966..7b0d3da52fb4 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -18,8 +18,10 @@
18 18
19#ifdef CC_USING_FENTRY 19#ifdef CC_USING_FENTRY
20# define function_hook __fentry__ 20# define function_hook __fentry__
21EXPORT_SYMBOL(__fentry__)
21#else 22#else
22# define function_hook mcount 23# define function_hook mcount
24EXPORT_SYMBOL(mcount)
23#endif 25#endif
24 26
25/* All cases save the original rbp (8 bytes) */ 27/* All cases save the original rbp (8 bytes) */
@@ -295,7 +297,6 @@ trace:
295 jmp fgraph_trace 297 jmp fgraph_trace
296END(function_hook) 298END(function_hook)
297#endif /* CONFIG_DYNAMIC_FTRACE */ 299#endif /* CONFIG_DYNAMIC_FTRACE */
298EXPORT_SYMBOL(function_hook)
299#endif /* CONFIG_FUNCTION_TRACER */ 300#endif /* CONFIG_FUNCTION_TRACER */
300 301
301#ifdef CONFIG_FUNCTION_GRAPH_TRACER 302#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 51402a7e4ca6..0bee04d41bed 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -625,8 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
625DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3, 625DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
626 amd_disable_seq_and_redirect_scrub); 626 amd_disable_seq_and_redirect_scrub);
627 627
628#endif
629
630#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE) 628#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
631#include <linux/jump_label.h> 629#include <linux/jump_label.h>
632#include <asm/string_64.h> 630#include <asm/string_64.h>
@@ -657,3 +655,4 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
657DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap); 655DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
658DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap); 656DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
659#endif 657#endif
658#endif
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 40df33753bae..ec1f756f9dc9 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -105,9 +105,6 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
105 /* Don't let flags to be set from userspace */ 105 /* Don't let flags to be set from userspace */
106 act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI); 106 act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
107 107
108 if (user_64bit_mode(current_pt_regs()))
109 return;
110
111 if (in_ia32_syscall()) 108 if (in_ia32_syscall())
112 act->sa.sa_flags |= SA_IA32_ABI; 109 act->sa.sa_flags |= SA_IA32_ABI;
113 if (in_x32_syscall()) 110 if (in_x32_syscall())
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 68f8cc222f25..c00cb64bc0a1 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,8 +261,10 @@ static inline void __smp_reschedule_interrupt(void)
261 261
262__visible void smp_reschedule_interrupt(struct pt_regs *regs) 262__visible void smp_reschedule_interrupt(struct pt_regs *regs)
263{ 263{
264 irq_enter();
264 ack_APIC_irq(); 265 ack_APIC_irq();
265 __smp_reschedule_interrupt(); 266 __smp_reschedule_interrupt();
267 irq_exit();
266 /* 268 /*
267 * KVM uses this interrupt to force a cpu out of guest mode 269 * KVM uses this interrupt to force a cpu out of guest mode
268 */ 270 */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 951f093a96fe..42f5eb7b4f6c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1409,15 +1409,17 @@ __init void prefill_possible_map(void)
1409 1409
1410 /* No boot processor was found in mptable or ACPI MADT */ 1410 /* No boot processor was found in mptable or ACPI MADT */
1411 if (!num_processors) { 1411 if (!num_processors) {
1412 int apicid = boot_cpu_physical_apicid; 1412 if (boot_cpu_has(X86_FEATURE_APIC)) {
1413 int cpu = hard_smp_processor_id(); 1413 int apicid = boot_cpu_physical_apicid;
1414 int cpu = hard_smp_processor_id();
1414 1415
1415 pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu); 1416 pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
1416 1417
1417 /* Make sure boot cpu is enumerated */ 1418 /* Make sure boot cpu is enumerated */
1418 if (apic->cpu_present_to_apicid(0) == BAD_APICID && 1419 if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
1419 apic->apic_id_valid(apicid)) 1420 apic->apic_id_valid(apicid))
1420 generic_processor_info(apicid, boot_cpu_apic_version); 1421 generic_processor_info(apicid, boot_cpu_apic_version);
1422 }
1421 1423
1422 if (!num_processors) 1424 if (!num_processors)
1423 num_processors = 1; 1425 num_processors = 1;
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index c9a073866ca7..a23ce84a3f6c 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -57,7 +57,8 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
57 unsigned char opcode[15]; 57 unsigned char opcode[15];
58 unsigned long addr = convert_ip_to_linear(child, regs); 58 unsigned long addr = convert_ip_to_linear(child, regs);
59 59
60 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); 60 copied = access_process_vm(child, addr, opcode, sizeof(opcode),
61 FOLL_FORCE);
61 for (i = 0; i < copied; i++) { 62 for (i = 0; i < copied; i++) {
62 switch (opcode[i]) { 63 switch (opcode[i]) {
63 /* popf and iret */ 64 /* popf and iret */
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 9298993dc8b7..2d721e533cf4 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -47,7 +47,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
47 get_stack_info(first_frame, state->task, &state->stack_info, 47 get_stack_info(first_frame, state->task, &state->stack_info,
48 &state->stack_mask); 48 &state->stack_mask);
49 49
50 if (!__kernel_text_address(*first_frame)) 50 /*
51 * The caller can provide the address of the first frame directly
52 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
53 * to start unwinding at. Skip ahead until we reach it.
54 */
55 if (!unwind_done(state) &&
56 (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
57 !__kernel_text_address(*first_frame)))
51 unwind_next_frame(state); 58 unwind_next_frame(state);
52} 59}
53EXPORT_SYMBOL_GPL(__unwind_start); 60EXPORT_SYMBOL_GPL(__unwind_start);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index c7220ba94aa7..1a22de70f7f7 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -594,7 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
594 ioapic->irr = 0; 594 ioapic->irr = 0;
595 ioapic->irr_delivered = 0; 595 ioapic->irr_delivered = 0;
596 ioapic->id = 0; 596 ioapic->id = 0;
597 memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS); 597 memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
598 rtc_irq_eoi_tracking_reset(ioapic); 598 rtc_irq_eoi_tracking_reset(ioapic);
599} 599}
600 600
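
The ioapic fix is the classic memset() size bug: IOAPIC_NUM_PINS counts array entries, but memset() takes bytes, so only part of the array was cleared. A stand-alone illustration; the int element type here is an assumption, and any element wider than one byte shows the same effect:

#include <stdio.h>
#include <string.h>

#define IOAPIC_NUM_PINS 24

int main(void)
{
	int irq_eoi[IOAPIC_NUM_PINS];

	memset(irq_eoi, 0xff, sizeof(irq_eoi));	/* poison */

	/* buggy reset: clears IOAPIC_NUM_PINS *bytes*, i.e. only the
	 * first 6 of the 24 int entries */
	memset(irq_eoi, 0x00, IOAPIC_NUM_PINS);
	printf("entry 23 after buggy reset: %#x\n", (unsigned)irq_eoi[23]);

	/* fixed reset: clears the whole array */
	memset(irq_eoi, 0x00, sizeof(irq_eoi));
	printf("entry 23 after fixed reset: %#x\n", (unsigned)irq_eoi[23]);
	return 0;
}
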
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c633de84dd7..e375235d81c9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5733,13 +5733,13 @@ static int kvmclock_cpu_online(unsigned int cpu)
5733 5733
5734static void kvm_timer_init(void) 5734static void kvm_timer_init(void)
5735{ 5735{
5736 int cpu;
5737
5738 max_tsc_khz = tsc_khz; 5736 max_tsc_khz = tsc_khz;
5739 5737
5740 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 5738 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5741#ifdef CONFIG_CPU_FREQ 5739#ifdef CONFIG_CPU_FREQ
5742 struct cpufreq_policy policy; 5740 struct cpufreq_policy policy;
5741 int cpu;
5742
5743 memset(&policy, 0, sizeof(policy)); 5743 memset(&policy, 0, sizeof(policy));
5744 cpu = get_cpu(); 5744 cpu = get_cpu();
5745 cpufreq_get_policy(&policy, cpu); 5745 cpufreq_get_policy(&policy, cpu);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index b8b6a60b32cf..0d4fb3ebbbac 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -435,7 +435,7 @@ slow_irqon:
435 435
436 ret = get_user_pages_unlocked(start, 436 ret = get_user_pages_unlocked(start,
437 (end - start) >> PAGE_SHIFT, 437 (end - start) >> PAGE_SHIFT,
438 write, 0, pages); 438 pages, write ? FOLL_WRITE : 0);
439 439
440 /* Have to be a bit careful with return values */ 440 /* Have to be a bit careful with return values */
441 if (nr > 0) { 441 if (nr > 0) {
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index ddd2661c4502..887e57182716 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -104,10 +104,10 @@ void __init kernel_randomize_memory(void)
104 * consistent with the vaddr_start/vaddr_end variables. 104 * consistent with the vaddr_start/vaddr_end variables.
105 */ 105 */
106 BUILD_BUG_ON(vaddr_start >= vaddr_end); 106 BUILD_BUG_ON(vaddr_start >= vaddr_end);
107 BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) && 107 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
108 vaddr_end >= EFI_VA_START); 108 vaddr_end >= EFI_VA_START);
109 BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) || 109 BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
110 config_enabled(CONFIG_EFI)) && 110 IS_ENABLED(CONFIG_EFI)) &&
111 vaddr_end >= __START_KERNEL_map); 111 vaddr_end >= __START_KERNEL_map);
112 BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); 112 BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
113 113
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 80476878eb4c..e4f800999b32 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
544{ 544{
545 long gup_ret; 545 long gup_ret;
546 int nr_pages = 1; 546 int nr_pages = 1;
547 int force = 0;
548 547
549 gup_ret = get_user_pages((unsigned long)addr, nr_pages, write, 548 gup_ret = get_user_pages((unsigned long)addr, nr_pages,
550 force, NULL, NULL); 549 write ? FOLL_WRITE : 0, NULL, NULL);
551 /* 550 /*
552 * get_user_pages() returns number of pages gotten. 551 * get_user_pages() returns number of pages gotten.
553 * 0 means we failed to fault in and get anything, 552 * 0 means we failed to fault in and get anything,
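
Both gup conversions above follow the same pattern: the old write/force integer pair becomes a single gup_flags bitmask, with write ? FOLL_WRITE : 0 as the translation. A minimal sketch of the encoding; the flag values mirror the kernel's FOLL_* constants and are inlined here only for self-containment:

#include <stdio.h>

#define FOLL_WRITE 0x01
#define FOLL_FORCE 0x10

static unsigned int gup_flags_from_bools(int write, int force)
{
	unsigned int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;
	return flags;
}

int main(void)
{
	printf("%#x\n", gup_flags_from_bools(1, 0));	/* 0x1 == FOLL_WRITE */
	printf("%#x\n", gup_flags_from_bools(0, 1));	/* 0x10 == FOLL_FORCE */
	return 0;
}
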
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 170cc4ff057b..83e701f160a9 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -730,6 +730,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
730 free_memtype(start, end); 730 free_memtype(start, end);
731} 731}
732 732
733int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
734{
735 enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
736
737 return io_reserve_memtype(start, start + size, &type);
738}
739EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
740
741void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
742{
743 io_free_memtype(start, start + size);
744}
745EXPORT_SYMBOL(arch_io_free_memtype_wc);
746
733pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 747pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
734 unsigned long size, pgprot_t vma_prot) 748 unsigned long size, pgprot_t vma_prot)
735{ 749{
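
The new exported pair gives drivers a way to reserve a write-combining memtype for a region before mapping it. A hypothetical driver fragment, not taken from this patch set, sketching the intended pairing around a framebuffer BAR:

/* Hypothetical fragment: reserve a WC memtype for a BAR, map it, and
 * release the reservation if the mapping fails. Teardown would call
 * iounmap() followed by arch_io_free_memtype_wc() with the same range. */
#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *fb_map_wc(struct pci_dev *pdev, int bar)
{
	resource_size_t base = pci_resource_start(pdev, bar);
	resource_size_t size = pci_resource_len(pdev, bar);
	void __iomem *fb;

	if (arch_io_reserve_memtype_wc(base, size))
		return NULL;

	fb = ioremap_wc(base, size);
	if (!fb)
		arch_io_free_memtype_wc(base, size);
	return fb;
}
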
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index b4d5e95fe4df..4a6a5a26c582 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -40,7 +40,15 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
40 */ 40 */
41 return BIOS_STATUS_UNIMPLEMENTED; 41 return BIOS_STATUS_UNIMPLEMENTED;
42 42
43 ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5); 43 /*
44 * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
45 * callback method, which uses efi_call() directly, with the kernel page tables:
46 */
47 if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags)))
48 ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
49 else
50 ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
51
44 return ret; 52 return ret;
45} 53}
46EXPORT_SYMBOL_GPL(uv_bios_call); 54EXPORT_SYMBOL_GPL(uv_bios_call);
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index 5766ead6fdb9..60a5a5a85505 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -36,7 +36,8 @@ int is_syscall(unsigned long addr)
36 * slow, but that doesn't matter, since it will be called only 36 * slow, but that doesn't matter, since it will be called only
37 * in case of singlestepping, if copy_from_user failed. 37 * in case of singlestepping, if copy_from_user failed.
38 */ 38 */
39 n = access_process_vm(current, addr, &instr, sizeof(instr), 0); 39 n = access_process_vm(current, addr, &instr, sizeof(instr),
40 FOLL_FORCE);
40 if (n != sizeof(instr)) { 41 if (n != sizeof(instr)) {
41 printk(KERN_ERR "is_syscall : failed to read " 42 printk(KERN_ERR "is_syscall : failed to read "
42 "instruction from 0x%lx\n", addr); 43 "instruction from 0x%lx\n", addr);
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index 0b5c184dd5b3..e30202b1716e 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -212,7 +212,8 @@ int is_syscall(unsigned long addr)
212 * slow, but that doesn't matter, since it will be called only 212 * slow, but that doesn't matter, since it will be called only
213 * in case of singlestepping, if copy_from_user failed. 213 * in case of singlestepping, if copy_from_user failed.
214 */ 214 */
215 n = access_process_vm(current, addr, &instr, sizeof(instr), 0); 215 n = access_process_vm(current, addr, &instr, sizeof(instr),
216 FOLL_FORCE);
216 if (n != sizeof(instr)) { 217 if (n != sizeof(instr)) {
217 printk("is_syscall : failed to read instruction from " 218 printk("is_syscall : failed to read instruction from "
218 "0x%lx\n", addr); 219 "0x%lx\n", addr);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0fdd57da7aa..bdd855685403 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1837,6 +1837,7 @@ static void __init init_hvm_pv_info(void)
1837 1837
1838 xen_domain_type = XEN_HVM_DOMAIN; 1838 xen_domain_type = XEN_HVM_DOMAIN;
1839} 1839}
1840#endif
1840 1841
1841static int xen_cpu_up_prepare(unsigned int cpu) 1842static int xen_cpu_up_prepare(unsigned int cpu)
1842{ 1843{
@@ -1887,6 +1888,7 @@ static int xen_cpu_up_online(unsigned int cpu)
1887 return 0; 1888 return 0;
1888} 1889}
1889 1890
1891#ifdef CONFIG_XEN_PVHVM
1890#ifdef CONFIG_KEXEC_CORE 1892#ifdef CONFIG_KEXEC_CORE
1891static void xen_hvm_shutdown(void) 1893static void xen_hvm_shutdown(void)
1892{ 1894{
diff --git a/block/badblocks.c b/block/badblocks.c
index 7be53cb1cc3c..6ebcef282314 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ retry:
133} 133}
134EXPORT_SYMBOL_GPL(badblocks_check); 134EXPORT_SYMBOL_GPL(badblocks_check);
135 135
136static void badblocks_update_acked(struct badblocks *bb)
137{
138 u64 *p = bb->page;
139 int i;
140 bool unacked = false;
141
142 if (!bb->unacked_exist)
143 return;
144
145 for (i = 0; i < bb->count ; i++) {
146 if (!BB_ACK(p[i])) {
147 unacked = true;
148 break;
149 }
150 }
151
152 if (!unacked)
153 bb->unacked_exist = 0;
154}
155
136/** 156/**
137 * badblocks_set() - Add a range of bad blocks to the table. 157 * badblocks_set() - Add a range of bad blocks to the table.
138 * @bb: the badblocks structure that holds all badblock information 158 * @bb: the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
294 bb->changed = 1; 314 bb->changed = 1;
295 if (!acknowledged) 315 if (!acknowledged)
296 bb->unacked_exist = 1; 316 bb->unacked_exist = 1;
317 else
318 badblocks_update_acked(bb);
297 write_sequnlock_irqrestore(&bb->lock, flags); 319 write_sequnlock_irqrestore(&bb->lock, flags);
298 320
299 return rv; 321 return rv;
@@ -354,7 +376,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
354 * current range. Earlier ranges could also overlap, 376 * current range. Earlier ranges could also overlap,
355 * but only this one can overlap the end of the range. 377 * but only this one can overlap the end of the range.
356 */ 378 */
357 if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) { 379 if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
380 (BB_OFFSET(p[lo]) < target)) {
358 /* Partial overlap, leave the tail of this range */ 381 /* Partial overlap, leave the tail of this range */
359 int ack = BB_ACK(p[lo]); 382 int ack = BB_ACK(p[lo]);
360 sector_t a = BB_OFFSET(p[lo]); 383 sector_t a = BB_OFFSET(p[lo]);
@@ -377,7 +400,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
377 lo--; 400 lo--;
378 } 401 }
379 while (lo >= 0 && 402 while (lo >= 0 &&
380 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) { 403 (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
404 (BB_OFFSET(p[lo]) < target)) {
381 /* This range does overlap */ 405 /* This range does overlap */
382 if (BB_OFFSET(p[lo]) < s) { 406 if (BB_OFFSET(p[lo]) < s) {
383 /* Keep the early parts of this range. */ 407 /* Keep the early parts of this range. */
@@ -399,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
399 } 423 }
400 } 424 }
401 425
426 badblocks_update_acked(bb);
402 bb->changed = 1; 427 bb->changed = 1;
403out: 428out:
404 write_sequnlock_irq(&bb->lock); 429 write_sequnlock_irq(&bb->lock);
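
badblocks_update_acked() exists so the unacked_exist hint can decay back to zero once every entry in the table has been acknowledged; both badblocks_set() and badblocks_clear() now call it. A stand-alone sketch with a simplified 64-bit packing, modeling only the ack bit of the kernel's encoding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BB_ACK(x)	(!!((x) & (1ULL << 63)))

struct badblocks {
	uint64_t *page;		/* packed entries */
	int count;		/* entries in use */
	int unacked_exist;	/* sticky hint: some entry not yet acked */
};

static void badblocks_update_acked(struct badblocks *bb)
{
	bool unacked = false;
	int i;

	if (!bb->unacked_exist)
		return;

	for (i = 0; i < bb->count; i++) {
		if (!BB_ACK(bb->page[i])) {
			unacked = true;
			break;
		}
	}

	if (!unacked)
		bb->unacked_exist = 0;
}

int main(void)
{
	uint64_t table[2] = { (1ULL << 63) | 100, (1ULL << 63) | 200 };
	struct badblocks bb = { table, 2, 1 };

	badblocks_update_acked(&bb);
	printf("unacked_exist = %d\n", bb.unacked_exist);	/* 0: all acked */
	return 0;
}
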
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6a14b68b9135..3c882cbc7541 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -343,6 +343,34 @@ static void flush_data_end_io(struct request *rq, int error)
343 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL); 343 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
344 344
345 /* 345 /*
346 * Update q->in_flight[] here to make this tag usable
347 * early, because in blk_queue_start_tag(),
348 * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
349 * reserve tags for sync I/O.
350 *
351 * More importantly, this avoids the following I/O
352 * deadlock:
353 *
354 * - suppose there are 40 FUA requests coming to the flush queue
355 * and queue depth is 31
356 * - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
357 * tag for async I/O any more
358 * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
359 * and flush_data_end_io() is called
360 * - the other rqs can't proceed unless we update
361 * q->in_flight[BLK_RW_ASYNC] here; meanwhile they are
362 * held in the flush data queue and no progress is made
363 * on handling the post flush rq
364 * - only after the post flush rq is handled can all these
365 * rqs be completed
366 */
367
368 elv_completed_request(q, rq);
369
370 /* for avoiding double accounting */
371 rq->cmd_flags &= ~REQ_STARTED;
372
373 /*
346 * After populating an empty queue, kick it to avoid stall. Read 374 * After populating an empty queue, kick it to avoid stall. Read
347 * the comment in flush_end_io(). 375 * the comment in flush_end_io().
348 */ 376 */
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ddc2eed64771..f3d27a6dee09 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
1217 blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx); 1217 blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
1218 rq = __blk_mq_alloc_request(&alloc_data, op, op_flags); 1218 rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
1219 1219
1220 hctx->queued++; 1220 data->hctx = alloc_data.hctx;
1221 data->hctx = hctx; 1221 data->ctx = alloc_data.ctx;
1222 data->ctx = ctx; 1222 data->hctx->queued++;
1223 return rq; 1223 return rq;
1224} 1224}
1225 1225
diff --git a/drivers/Makefile b/drivers/Makefile
index f0afdfb3c7df..194d20bee7dc 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -21,7 +21,7 @@ obj-y += video/
21obj-y += idle/ 21obj-y += idle/
22 22
23# IPMI must come before ACPI in order to provide IPMI opregion support 23# IPMI must come before ACPI in order to provide IPMI opregion support
24obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/ 24obj-y += char/ipmi/
25 25
26obj-$(CONFIG_ACPI) += acpi/ 26obj-$(CONFIG_ACPI) += acpi/
27obj-$(CONFIG_SFI) += sfi/ 27obj-$(CONFIG_SFI) += sfi/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index f1e6dcc7a827..54d48b90de2c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -46,6 +46,7 @@
46#include "acdispat.h" 46#include "acdispat.h"
47#include "acnamesp.h" 47#include "acnamesp.h"
48#include "actables.h" 48#include "actables.h"
49#include "acinterp.h"
49 50
50#define _COMPONENT ACPI_DISPATCHER 51#define _COMPONENT ACPI_DISPATCHER
51ACPI_MODULE_NAME("dsinit") 52ACPI_MODULE_NAME("dsinit")
@@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
214 215
215 /* Walk entire namespace from the supplied root */ 216 /* Walk entire namespace from the supplied root */
216 217
217 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
218 if (ACPI_FAILURE(status)) {
219 return_ACPI_STATUS(status);
220 }
221
222 /* 218 /*
223 * We don't use acpi_walk_namespace since we do not want to acquire 219 * We don't use acpi_walk_namespace since we do not want to acquire
224 * the namespace reader lock. 220 * the namespace reader lock.
225 */ 221 */
226 status = 222 status =
227 acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX, 223 acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
228 ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object, 224 0, acpi_ds_init_one_object, NULL, &info,
229 NULL, &info, NULL); 225 NULL);
230 if (ACPI_FAILURE(status)) { 226 if (ACPI_FAILURE(status)) {
231 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); 227 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
232 } 228 }
233 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
234 229
235 status = acpi_get_table_by_index(table_index, &table); 230 status = acpi_get_table_by_index(table_index, &table);
236 if (ACPI_FAILURE(status)) { 231 if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 32e9ddc0cf2b..2b3210f42a46 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
99 "Method auto-serialization parse [%4.4s] %p\n", 99 "Method auto-serialization parse [%4.4s] %p\n",
100 acpi_ut_get_node_name(node), node)); 100 acpi_ut_get_node_name(node), node));
101 101
102 acpi_ex_enter_interpreter();
103
104 /* Create/Init a root op for the method parse tree */ 102 /* Create/Init a root op for the method parse tree */
105 103
106 op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start); 104 op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
107 if (!op) { 105 if (!op) {
108 status = AE_NO_MEMORY; 106 return_ACPI_STATUS(AE_NO_MEMORY);
109 goto unlock;
110 } 107 }
111 108
112 acpi_ps_set_name(op, node->name.integer); 109 acpi_ps_set_name(op, node->name.integer);
@@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
118 acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL); 115 acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
119 if (!walk_state) { 116 if (!walk_state) {
120 acpi_ps_free_op(op); 117 acpi_ps_free_op(op);
121 status = AE_NO_MEMORY; 118 return_ACPI_STATUS(AE_NO_MEMORY);
122 goto unlock;
123 } 119 }
124 120
125 status = acpi_ds_init_aml_walk(walk_state, op, node, 121 status = acpi_ds_init_aml_walk(walk_state, op, node,
@@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
138 status = acpi_ps_parse_aml(walk_state); 134 status = acpi_ps_parse_aml(walk_state);
139 135
140 acpi_ps_delete_parse_tree(op); 136 acpi_ps_delete_parse_tree(op);
141unlock:
142 acpi_ex_exit_interpreter();
143 return_ACPI_STATUS(status); 137 return_ACPI_STATUS(status);
144} 138}
145 139
@@ -731,26 +725,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
731 acpi_ds_method_data_delete_all(walk_state); 725 acpi_ds_method_data_delete_all(walk_state);
732 726
733 /* 727 /*
734 * If method is serialized, release the mutex and restore the
735 * current sync level for this thread
736 */
737 if (method_desc->method.mutex) {
738
739 /* Acquisition Depth handles recursive calls */
740
741 method_desc->method.mutex->mutex.acquisition_depth--;
742 if (!method_desc->method.mutex->mutex.acquisition_depth) {
743 walk_state->thread->current_sync_level =
744 method_desc->method.mutex->mutex.
745 original_sync_level;
746
747 acpi_os_release_mutex(method_desc->method.
748 mutex->mutex.os_mutex);
749 method_desc->method.mutex->mutex.thread_id = 0;
750 }
751 }
752
753 /*
754 * Delete any namespace objects created anywhere within the 728 * Delete any namespace objects created anywhere within the
755 * namespace by the execution of this method. Unless: 729 * namespace by the execution of this method. Unless:
756 * 1) This method is a module-level executable code method, in which 730 * 1) This method is a module-level executable code method, in which
@@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
786 ~ACPI_METHOD_MODIFIED_NAMESPACE; 760 ~ACPI_METHOD_MODIFIED_NAMESPACE;
787 } 761 }
788 } 762 }
763
764 /*
765 * If method is serialized, release the mutex and restore the
766 * current sync level for this thread
767 */
768 if (method_desc->method.mutex) {
769
770 /* Acquisition Depth handles recursive calls */
771
772 method_desc->method.mutex->mutex.acquisition_depth--;
773 if (!method_desc->method.mutex->mutex.acquisition_depth) {
774 walk_state->thread->current_sync_level =
775 method_desc->method.mutex->mutex.
776 original_sync_level;
777
778 acpi_os_release_mutex(method_desc->method.
779 mutex->mutex.os_mutex);
780 method_desc->method.mutex->mutex.thread_id = 0;
781 }
782 }
789 } 783 }
790 784
791 /* Decrement the thread count on the method */ 785 /* Decrement the thread count on the method */
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 028b22a3154e..e36218206bb0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
607 } 607 }
608 } 608 }
609 609
610 acpi_ex_exit_interpreter();
611 status = 610 status =
612 acpi_ev_initialize_region 611 acpi_ev_initialize_region
613 (acpi_ns_get_attached_object(node), FALSE); 612 (acpi_ns_get_attached_object(node), FALSE);
614 acpi_ex_enter_interpreter();
615 613
616 if (ACPI_FAILURE(status)) { 614 if (ACPI_FAILURE(status)) {
617 /* 615 /*
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 3843f1fc5dbb..75ddd160a716 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -45,6 +45,7 @@
45#include "accommon.h" 45#include "accommon.h"
46#include "acevents.h" 46#include "acevents.h"
47#include "acnamesp.h" 47#include "acnamesp.h"
48#include "acinterp.h"
48 49
49#define _COMPONENT ACPI_EVENTS 50#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evrgnini") 51ACPI_MODULE_NAME("evrgnini")
@@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
597 } 598 }
598 } 599 }
599 600
601 acpi_ex_exit_interpreter();
600 status = 602 status =
601 acpi_ev_execute_reg_method(region_obj, 603 acpi_ev_execute_reg_method(region_obj,
602 ACPI_REG_CONNECT); 604 ACPI_REG_CONNECT);
605 acpi_ex_enter_interpreter();
603 606
604 if (acpi_ns_locked) { 607 if (acpi_ns_locked) {
605 status = 608 status =
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 334d3c5ba617..d1f20143bb11 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -137,7 +137,9 @@ unlock:
137 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 137 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
138 "**** Begin Table Object Initialization\n")); 138 "**** Begin Table Object Initialization\n"));
139 139
140 acpi_ex_enter_interpreter();
140 status = acpi_ds_initialize_objects(table_index, node); 141 status = acpi_ds_initialize_objects(table_index, node);
142 acpi_ex_exit_interpreter();
141 143
142 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 144 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
143 "**** Completed Table Object Initialization\n")); 145 "**** Completed Table Object Initialization\n"));
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f0a029e68d3e..0d099a24f776 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
662 ghes_do_proc(ghes, ghes->estatus); 662 ghes_do_proc(ghes, ghes->estatus);
663out: 663out:
664 ghes_clear_estatus(ghes); 664 ghes_clear_estatus(ghes);
665 return 0; 665 return rc;
666} 666}
667 667
668static void ghes_add_timer(struct ghes *ghes) 668static void ghes_add_timer(struct ghes *ghes)
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index c983bf733ad3..bc3d914dfc3e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -87,6 +87,7 @@ struct acpi_pci_link {
87 87
88static LIST_HEAD(acpi_link_list); 88static LIST_HEAD(acpi_link_list);
89static DEFINE_MUTEX(acpi_link_lock); 89static DEFINE_MUTEX(acpi_link_lock);
90static int sci_irq = -1, sci_penalty;
90 91
91/* -------------------------------------------------------------------------- 92/* --------------------------------------------------------------------------
92 PCI Link Device Management 93 PCI Link Device Management
@@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
496{ 497{
497 int penalty = 0; 498 int penalty = 0;
498 499
499 /* 500 if (irq == sci_irq)
500 * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict 501 penalty += sci_penalty;
501 * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
502 * use for PCI IRQs.
503 */
504 if (irq == acpi_gbl_FADT.sci_interrupt) {
505 u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
506
507 if (type != IRQ_TYPE_LEVEL_LOW)
508 penalty += PIRQ_PENALTY_ISA_ALWAYS;
509 else
510 penalty += PIRQ_PENALTY_PCI_USING;
511 }
512 502
513 if (irq < ACPI_MAX_ISA_IRQS) 503 if (irq < ACPI_MAX_ISA_IRQS)
514 return penalty + acpi_isa_irq_penalty[irq]; 504 return penalty + acpi_isa_irq_penalty[irq];
515 505
516 penalty += acpi_irq_pci_sharing_penalty(irq); 506 return penalty + acpi_irq_pci_sharing_penalty(irq);
517 return penalty;
518} 507}
519 508
520int __init acpi_irq_penalty_init(void) 509int __init acpi_irq_penalty_init(void)
@@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
619 acpi_device_bid(link->device)); 608 acpi_device_bid(link->device));
620 return -ENODEV; 609 return -ENODEV;
621 } else { 610 } else {
611 if (link->irq.active < ACPI_MAX_ISA_IRQS)
612 acpi_isa_irq_penalty[link->irq.active] +=
613 PIRQ_PENALTY_PCI_USING;
614
622 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", 615 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
623 acpi_device_name(link->device), 616 acpi_device_name(link->device),
624 acpi_device_bid(link->device), link->irq.active); 617 acpi_device_bid(link->device), link->irq.active);
@@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
849 continue; 842 continue;
850 843
851 if (used) 844 if (used)
852 new_penalty = acpi_irq_get_penalty(irq) + 845 new_penalty = acpi_isa_irq_penalty[irq] +
853 PIRQ_PENALTY_ISA_USED; 846 PIRQ_PENALTY_ISA_USED;
854 else 847 else
855 new_penalty = 0; 848 new_penalty = 0;
@@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
871void acpi_penalize_isa_irq(int irq, int active) 864void acpi_penalize_isa_irq(int irq, int active)
872{ 865{
873 if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty))) 866 if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
874 acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) + 867 acpi_isa_irq_penalty[irq] +=
875 (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); 868 (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
876} 869}
877 870
@@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
881 acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); 874 acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
882} 875}
883 876
877void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
878{
879 sci_irq = irq;
880
881 if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
882 polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
883 sci_penalty = PIRQ_PENALTY_PCI_USING;
884 else
885 sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
886}
887
884/* 888/*
885 * Over-ride default table to reserve additional IRQs for use by ISA 889 * Over-ride default table to reserve additional IRQs for use by ISA
886 * e.g. acpi_irq_isa=5 890 * e.g. acpi_irq_isa=5
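
The pci_link rework replaces the on-the-fly trigger-type lookup with two values captured once from the MADT: which IRQ the SCI uses and how heavily to penalize it (a level, active-low SCI coexists with PCI IRQs; anything else must never be handed out for PCI use). A stand-alone sketch of the scheme; the penalty constants are illustrative, not the kernel's values:

#include <stdio.h>

#define PIRQ_PENALTY_PCI_USING	0x1000	/* illustrative values */
#define PIRQ_PENALTY_ISA_ALWAYS	0x10000
#define ACPI_MAX_ISA_IRQS	16

static int sci_irq = -1, sci_penalty;
static int isa_irq_penalty[ACPI_MAX_ISA_IRQS];

static void penalize_sci_irq(int irq, int level, int active_low)
{
	sci_irq = irq;
	sci_penalty = (level && active_low) ? PIRQ_PENALTY_PCI_USING
					    : PIRQ_PENALTY_ISA_ALWAYS;
}

static int irq_get_penalty(int irq)
{
	int penalty = 0;

	if (irq == sci_irq)
		penalty += sci_penalty;
	if (irq < ACPI_MAX_ISA_IRQS)
		return penalty + isa_irq_penalty[irq];
	return penalty;		/* the kernel adds a PCI sharing penalty here */
}

int main(void)
{
	penalize_sci_irq(9, 1, 1);
	printf("penalty(9) = %#x\n", (unsigned)irq_get_penalty(9));
	return 0;
}
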
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ba5f11cebee2..9669fc7c19df 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1418,31 +1418,34 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
1418 * Message mode could be enforced. In this case assume that advantage 1418 * Message mode could be enforced. In this case assume that advantage
1419 * of multiple MSIs is negated and use single MSI mode instead. 1419 * of multiple MSIs is negated and use single MSI mode instead.
1420 */ 1420 */
1421 nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX, 1421 if (n_ports > 1) {
1422 PCI_IRQ_MSIX | PCI_IRQ_MSI); 1422 nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
1423 if (nvec > 0) { 1423 PCI_IRQ_MSIX | PCI_IRQ_MSI);
1424 if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) { 1424 if (nvec > 0) {
1425 hpriv->get_irq_vector = ahci_get_irq_vector; 1425 if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
1426 hpriv->flags |= AHCI_HFLAG_MULTI_MSI; 1426 hpriv->get_irq_vector = ahci_get_irq_vector;
1427 return nvec; 1427 hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
1428 return nvec;
1429 }
1430
1431 /*
1432 * Fallback to single MSI mode if the controller
1433 * enforced MRSM mode.
1434 */
1435 printk(KERN_INFO
1436 "ahci: MRSM is on, fallback to single MSI\n");
1437 pci_free_irq_vectors(pdev);
1428 } 1438 }
1429 1439
1430 /* 1440 /*
1431 * Fallback to single MSI mode if the controller enforced MRSM 1441 * -ENOSPC indicated we don't have enough vectors. Don't bother
1432 * mode. 1442 * trying a single vector for any other error:
1433 */ 1443 */
1434 printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n"); 1444 if (nvec < 0 && nvec != -ENOSPC)
1435 pci_free_irq_vectors(pdev); 1445 return nvec;
1436 } 1446 }
1437 1447
1438 /* 1448 /*
1439 * -ENOSPC indicated we don't have enough vectors. Don't bother trying
1440 * a single vector for any other error:
1441 */
1442 if (nvec < 0 && nvec != -ENOSPC)
1443 return nvec;
1444
1445 /*
1446 * If the host is not capable of supporting per-port vectors, fall 1449 * If the host is not capable of supporting per-port vectors, fall
1447 * back to single MSI before finally attempting single MSI-X. 1450 * back to single MSI before finally attempting single MSI-X.
1448 */ 1451 */
@@ -1617,7 +1620,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1617 /* legacy intx interrupts */ 1620 /* legacy intx interrupts */
1618 pci_intx(pdev, 1); 1621 pci_intx(pdev, 1);
1619 } 1622 }
1620 hpriv->irq = pdev->irq; 1623 hpriv->irq = pci_irq_vector(pdev, 0);
1621 1624
1622 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 1625 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
1623 host->flags |= ATA_HOST_PARALLEL_SCAN; 1626 host->flags |= ATA_HOST_PARALLEL_SCAN;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 811e11c82f32..0809cda93cc0 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
2954 case DAC960_PD_Controller: 2954 case DAC960_PD_Controller:
2955 if (!request_region(Controller->IO_Address, 0x80, 2955 if (!request_region(Controller->IO_Address, 0x80,
2956 Controller->FullModelName)) { 2956 Controller->FullModelName)) {
2957 DAC960_Error("IO port 0x%d busy for Controller at\n", 2957 DAC960_Error("IO port 0x%lx busy for Controller at\n",
2958 Controller, Controller->IO_Address); 2958 Controller, Controller->IO_Address);
2959 goto Failure; 2959 goto Failure;
2960 } 2960 }
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
2990 case DAC960_P_Controller: 2990 case DAC960_P_Controller:
2991 if (!request_region(Controller->IO_Address, 0x80, 2991 if (!request_region(Controller->IO_Address, 0x80,
2992 Controller->FullModelName)){ 2992 Controller->FullModelName)){
2993 DAC960_Error("IO port 0x%d busy for Controller at\n", 2993 DAC960_Error("IO port 0x%lx busy for Controller at\n",
2994 Controller, Controller->IO_Address); 2994 Controller, Controller->IO_Address);
2995 goto Failure; 2995 goto Failure;
2996 } 2996 }
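
The DAC960 change is a printk-style format fix: IO_Address is an unsigned long, so %d both truncates the value and mismatches the varargs ABI, while %lx prints it faithfully. The same rule in plain C:

#include <stdio.h>

int main(void)
{
	unsigned long io_address = 0xfce0UL;

	printf("IO port 0x%lx\n", io_address);	/* correct */
	/* printf("IO port 0x%d\n", io_address); would be undefined
	 * behaviour: %d consumes an int, not an unsigned long */
	return 0;
}
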
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ba405b55329f..19a16b2dbb91 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
164 spin_lock(&nbd->sock_lock); 164 spin_lock(&nbd->sock_lock);
165 165
166 if (!nbd->sock) { 166 if (!nbd->sock) {
167 spin_unlock_irq(&nbd->sock_lock); 167 spin_unlock(&nbd->sock_lock);
168 return; 168 return;
169 } 169 }
170 170
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index abb71628ab61..7b274ff4632c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -415,15 +415,15 @@ struct rbd_device {
415}; 415};
416 416
417/* 417/*
418 * Flag bits for rbd_dev->flags. If atomicity is required, 418 * Flag bits for rbd_dev->flags:
419 * rbd_dev->lock is used to protect access. 419 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
420 * 420 * by rbd_dev->lock
421 * Currently, only the "removing" flag (which is coupled with the 421 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
422 * "open_count" field) requires atomic access.
423 */ 422 */
424enum rbd_dev_flags { 423enum rbd_dev_flags {
425 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */ 424 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
426 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */ 425 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
426 RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
427}; 427};
428 428
429static DEFINE_MUTEX(client_mutex); /* Serialize client creation */ 429static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
@@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work)
3926 struct rbd_device *rbd_dev = container_of(to_delayed_work(work), 3926 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3927 struct rbd_device, watch_dwork); 3927 struct rbd_device, watch_dwork);
3928 bool was_lock_owner = false; 3928 bool was_lock_owner = false;
3929 bool need_to_wake = false;
3929 int ret; 3930 int ret;
3930 3931
3931 dout("%s rbd_dev %p\n", __func__, rbd_dev); 3932 dout("%s rbd_dev %p\n", __func__, rbd_dev);
@@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work)
3935 was_lock_owner = rbd_release_lock(rbd_dev); 3936 was_lock_owner = rbd_release_lock(rbd_dev);
3936 3937
3937 mutex_lock(&rbd_dev->watch_mutex); 3938 mutex_lock(&rbd_dev->watch_mutex);
3938 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) 3939 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3939 goto fail_unlock; 3940 mutex_unlock(&rbd_dev->watch_mutex);
3941 goto out;
3942 }
3940 3943
3941 ret = __rbd_register_watch(rbd_dev); 3944 ret = __rbd_register_watch(rbd_dev);
3942 if (ret) { 3945 if (ret) {
3943 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret); 3946 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3944 if (ret != -EBLACKLISTED) 3947 if (ret == -EBLACKLISTED || ret == -ENOENT) {
3948 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3949 need_to_wake = true;
3950 } else {
3945 queue_delayed_work(rbd_dev->task_wq, 3951 queue_delayed_work(rbd_dev->task_wq,
3946 &rbd_dev->watch_dwork, 3952 &rbd_dev->watch_dwork,
3947 RBD_RETRY_DELAY); 3953 RBD_RETRY_DELAY);
3948 goto fail_unlock; 3954 }
3955 mutex_unlock(&rbd_dev->watch_mutex);
3956 goto out;
3949 } 3957 }
3950 3958
3959 need_to_wake = true;
3951 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED; 3960 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3952 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id; 3961 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3953 mutex_unlock(&rbd_dev->watch_mutex); 3962 mutex_unlock(&rbd_dev->watch_mutex);
@@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work)
3963 ret); 3972 ret);
3964 } 3973 }
3965 3974
3975out:
3966 up_write(&rbd_dev->lock_rwsem); 3976 up_write(&rbd_dev->lock_rwsem);
3967 wake_requests(rbd_dev, true); 3977 if (need_to_wake)
3968 return; 3978 wake_requests(rbd_dev, true);
3969
3970fail_unlock:
3971 mutex_unlock(&rbd_dev->watch_mutex);
3972 up_write(&rbd_dev->lock_rwsem);
3973} 3979}
3974 3980
3975/* 3981/*
@@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
4074 up_read(&rbd_dev->lock_rwsem); 4080 up_read(&rbd_dev->lock_rwsem);
4075 schedule(); 4081 schedule();
4076 down_read(&rbd_dev->lock_rwsem); 4082 down_read(&rbd_dev->lock_rwsem);
4077 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); 4083 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
4084 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
4085
4078 finish_wait(&rbd_dev->lock_waitq, &wait); 4086 finish_wait(&rbd_dev->lock_waitq, &wait);
4079} 4087}
4080 4088
@@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work)
4166 4174
4167 if (must_be_locked) { 4175 if (must_be_locked) {
4168 down_read(&rbd_dev->lock_rwsem); 4176 down_read(&rbd_dev->lock_rwsem);
4169 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED) 4177 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
4178 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
4170 rbd_wait_state_locked(rbd_dev); 4179 rbd_wait_state_locked(rbd_dev);
4180
4181 WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
4182 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
4183 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
4184 result = -EBLACKLISTED;
4185 goto err_unlock;
4186 }
4171 } 4187 }
4172 4188
4173 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type, 4189 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
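The rbd hunks above turn rbd_wait_state_locked() into a wait for either of two outcomes: the exclusive lock was acquired, or the client was blacklisted. The new WARN_ON asserts that exactly one of the two holds on exit. A minimal, runnable sketch of that invariant (the helper name is hypothetical; the expression is taken from the diff):

	#include <assert.h>
	#include <stdbool.h>

	static void check_wait_invariant(bool locked, bool blacklisted)
	{
		/* warn condition from the diff: locked ^ !blacklisted */
		bool warns = locked ^ !blacklisted;

		/* it fires exactly when both conditions hold or neither does */
		assert(warns == (locked == blacklisted));
	}

So (locked, !blacklisted) and (!locked, blacklisted) pass silently, while (locked, blacklisted) and (!locked, !blacklisted) trip the warning.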
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 482794526e8c..d2d2c89de5b4 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
84 84
85static void add_early_randomness(struct hwrng *rng) 85static void add_early_randomness(struct hwrng *rng)
86{ 86{
87 unsigned char bytes[16];
88 int bytes_read; 87 int bytes_read;
88 size_t size = min_t(size_t, 16, rng_buffer_size());
89 89
90 mutex_lock(&reading_mutex); 90 mutex_lock(&reading_mutex);
91 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); 91 bytes_read = rng_get_data(rng, rng_buffer, size, 1);
92 mutex_unlock(&reading_mutex); 92 mutex_unlock(&reading_mutex);
93 if (bytes_read > 0) 93 if (bytes_read > 0)
94 add_device_randomness(bytes, bytes_read); 94 add_device_randomness(rng_buffer, bytes_read);
95} 95}
96 96
97static inline void cleanup_rng(struct kref *kref) 97static inline void cleanup_rng(struct kref *kref)
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 5a9350b1069a..7f816655cbbf 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -76,3 +76,11 @@ config IPMI_POWEROFF
76 the IPMI management controller is capable of this. 76 the IPMI management controller is capable of this.
77 77
78endif # IPMI_HANDLER 78endif # IPMI_HANDLER
79
80config ASPEED_BT_IPMI_BMC
81 depends on ARCH_ASPEED
 82	tristate "BT IPMI BMC driver"
83 help
84 Provides a driver for the BT (Block Transfer) IPMI interface
85 found on Aspeed SOCs (AST2400 and AST2500). The driver
86 implements the BMC side of the BT interface.
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index f3ffde1f5f1f..0d98cd91def1 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
11obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o 11obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
12obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o 12obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
13obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o 13obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
14obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 000000000000..b49e61320952
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,505 @@
1/*
2 * Copyright (c) 2015-2016, IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/atomic.h>
11#include <linux/bt-bmc.h>
12#include <linux/errno.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/miscdevice.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/poll.h>
19#include <linux/sched.h>
20#include <linux/timer.h>
21
22/*
 23 * This is a BMC device used to communicate with the host
24 */
25#define DEVICE_NAME "ipmi-bt-host"
26
27#define BT_IO_BASE 0xe4
28#define BT_IRQ 10
29
30#define BT_CR0 0x0
31#define BT_CR0_IO_BASE 16
32#define BT_CR0_IRQ 12
33#define BT_CR0_EN_CLR_SLV_RDP 0x8
34#define BT_CR0_EN_CLR_SLV_WRP 0x4
35#define BT_CR0_ENABLE_IBT 0x1
36#define BT_CR1 0x4
37#define BT_CR1_IRQ_H2B 0x01
38#define BT_CR1_IRQ_HBUSY 0x40
39#define BT_CR2 0x8
40#define BT_CR2_IRQ_H2B 0x01
41#define BT_CR2_IRQ_HBUSY 0x40
42#define BT_CR3 0xc
43#define BT_CTRL 0x10
44#define BT_CTRL_B_BUSY 0x80
45#define BT_CTRL_H_BUSY 0x40
46#define BT_CTRL_OEM0 0x20
47#define BT_CTRL_SMS_ATN 0x10
48#define BT_CTRL_B2H_ATN 0x08
49#define BT_CTRL_H2B_ATN 0x04
50#define BT_CTRL_CLR_RD_PTR 0x02
51#define BT_CTRL_CLR_WR_PTR 0x01
52#define BT_BMC2HOST 0x14
53#define BT_INTMASK 0x18
54#define BT_INTMASK_B2H_IRQEN 0x01
55#define BT_INTMASK_B2H_IRQ 0x02
56#define BT_INTMASK_BMC_HWRST 0x80
57
58#define BT_BMC_BUFFER_SIZE 256
59
60struct bt_bmc {
61 struct device dev;
62 struct miscdevice miscdev;
63 void __iomem *base;
64 int irq;
65 wait_queue_head_t queue;
66 struct timer_list poll_timer;
67 struct mutex mutex;
68};
69
70static atomic_t open_count = ATOMIC_INIT(0);
71
72static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
73{
74 return ioread8(bt_bmc->base + reg);
75}
76
77static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
78{
79 iowrite8(data, bt_bmc->base + reg);
80}
81
82static void clr_rd_ptr(struct bt_bmc *bt_bmc)
83{
84 bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
85}
86
87static void clr_wr_ptr(struct bt_bmc *bt_bmc)
88{
89 bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
90}
91
92static void clr_h2b_atn(struct bt_bmc *bt_bmc)
93{
94 bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
95}
96
97static void set_b_busy(struct bt_bmc *bt_bmc)
98{
99 if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
100 bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
101}
102
103static void clr_b_busy(struct bt_bmc *bt_bmc)
104{
105 if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
106 bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
107}
108
109static void set_b2h_atn(struct bt_bmc *bt_bmc)
110{
111 bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
112}
113
114static u8 bt_read(struct bt_bmc *bt_bmc)
115{
116 return bt_inb(bt_bmc, BT_BMC2HOST);
117}
118
119static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
120{
121 int i;
122
123 for (i = 0; i < n; i++)
124 buf[i] = bt_read(bt_bmc);
125 return n;
126}
127
128static void bt_write(struct bt_bmc *bt_bmc, u8 c)
129{
130 bt_outb(bt_bmc, c, BT_BMC2HOST);
131}
132
133static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
134{
135 int i;
136
137 for (i = 0; i < n; i++)
138 bt_write(bt_bmc, buf[i]);
139 return n;
140}
141
142static void set_sms_atn(struct bt_bmc *bt_bmc)
143{
144 bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
145}
146
147static struct bt_bmc *file_bt_bmc(struct file *file)
148{
149 return container_of(file->private_data, struct bt_bmc, miscdev);
150}
151
152static int bt_bmc_open(struct inode *inode, struct file *file)
153{
154 struct bt_bmc *bt_bmc = file_bt_bmc(file);
155
156 if (atomic_inc_return(&open_count) == 1) {
157 clr_b_busy(bt_bmc);
158 return 0;
159 }
160
161 atomic_dec(&open_count);
162 return -EBUSY;
163}
164
165/*
166 * The BT (Block Transfer) interface means that entire messages are
167 * buffered by the host before a notification is sent to the BMC that
168 * there is data to be read. The first byte is the length and the
169 * message data follows. The read operation just tries to capture the
170 * whole message before returning it to userspace.
171 *
172 * BT Message format :
173 *
174 * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N
175 * Length NetFn/LUN Seq Cmd Data
176 *
177 */
178static ssize_t bt_bmc_read(struct file *file, char __user *buf,
179 size_t count, loff_t *ppos)
180{
181 struct bt_bmc *bt_bmc = file_bt_bmc(file);
182 u8 len;
183 int len_byte = 1;
184 u8 kbuffer[BT_BMC_BUFFER_SIZE];
185 ssize_t ret = 0;
186 ssize_t nread;
187
188 if (!access_ok(VERIFY_WRITE, buf, count))
189 return -EFAULT;
190
191 WARN_ON(*ppos);
192
193 if (wait_event_interruptible(bt_bmc->queue,
194 bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
195 return -ERESTARTSYS;
196
197 mutex_lock(&bt_bmc->mutex);
198
199 if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
200 ret = -EIO;
201 goto out_unlock;
202 }
203
204 set_b_busy(bt_bmc);
205 clr_h2b_atn(bt_bmc);
206 clr_rd_ptr(bt_bmc);
207
208 /*
209 * The BT frames start with the message length, which does not
210 * include the length byte.
211 */
212 kbuffer[0] = bt_read(bt_bmc);
213 len = kbuffer[0];
214
215 /* We pass the length back to userspace as well */
216 if (len + 1 > count)
217 len = count - 1;
218
219 while (len) {
220 nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
221
222 bt_readn(bt_bmc, kbuffer + len_byte, nread);
223
224 if (copy_to_user(buf, kbuffer, nread + len_byte)) {
225 ret = -EFAULT;
226 break;
227 }
228 len -= nread;
229 buf += nread + len_byte;
230 ret += nread + len_byte;
231 len_byte = 0;
232 }
233
234 clr_b_busy(bt_bmc);
235
236out_unlock:
237 mutex_unlock(&bt_bmc->mutex);
238 return ret;
239}
240
241/*
242 * BT Message response format :
243 *
244 * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N
245 * Length NetFn/LUN Seq Cmd Code Data
246 */
247static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
248 size_t count, loff_t *ppos)
249{
250 struct bt_bmc *bt_bmc = file_bt_bmc(file);
251 u8 kbuffer[BT_BMC_BUFFER_SIZE];
252 ssize_t ret = 0;
253 ssize_t nwritten;
254
255 /*
256 * send a minimum response size
257 */
258 if (count < 5)
259 return -EINVAL;
260
261 if (!access_ok(VERIFY_READ, buf, count))
262 return -EFAULT;
263
264 WARN_ON(*ppos);
265
266 /*
267	 * There's no interrupt for clearing BMC busy, so we have to
268 * poll
269 */
270 if (wait_event_interruptible(bt_bmc->queue,
271 !(bt_inb(bt_bmc, BT_CTRL) &
272 (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
273 return -ERESTARTSYS;
274
275 mutex_lock(&bt_bmc->mutex);
276
277 if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
278 (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
279 ret = -EIO;
280 goto out_unlock;
281 }
282
283 clr_wr_ptr(bt_bmc);
284
285 while (count) {
286 nwritten = min_t(ssize_t, count, sizeof(kbuffer));
287 if (copy_from_user(&kbuffer, buf, nwritten)) {
288 ret = -EFAULT;
289 break;
290 }
291
292 bt_writen(bt_bmc, kbuffer, nwritten);
293
294 count -= nwritten;
295 buf += nwritten;
296 ret += nwritten;
297 }
298
299 set_b2h_atn(bt_bmc);
300
301out_unlock:
302 mutex_unlock(&bt_bmc->mutex);
303 return ret;
304}
305
306static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
307 unsigned long param)
308{
309 struct bt_bmc *bt_bmc = file_bt_bmc(file);
310
311 switch (cmd) {
312 case BT_BMC_IOCTL_SMS_ATN:
313 set_sms_atn(bt_bmc);
314 return 0;
315 }
316 return -EINVAL;
317}
318
319static int bt_bmc_release(struct inode *inode, struct file *file)
320{
321 struct bt_bmc *bt_bmc = file_bt_bmc(file);
322
323 atomic_dec(&open_count);
324 set_b_busy(bt_bmc);
325 return 0;
326}
327
328static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
329{
330 struct bt_bmc *bt_bmc = file_bt_bmc(file);
331 unsigned int mask = 0;
332 u8 ctrl;
333
334 poll_wait(file, &bt_bmc->queue, wait);
335
336 ctrl = bt_inb(bt_bmc, BT_CTRL);
337
338 if (ctrl & BT_CTRL_H2B_ATN)
339 mask |= POLLIN;
340
341 if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
342 mask |= POLLOUT;
343
344 return mask;
345}
346
347static const struct file_operations bt_bmc_fops = {
348 .owner = THIS_MODULE,
349 .open = bt_bmc_open,
350 .read = bt_bmc_read,
351 .write = bt_bmc_write,
352 .release = bt_bmc_release,
353 .poll = bt_bmc_poll,
354 .unlocked_ioctl = bt_bmc_ioctl,
355};
356
357static void poll_timer(unsigned long data)
358{
359 struct bt_bmc *bt_bmc = (void *)data;
360
361 bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
362 wake_up(&bt_bmc->queue);
363 add_timer(&bt_bmc->poll_timer);
364}
365
366static irqreturn_t bt_bmc_irq(int irq, void *arg)
367{
368 struct bt_bmc *bt_bmc = arg;
369 u32 reg;
370
371 reg = ioread32(bt_bmc->base + BT_CR2);
372 reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
373 if (!reg)
374 return IRQ_NONE;
375
376 /* ack pending IRQs */
377 iowrite32(reg, bt_bmc->base + BT_CR2);
378
379 wake_up(&bt_bmc->queue);
380 return IRQ_HANDLED;
381}
382
383static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
384 struct platform_device *pdev)
385{
386 struct device *dev = &pdev->dev;
387 u32 reg;
388 int rc;
389
390 bt_bmc->irq = platform_get_irq(pdev, 0);
391 if (!bt_bmc->irq)
392 return -ENODEV;
393
394 rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
395 DEVICE_NAME, bt_bmc);
396 if (rc < 0) {
397 dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
398 bt_bmc->irq = 0;
399 return rc;
400 }
401
402 /*
403	 * Configure IRQs on the BMC, clearing the H2B and HBUSY bits;
404	 * H2B will be asserted when the BMC has data for us; HBUSY
405 * will be cleared (along with B2H) when we can write the next
406 * message to the BT buffer
407 */
408 reg = ioread32(bt_bmc->base + BT_CR1);
409 reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
410 iowrite32(reg, bt_bmc->base + BT_CR1);
411
412 return 0;
413}
414
415static int bt_bmc_probe(struct platform_device *pdev)
416{
417 struct bt_bmc *bt_bmc;
418 struct device *dev;
419 struct resource *res;
420 int rc;
421
422 if (!pdev || !pdev->dev.of_node)
423 return -ENODEV;
424
425 dev = &pdev->dev;
426 dev_info(dev, "Found bt bmc device\n");
427
428 bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
429 if (!bt_bmc)
430 return -ENOMEM;
431
432 dev_set_drvdata(&pdev->dev, bt_bmc);
433
434 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
435 bt_bmc->base = devm_ioremap_resource(&pdev->dev, res);
436 if (IS_ERR(bt_bmc->base))
437 return PTR_ERR(bt_bmc->base);
438
439 mutex_init(&bt_bmc->mutex);
440 init_waitqueue_head(&bt_bmc->queue);
441
442 bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR,
443 bt_bmc->miscdev.name = DEVICE_NAME,
444 bt_bmc->miscdev.fops = &bt_bmc_fops,
445 bt_bmc->miscdev.parent = dev;
446 rc = misc_register(&bt_bmc->miscdev);
447 if (rc) {
448 dev_err(dev, "Unable to register misc device\n");
449 return rc;
450 }
451
452 bt_bmc_config_irq(bt_bmc, pdev);
453
454 if (bt_bmc->irq) {
455 dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
456 } else {
457 dev_info(dev, "No IRQ; using timer\n");
458 setup_timer(&bt_bmc->poll_timer, poll_timer,
459 (unsigned long)bt_bmc);
460 bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
461 add_timer(&bt_bmc->poll_timer);
462 }
463
464 iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) |
465 (BT_IRQ << BT_CR0_IRQ) |
466 BT_CR0_EN_CLR_SLV_RDP |
467 BT_CR0_EN_CLR_SLV_WRP |
468 BT_CR0_ENABLE_IBT,
469 bt_bmc->base + BT_CR0);
470
471 clr_b_busy(bt_bmc);
472
473 return 0;
474}
475
476static int bt_bmc_remove(struct platform_device *pdev)
477{
478 struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
479
480 misc_deregister(&bt_bmc->miscdev);
481 if (!bt_bmc->irq)
482 del_timer_sync(&bt_bmc->poll_timer);
483 return 0;
484}
485
486static const struct of_device_id bt_bmc_match[] = {
487 { .compatible = "aspeed,ast2400-bt-bmc" },
488 { },
489};
490
491static struct platform_driver bt_bmc_driver = {
492 .driver = {
493 .name = DEVICE_NAME,
494 .of_match_table = bt_bmc_match,
495 },
496 .probe = bt_bmc_probe,
497 .remove = bt_bmc_remove,
498};
499
500module_platform_driver(bt_bmc_driver);
501
502MODULE_DEVICE_TABLE(of, bt_bmc_match);
503MODULE_LICENSE("GPL");
504MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
505MODULE_DESCRIPTION("Linux device interface to the BT interface");
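Since the new driver exposes the BT interface through a misc device, the message formats documented in its comments can be exercised with plain read()/write(). A minimal userspace sketch, assuming the device node appears as /dev/ipmi-bt-host (matching DEVICE_NAME above) and using the usual IPMI convention that a response NetFn is the request NetFn plus one (an assumption here, not something the driver enforces):

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int main(void)
	{
		uint8_t req[256], rsp[5];
		ssize_t n;
		int fd = open("/dev/ipmi-bt-host", O_RDWR);

		if (fd < 0)
			return 1;

		/* Request: Length | NetFn/LUN | Seq | Cmd | Data... */
		n = read(fd, req, sizeof(req));	/* blocks until H2B_ATN */
		if (n < 4)
			return 1;

		/* Response: Length | NetFn/LUN | Seq | Cmd | Code | Data...
		 * The driver rejects writes shorter than 5 bytes. */
		rsp[0] = 4;			/* length, excluding this byte */
		rsp[1] = req[1] | 0x04;		/* odd response NetFn (assumed convention) */
		rsp[2] = req[2];		/* echo sequence number */
		rsp[3] = req[3];		/* echo command */
		rsp[4] = 0x00;			/* completion code: success */
		write(fd, rsp, sizeof(rsp));

		close(fd);
		return 0;
	}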
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index d8619998cfb5..fcdd886819f5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2891,11 +2891,11 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
2891 intf->curr_channel = IPMI_MAX_CHANNELS; 2891 intf->curr_channel = IPMI_MAX_CHANNELS;
2892 } 2892 }
2893 2893
2894 rv = ipmi_bmc_register(intf, i);
2895
2894 if (rv == 0) 2896 if (rv == 0)
2895 rv = add_proc_entries(intf, i); 2897 rv = add_proc_entries(intf, i);
2896 2898
2897 rv = ipmi_bmc_register(intf, i);
2898
2899 out: 2899 out:
2900 if (rv) { 2900 if (rv) {
2901 if (intf->proc_dir) 2901 if (intf->proc_dir)
@@ -2982,8 +2982,6 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
2982 int intf_num = intf->intf_num; 2982 int intf_num = intf->intf_num;
2983 ipmi_user_t user; 2983 ipmi_user_t user;
2984 2984
2985 ipmi_bmc_unregister(intf);
2986
2987 mutex_lock(&smi_watchers_mutex); 2985 mutex_lock(&smi_watchers_mutex);
2988 mutex_lock(&ipmi_interfaces_mutex); 2986 mutex_lock(&ipmi_interfaces_mutex);
2989 intf->intf_num = -1; 2987 intf->intf_num = -1;
@@ -3007,6 +3005,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
3007 mutex_unlock(&ipmi_interfaces_mutex); 3005 mutex_unlock(&ipmi_interfaces_mutex);
3008 3006
3009 remove_proc_entries(intf); 3007 remove_proc_entries(intf);
3008 ipmi_bmc_unregister(intf);
3010 3009
3011 /* 3010 /*
3012 * Call all the watcher interfaces to tell them that 3011 * Call all the watcher interfaces to tell them that
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 190122e64a3a..85a449cf61e3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -203,7 +203,7 @@ at91_clk_register_programmable(struct regmap *regmap,
203 ret = clk_hw_register(NULL, &prog->hw); 203 ret = clk_hw_register(NULL, &prog->hw);
204 if (ret) { 204 if (ret) {
205 kfree(prog); 205 kfree(prog);
206 hw = &prog->hw; 206 hw = ERR_PTR(ret);
207 } 207 }
208 208
209 return hw; 209 return hw;
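The at91 fix replaces a pointer into just-freed memory with a properly encoded error: in the common kernel pattern, a single pointer return carries either a valid object or a negative errno via ERR_PTR()/IS_ERR()/PTR_ERR(). A kernel-context sketch of the pattern the fixed code now follows (struct names hypothetical):

	/* Return a valid pointer or an encoded errno, never a pointer
	 * into memory that was just freed. */
	static struct clk_hw *register_prog_clk(struct prog_clk *prog)
	{
		struct clk_hw *hw = &prog->hw;
		int ret = clk_hw_register(NULL, hw);

		if (ret) {
			kfree(prog);
			hw = ERR_PTR(ret);	/* not &prog->hw: that memory is gone */
		}
		return hw;			/* caller checks IS_ERR()/PTR_ERR() */
	}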
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index b68bf573dcfb..8c7763fd9efc 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -502,8 +502,12 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
502static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate, 502static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
503 unsigned long *parent_rate) 503 unsigned long *parent_rate)
504{ 504{
505 struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
506 const struct bcm2835_pll_data *data = pll->data;
505 u32 ndiv, fdiv; 507 u32 ndiv, fdiv;
506 508
509 rate = clamp(rate, data->min_rate, data->max_rate);
510
507 bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv); 511 bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
508 512
509 return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1); 513 return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
@@ -608,13 +612,6 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
608 u32 ana[4]; 612 u32 ana[4];
609 int i; 613 int i;
610 614
611 if (rate < data->min_rate || rate > data->max_rate) {
612 dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n",
613 clk_hw_get_name(hw), rate,
614 data->min_rate, data->max_rate);
615 return -EINVAL;
616 }
617
618 if (rate > data->max_fb_rate) { 615 if (rate > data->max_fb_rate) {
619 use_fb_prediv = true; 616 use_fb_prediv = true;
620 rate /= 2; 617 rate /= 2;
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index b637f5979023..eb953d3b0b69 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -216,6 +216,7 @@ static int max77686_clk_probe(struct platform_device *pdev)
216 return -EINVAL; 216 return -EINVAL;
217 } 217 }
218 218
219 drv_data->num_clks = num_clks;
219 drv_data->max_clk_data = devm_kcalloc(dev, num_clks, 220 drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
220 sizeof(*drv_data->max_clk_data), 221 sizeof(*drv_data->max_clk_data),
221 GFP_KERNEL); 222 GFP_KERNEL);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index fe364e63f8de..c0e8e1f196aa 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -195,7 +195,7 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
195 hi6220_clk_register_divider(hi6220_div_clks_sys, 195 hi6220_clk_register_divider(hi6220_div_clks_sys,
196 ARRAY_SIZE(hi6220_div_clks_sys), clk_data); 196 ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
197} 197}
198CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init); 198CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
199 199
200 200
201/* clocks in media controller */ 201/* clocks in media controller */
@@ -252,7 +252,7 @@ static void __init hi6220_clk_media_init(struct device_node *np)
252 hi6220_clk_register_divider(hi6220_div_clks_media, 252 hi6220_clk_register_divider(hi6220_div_clks_media,
253 ARRAY_SIZE(hi6220_div_clks_media), clk_data); 253 ARRAY_SIZE(hi6220_div_clks_media), clk_data);
254} 254}
255CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init); 255CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
256 256
257 257
258/* clocks in pmctrl */ 258/* clocks in pmctrl */
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 380c372d528e..f042bd2a6a99 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -8,6 +8,7 @@ config COMMON_CLK_MEDIATEK
8 8
9config COMMON_CLK_MT8135 9config COMMON_CLK_MT8135
10 bool "Clock driver for Mediatek MT8135" 10 bool "Clock driver for Mediatek MT8135"
11 depends on ARCH_MEDIATEK || COMPILE_TEST
11 select COMMON_CLK_MEDIATEK 12 select COMMON_CLK_MEDIATEK
12 default ARCH_MEDIATEK 13 default ARCH_MEDIATEK
13 ---help--- 14 ---help---
@@ -15,6 +16,7 @@ config COMMON_CLK_MT8135
15 16
16config COMMON_CLK_MT8173 17config COMMON_CLK_MT8173
17 bool "Clock driver for Mediatek MT8173" 18 bool "Clock driver for Mediatek MT8173"
19 depends on ARCH_MEDIATEK || COMPILE_TEST
18 select COMMON_CLK_MEDIATEK 20 select COMMON_CLK_MEDIATEK
19 default ARCH_MEDIATEK 21 default ARCH_MEDIATEK
20 ---help--- 22 ---help---
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 45905fc0d75b..cecb0fdfaef6 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -305,7 +305,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = {
305}; 305};
306static int armada_3700_add_composite_clk(const struct clk_periph_data *data, 306static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
307 void __iomem *reg, spinlock_t *lock, 307 void __iomem *reg, spinlock_t *lock,
308 struct device *dev, struct clk_hw *hw) 308 struct device *dev, struct clk_hw **hw)
309{ 309{
310 const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, 310 const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
311 *rate_ops = NULL; 311 *rate_ops = NULL;
@@ -329,6 +329,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
329 gate->lock = lock; 329 gate->lock = lock;
330 gate_ops = gate_hw->init->ops; 330 gate_ops = gate_hw->init->ops;
331 gate->reg = reg + (u64)gate->reg; 331 gate->reg = reg + (u64)gate->reg;
332 gate->flags = CLK_GATE_SET_TO_DISABLE;
332 } 333 }
333 334
334 if (data->rate_hw) { 335 if (data->rate_hw) {
@@ -353,13 +354,13 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
353 } 354 }
354 } 355 }
355 356
356 hw = clk_hw_register_composite(dev, data->name, data->parent_names, 357 *hw = clk_hw_register_composite(dev, data->name, data->parent_names,
357 data->num_parents, mux_hw, 358 data->num_parents, mux_hw,
358 mux_ops, rate_hw, rate_ops, 359 mux_ops, rate_hw, rate_ops,
359 gate_hw, gate_ops, CLK_IGNORE_UNUSED); 360 gate_hw, gate_ops, CLK_IGNORE_UNUSED);
360 361
361 if (IS_ERR(hw)) 362 if (IS_ERR(*hw))
362 return PTR_ERR(hw); 363 return PTR_ERR(*hw);
363 364
364 return 0; 365 return 0;
365} 366}
@@ -400,7 +401,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
400 spin_lock_init(&driver_data->lock); 401 spin_lock_init(&driver_data->lock);
401 402
402 for (i = 0; i < num_periph; i++) { 403 for (i = 0; i < num_periph; i++) {
403 struct clk_hw *hw = driver_data->hw_data->hws[i]; 404 struct clk_hw **hw = &driver_data->hw_data->hws[i];
404 405
405 if (armada_3700_add_composite_clk(&data[i], reg, 406 if (armada_3700_add_composite_clk(&data[i], reg,
406 &driver_data->lock, dev, hw)) 407 &driver_data->lock, dev, hw))
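The armada-37xx change switches the hw parameter from struct clk_hw * to struct clk_hw **: the old code assigned the registered clock to a local copy of the pointer, so driver_data->hw_data->hws[i] never received it. A small runnable demonstration of why the extra level of indirection matters:

	#include <stdio.h>

	static void set_by_value(int *p)  { p = NULL; }		/* caller unaffected */
	static void set_by_ref(int **pp) { *pp = NULL; }	/* caller's slot updated */

	int main(void)
	{
		int x = 42;
		int *slot = &x;

		set_by_value(slot);
		printf("by value: %p\n", (void *)slot);	/* still &x */
		set_by_ref(&slot);
		printf("by ref:   %p\n", (void *)slot);	/* now NULL */
		return 0;
	}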
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 51d152f735cc..17e68a724945 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -106,6 +106,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
106 }, 106 },
107 { }, 107 { },
108}; 108};
109MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
109 110
110static void exynos_audss_clk_teardown(void) 111static void exynos_audss_clk_teardown(void)
111{ 112{
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 5ffb898d0839..26c53f7963a4 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -79,7 +79,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
79 hw_data->num = clk_num; 79 hw_data->num = clk_num;
80 80
81 /* avoid returning NULL for unused idx */ 81 /* avoid returning NULL for unused idx */
82 for (; clk_num >= 0; clk_num--) 82 while (--clk_num >= 0)
83 hw_data->hws[clk_num] = ERR_PTR(-EINVAL); 83 hw_data->hws[clk_num] = ERR_PTR(-EINVAL);
84 84
85 for (p = data; p->name; p++) { 85 for (p = data; p->name; p++) {
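The loop fix above is an off-by-one: on entry clk_num equals hw_data->num, so `for (; clk_num >= 0; clk_num--)` wrote hws[clk_num] one past the end of the array before counting down, while `while (--clk_num >= 0)` decrements first and stays in bounds. A minimal demonstration with a hypothetical array size:

	#include <stdio.h>

	#define CLK_NUM 4

	int main(void)
	{
		const void *hws[CLK_NUM];
		int clk_num = CLK_NUM;

		/* buggy form: the first store would be hws[CLK_NUM], out of bounds:
		 *	for (; clk_num >= 0; clk_num--)
		 *		hws[clk_num] = ...;
		 */
		while (--clk_num >= 0)		/* fixed: indexes CLK_NUM-1 .. 0 */
			hws[clk_num] = NULL;	/* stand-in for ERR_PTR(-EINVAL) */

		printf("initialized %d slots\n", CLK_NUM);
		return 0;
	}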
@@ -111,6 +111,10 @@ static int uniphier_clk_remove(struct platform_device *pdev)
111static const struct of_device_id uniphier_clk_match[] = { 111static const struct of_device_id uniphier_clk_match[] = {
112 /* System clock */ 112 /* System clock */
113 { 113 {
114 .compatible = "socionext,uniphier-sld3-clock",
115 .data = uniphier_sld3_sys_clk_data,
116 },
117 {
114 .compatible = "socionext,uniphier-ld4-clock", 118 .compatible = "socionext,uniphier-ld4-clock",
115 .data = uniphier_ld4_sys_clk_data, 119 .data = uniphier_ld4_sys_clk_data,
116 }, 120 },
@@ -138,7 +142,7 @@ static const struct of_device_id uniphier_clk_match[] = {
138 .compatible = "socionext,uniphier-ld20-clock", 142 .compatible = "socionext,uniphier-ld20-clock",
139 .data = uniphier_ld20_sys_clk_data, 143 .data = uniphier_ld20_sys_clk_data,
140 }, 144 },
141 /* Media I/O clock */ 145 /* Media I/O clock, SD clock */
142 { 146 {
143 .compatible = "socionext,uniphier-sld3-mio-clock", 147 .compatible = "socionext,uniphier-sld3-mio-clock",
144 .data = uniphier_sld3_mio_clk_data, 148 .data = uniphier_sld3_mio_clk_data,
@@ -156,20 +160,20 @@ static const struct of_device_id uniphier_clk_match[] = {
156 .data = uniphier_sld3_mio_clk_data, 160 .data = uniphier_sld3_mio_clk_data,
157 }, 161 },
158 { 162 {
159 .compatible = "socionext,uniphier-pro5-mio-clock", 163 .compatible = "socionext,uniphier-pro5-sd-clock",
160 .data = uniphier_pro5_mio_clk_data, 164 .data = uniphier_pro5_sd_clk_data,
161 }, 165 },
162 { 166 {
163 .compatible = "socionext,uniphier-pxs2-mio-clock", 167 .compatible = "socionext,uniphier-pxs2-sd-clock",
164 .data = uniphier_pro5_mio_clk_data, 168 .data = uniphier_pro5_sd_clk_data,
165 }, 169 },
166 { 170 {
167 .compatible = "socionext,uniphier-ld11-mio-clock", 171 .compatible = "socionext,uniphier-ld11-mio-clock",
168 .data = uniphier_sld3_mio_clk_data, 172 .data = uniphier_sld3_mio_clk_data,
169 }, 173 },
170 { 174 {
171 .compatible = "socionext,uniphier-ld20-mio-clock", 175 .compatible = "socionext,uniphier-ld20-sd-clock",
172 .data = uniphier_pro5_mio_clk_data, 176 .data = uniphier_pro5_sd_clk_data,
173 }, 177 },
174 /* Peripheral clock */ 178 /* Peripheral clock */
175 { 179 {
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 6aa7ec768d0b..218d20f099ce 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -93,7 +93,7 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
93 { /* sentinel */ } 93 { /* sentinel */ }
94}; 94};
95 95
96const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = { 96const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = {
97 UNIPHIER_MIO_CLK_SD_FIXED, 97 UNIPHIER_MIO_CLK_SD_FIXED,
98 UNIPHIER_MIO_CLK_SD(0, 0), 98 UNIPHIER_MIO_CLK_SD(0, 0),
99 UNIPHIER_MIO_CLK_SD(1, 1), 99 UNIPHIER_MIO_CLK_SD(1, 1),
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
index 15a2f2cbe0d9..2c243a894f3b 100644
--- a/drivers/clk/uniphier/clk-uniphier-mux.c
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -42,7 +42,7 @@ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
42 struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw); 42 struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
43 int num_parents = clk_hw_get_num_parents(hw); 43 int num_parents = clk_hw_get_num_parents(hw);
44 int ret; 44 int ret;
45 u32 val; 45 unsigned int val;
46 u8 i; 46 u8 i;
47 47
48 ret = regmap_read(mux->regmap, mux->reg, &val); 48 ret = regmap_read(mux->regmap, mux->reg, &val);
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 3ae184062388..0244dba1f4cf 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -115,7 +115,7 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
115extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[]; 115extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
116extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[]; 116extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
117extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[]; 117extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
118extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[]; 118extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
119extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[]; 119extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
120extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[]; 120extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
121 121
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 245190839359..e2c6e43cf8ca 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -417,6 +417,16 @@ config SYS_SUPPORTS_SH_TMU
417config SYS_SUPPORTS_EM_STI 417config SYS_SUPPORTS_EM_STI
418 bool 418 bool
419 419
420config CLKSRC_JCORE_PIT
421 bool "J-Core PIT timer driver" if COMPILE_TEST
422 depends on OF
423 depends on GENERIC_CLOCKEVENTS
424 depends on HAS_IOMEM
425 select CLKSRC_MMIO
426 help
427	  This enables the build of the clocksource and clockevent driver for
428	  the integrated PIT in the J-Core synthesizable, open-source SoC.
429
420config SH_TIMER_CMT 430config SH_TIMER_CMT
421 bool "Renesas CMT timer driver" if COMPILE_TEST 431 bool "Renesas CMT timer driver" if COMPILE_TEST
422 depends on GENERIC_CLOCKEVENTS 432 depends on GENERIC_CLOCKEVENTS
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fd9d6df0bbc0..cf87f407f1ad 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
5obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o 5obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
6obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o 6obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
7obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o 7obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
8obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
8obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o 9obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
9obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o 10obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
10obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o 11obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
new file mode 100644
index 000000000000..54e1665aa03c
--- /dev/null
+++ b/drivers/clocksource/jcore-pit.c
@@ -0,0 +1,249 @@
1/*
2 * J-Core SoC PIT/clocksource driver
3 *
4 * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/interrupt.h>
14#include <linux/clockchips.h>
15#include <linux/clocksource.h>
16#include <linux/sched_clock.h>
17#include <linux/cpu.h>
18#include <linux/cpuhotplug.h>
19#include <linux/of_address.h>
20#include <linux/of_irq.h>
21
22#define PIT_IRQ_SHIFT 12
23#define PIT_PRIO_SHIFT 20
24#define PIT_ENABLE_SHIFT 26
25#define PIT_PRIO_MASK 0xf
26
27#define REG_PITEN 0x00
28#define REG_THROT 0x10
29#define REG_COUNT 0x14
30#define REG_BUSPD 0x18
31#define REG_SECHI 0x20
32#define REG_SECLO 0x24
33#define REG_NSEC 0x28
34
35struct jcore_pit {
36 struct clock_event_device ced;
37 void __iomem *base;
38 unsigned long periodic_delta;
39 u32 enable_val;
40};
41
42static void __iomem *jcore_pit_base;
43static struct jcore_pit __percpu *jcore_pit_percpu;
44
45static notrace u64 jcore_sched_clock_read(void)
46{
47 u32 seclo, nsec, seclo0;
48 __iomem void *base = jcore_pit_base;
49
50 seclo = readl(base + REG_SECLO);
51 do {
52 seclo0 = seclo;
53 nsec = readl(base + REG_NSEC);
54 seclo = readl(base + REG_SECLO);
55 } while (seclo0 != seclo);
56
57 return seclo * NSEC_PER_SEC + nsec;
58}
59
60static cycle_t jcore_clocksource_read(struct clocksource *cs)
61{
62 return jcore_sched_clock_read();
63}
64
65static int jcore_pit_disable(struct jcore_pit *pit)
66{
67 writel(0, pit->base + REG_PITEN);
68 return 0;
69}
70
71static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit)
72{
73 jcore_pit_disable(pit);
74 writel(delta, pit->base + REG_THROT);
75 writel(pit->enable_val, pit->base + REG_PITEN);
76 return 0;
77}
78
79static int jcore_pit_set_state_shutdown(struct clock_event_device *ced)
80{
81 struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
82
83 return jcore_pit_disable(pit);
84}
85
86static int jcore_pit_set_state_oneshot(struct clock_event_device *ced)
87{
88 struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
89
90 return jcore_pit_disable(pit);
91}
92
93static int jcore_pit_set_state_periodic(struct clock_event_device *ced)
94{
95 struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
96
97 return jcore_pit_set(pit->periodic_delta, pit);
98}
99
100static int jcore_pit_set_next_event(unsigned long delta,
101 struct clock_event_device *ced)
102{
103 struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
104
105 return jcore_pit_set(delta, pit);
106}
107
108static int jcore_pit_local_init(unsigned cpu)
109{
110 struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
111 unsigned buspd, freq;
112
113 pr_info("Local J-Core PIT init on cpu %u\n", cpu);
114
115 buspd = readl(pit->base + REG_BUSPD);
116 freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);
117 pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
118
119 clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
120
121 return 0;
122}
123
124static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
125{
126 struct jcore_pit *pit = this_cpu_ptr(dev_id);
127
128 if (clockevent_state_oneshot(&pit->ced))
129 jcore_pit_disable(pit);
130
131 pit->ced.event_handler(&pit->ced);
132
133 return IRQ_HANDLED;
134}
135
136static int __init jcore_pit_init(struct device_node *node)
137{
138 int err;
139 unsigned pit_irq, cpu;
140 unsigned long hwirq;
141 u32 irqprio, enable_val;
142
143 jcore_pit_base = of_iomap(node, 0);
144 if (!jcore_pit_base) {
145 pr_err("Error: Cannot map base address for J-Core PIT\n");
146 return -ENXIO;
147 }
148
149 pit_irq = irq_of_parse_and_map(node, 0);
150 if (!pit_irq) {
151 pr_err("Error: J-Core PIT has no IRQ\n");
152 return -ENXIO;
153 }
154
155 pr_info("Initializing J-Core PIT at %p IRQ %d\n",
156 jcore_pit_base, pit_irq);
157
158 err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs",
159 NSEC_PER_SEC, 400, 32,
160 jcore_clocksource_read);
161 if (err) {
162 pr_err("Error registering clocksource device: %d\n", err);
163 return err;
164 }
165
166 sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC);
167
168 jcore_pit_percpu = alloc_percpu(struct jcore_pit);
169 if (!jcore_pit_percpu) {
170 pr_err("Failed to allocate memory for clock event device\n");
171 return -ENOMEM;
172 }
173
174 err = request_irq(pit_irq, jcore_timer_interrupt,
175 IRQF_TIMER | IRQF_PERCPU,
176 "jcore_pit", jcore_pit_percpu);
177 if (err) {
178 pr_err("pit irq request failed: %d\n", err);
179 free_percpu(jcore_pit_percpu);
180 return err;
181 }
182
183 /*
184 * The J-Core PIT is not hard-wired to a particular IRQ, but
185 * integrated with the interrupt controller such that the IRQ it
186 * generates is programmable, as follows:
187 *
188 * The bit layout of the PIT enable register is:
189 *
190 * .....e..ppppiiiiiiii............
191 *
192 * where the .'s indicate unrelated/unused bits, e is enable,
193 * p is priority, and i is hard irq number.
194 *
195 * For the PIT included in AIC1 (obsolete but still in use),
196 * any hard irq (trap number) can be programmed via the 8
197 * iiiiiiii bits, and a priority (0-15) is programmable
198 * separately in the pppp bits.
199 *
200 * For the PIT included in AIC2 (current), the programming
201 * interface is equivalent modulo interrupt mapping. This is
202 * why a different compatible tag was not used. However only
203 * traps 64-127 (the ones actually intended to be used for
204 * interrupts, rather than syscalls/exceptions/etc.) can be
205 * programmed (the high 2 bits of i are ignored) and the
206 * priority pppp is <<2'd and or'd onto the irq number. This
207 * choice seems to have been made on the hardware engineering
208 * side under an assumption that preserving old AIC1 priority
209 * mappings was important. Future models will likely ignore
210 * the pppp field.
211 */
212 hwirq = irq_get_irq_data(pit_irq)->hwirq;
213 irqprio = (hwirq >> 2) & PIT_PRIO_MASK;
214 enable_val = (1U << PIT_ENABLE_SHIFT)
215 | (hwirq << PIT_IRQ_SHIFT)
216 | (irqprio << PIT_PRIO_SHIFT);
217
218 for_each_present_cpu(cpu) {
219 struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
220
221 pit->base = of_iomap(node, cpu);
222 if (!pit->base) {
223 pr_err("Unable to map PIT for cpu %u\n", cpu);
224 continue;
225 }
226
227 pit->ced.name = "jcore_pit";
228 pit->ced.features = CLOCK_EVT_FEAT_PERIODIC
229 | CLOCK_EVT_FEAT_ONESHOT
230 | CLOCK_EVT_FEAT_PERCPU;
231 pit->ced.cpumask = cpumask_of(cpu);
232 pit->ced.rating = 400;
233 pit->ced.irq = pit_irq;
234 pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown;
235 pit->ced.set_state_periodic = jcore_pit_set_state_periodic;
236 pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot;
237 pit->ced.set_next_event = jcore_pit_set_next_event;
238
239 pit->enable_val = enable_val;
240 }
241
242 cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
243 "AP_JCORE_TIMER_STARTING",
244 jcore_pit_local_init, NULL);
245
246 return 0;
247}
248
249CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
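The long comment in jcore_pit_init() describes the enable register layout: e at bit 26, pppp at bits 23:20, iiiiiiii at bits 19:12, with the AIC2 priority derived from the hard irq number. A worked example with a hypothetical hwirq of 76, using the shift/mask constants from the driver:

	#include <stdint.h>
	#include <stdio.h>

	#define PIT_IRQ_SHIFT    12
	#define PIT_PRIO_SHIFT   20
	#define PIT_ENABLE_SHIFT 26
	#define PIT_PRIO_MASK    0xf

	int main(void)
	{
		unsigned long hwirq = 76;	/* hypothetical trap number (64-127) */
		uint32_t irqprio = (hwirq >> 2) & PIT_PRIO_MASK;	/* = 3 */
		uint32_t enable_val = (1U << PIT_ENABLE_SHIFT)
				    | (hwirq << PIT_IRQ_SHIFT)
				    | (irqprio << PIT_PRIO_SHIFT);

		/* 0x4000000 | 0x4c000 | 0x300000 */
		printf("enable_val = %#x\n", (unsigned)enable_val);	/* 0x434c000 */
		return 0;
	}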
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index c184eb84101e..4f87f3e76d83 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
152 return IRQ_HANDLED; 152 return IRQ_HANDLED;
153} 153}
154 154
155static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
156{
157 struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
158
159 return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
160}
161
155static int sun5i_rate_cb_clksrc(struct notifier_block *nb, 162static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
156 unsigned long event, void *data) 163 unsigned long event, void *data)
157{ 164{
@@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
210 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, 217 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
211 base + TIMER_CTL_REG(1)); 218 base + TIMER_CTL_REG(1));
212 219
213 ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name, 220 cs->clksrc.name = node->name;
214 rate, 340, 32, clocksource_mmio_readl_down); 221 cs->clksrc.rating = 340;
222 cs->clksrc.read = sun5i_clksrc_read;
223 cs->clksrc.mask = CLOCKSOURCE_MASK(32);
224 cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
225
226 ret = clocksource_register_hz(&cs->clksrc, rate);
215 if (ret) { 227 if (ret) {
216 pr_err("Couldn't register clock source.\n"); 228 pr_err("Couldn't register clock source.\n");
217 goto err_remove_notifier; 229 goto err_remove_notifier;
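The sun5i timer counts down, and the new read callback returns the bitwise complement of the counter for the same reason the previous clocksource_mmio_readl_down helper did: for a 32-bit value, ~count equals 0xffffffff - count, which turns a down-counter into the monotonically increasing value the clocksource core expects. A quick sanity check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t count = 0xfffffff0;	/* down-counter shortly after reload */

		assert((uint32_t)~count == 0xffffffffu - count);	/* both 0xf */
		return 0;
	}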
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f535f8123258..4737520ec823 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct _pid {
179/** 179/**
180 * struct cpudata - Per CPU instance data storage 180 * struct cpudata - Per CPU instance data storage
181 * @cpu: CPU number for this instance data 181 * @cpu: CPU number for this instance data
182 * @policy: CPUFreq policy value
182 * @update_util: CPUFreq utility callback information 183 * @update_util: CPUFreq utility callback information
183 * @update_util_set: CPUFreq utility callback is set 184 * @update_util_set: CPUFreq utility callback is set
184 * @iowait_boost: iowait-related boost fraction 185 * @iowait_boost: iowait-related boost fraction
@@ -201,6 +202,7 @@ struct _pid {
201struct cpudata { 202struct cpudata {
202 int cpu; 203 int cpu;
203 204
205 unsigned int policy;
204 struct update_util_data update_util; 206 struct update_util_data update_util;
205 bool update_util_set; 207 bool update_util_set;
206 208
@@ -1142,10 +1144,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
1142 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); 1144 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
1143} 1145}
1144 1146
1145static void intel_pstate_set_min_pstate(struct cpudata *cpu) 1147static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
1146{ 1148{
1147 int pstate = cpu->pstate.min_pstate;
1148
1149 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1149 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
1150 cpu->pstate.current_pstate = pstate; 1150 cpu->pstate.current_pstate = pstate;
1151 /* 1151 /*
@@ -1157,6 +1157,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
1157 pstate_funcs.get_val(cpu, pstate)); 1157 pstate_funcs.get_val(cpu, pstate));
1158} 1158}
1159 1159
1160static void intel_pstate_set_min_pstate(struct cpudata *cpu)
1161{
1162 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
1163}
1164
1165static void intel_pstate_max_within_limits(struct cpudata *cpu)
1166{
1167 int min_pstate, max_pstate;
1168
1169 update_turbo_state();
1170 intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
1171 intel_pstate_set_pstate(cpu, max_pstate);
1172}
1173
1160static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) 1174static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1161{ 1175{
1162 cpu->pstate.min_pstate = pstate_funcs.get_min(); 1176 cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1325,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
1325 1339
1326 from = cpu->pstate.current_pstate; 1340 from = cpu->pstate.current_pstate;
1327 1341
1328 target_pstate = pstate_funcs.get_target_pstate(cpu); 1342 target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
1343 cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
1329 1344
1330 intel_pstate_update_pstate(cpu, target_pstate); 1345 intel_pstate_update_pstate(cpu, target_pstate);
1331 1346
@@ -1491,7 +1506,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1491 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 1506 pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
1492 policy->cpuinfo.max_freq, policy->max); 1507 policy->cpuinfo.max_freq, policy->max);
1493 1508
1494 cpu = all_cpu_data[0]; 1509 cpu = all_cpu_data[policy->cpu];
1510 cpu->policy = policy->policy;
1511
1495 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && 1512 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
1496 policy->max < policy->cpuinfo.max_freq && 1513 policy->max < policy->cpuinfo.max_freq &&
1497 policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) { 1514 policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1499,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1499 policy->max = policy->cpuinfo.max_freq; 1516 policy->max = policy->cpuinfo.max_freq;
1500 } 1517 }
1501 1518
1502 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 1519 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
1503 limits = &performance_limits; 1520 limits = &performance_limits;
1504 if (policy->max >= policy->cpuinfo.max_freq) { 1521 if (policy->max >= policy->cpuinfo.max_freq) {
1505 pr_debug("set performance\n"); 1522 pr_debug("set performance\n");
@@ -1535,6 +1552,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1535 limits->max_perf = round_up(limits->max_perf, FRAC_BITS); 1552 limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
1536 1553
1537 out: 1554 out:
1555 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
1556 /*
1557 * NOHZ_FULL CPUs need this as the governor callback may not
1558 * be invoked on them.
1559 */
1560 intel_pstate_clear_update_util_hook(policy->cpu);
1561 intel_pstate_max_within_limits(cpu);
1562 }
1563
1538 intel_pstate_set_update_util_hook(policy->cpu); 1564 intel_pstate_set_update_util_hook(policy->cpu);
1539 1565
1540 intel_pstate_hwp_set_policy(policy); 1566 intel_pstate_hwp_set_policy(policy);
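Taken together, the intel_pstate hunks make the performance policy per-CPU and unconditional: set_policy() records the policy in the cpudata for policy->cpu rather than always CPU 0, the governor callback pins the target at turbo_pstate when that policy is performance, and set_policy() itself jumps the CPU to the maximum allowed p-state so NOHZ_FULL CPUs, which may never run the callback, still get there. A condensed sketch of the resulting selection logic (names from the diff; not a drop-in replacement):

	/* Condensed from the diff above for illustration only. */
	static int choose_target_pstate(struct cpudata *cpu)
	{
		if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
			return cpu->pstate.turbo_pstate;	/* always run flat out */
		return pstate_funcs.get_target_pstate(cpu);	/* load-based estimate */
	}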
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index daadd20aa936..3e2ab3b14eea 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -14,7 +14,7 @@ if DEV_DAX
14 14
15config DEV_DAX_PMEM 15config DEV_DAX_PMEM
16 tristate "PMEM DAX: direct access to persistent memory" 16 tristate "PMEM DAX: direct access to persistent memory"
17 depends on NVDIMM_DAX 17 depends on LIBNVDIMM && NVDIMM_DAX
18 default DEV_DAX 18 default DEV_DAX
19 help 19 help
20 Support raw access to persistent memory. Note that this 20 Support raw access to persistent memory. Note that this
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9630d8837ba9..4a15fa5df98b 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
44 44
45 dev_dbg(dax_pmem->dev, "%s\n", __func__); 45 dev_dbg(dax_pmem->dev, "%s\n", __func__);
46 percpu_ref_exit(ref); 46 percpu_ref_exit(ref);
47 wait_for_completion(&dax_pmem->cmp);
48} 47}
49 48
50static void dax_pmem_percpu_kill(void *data) 49static void dax_pmem_percpu_kill(void *data)
@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
54 53
55 dev_dbg(dax_pmem->dev, "%s\n", __func__); 54 dev_dbg(dax_pmem->dev, "%s\n", __func__);
56 percpu_ref_kill(ref); 55 percpu_ref_kill(ref);
56 wait_for_completion(&dax_pmem->cmp);
57} 57}
58 58
59static int dax_pmem_probe(struct device *dev) 59static int dax_pmem_probe(struct device *dev)
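The dax/pmem change moves wait_for_completion() from the percpu_ref_exit() teardown action into the kill action, so the driver blocks until every in-flight reference is dropped immediately after killing the ref, before the rest of teardown runs. A kernel-context sketch of the general percpu_ref pattern (the completion and function names are hypothetical):

	#include <linux/completion.h>
	#include <linux/percpu-refcount.h>

	static struct completion cmp;		/* hypothetical; the driver keeps it in dax_pmem */

	static void my_release(struct percpu_ref *ref)
	{
		complete(&cmp);			/* last reference dropped */
	}

	/* assumes percpu_ref_init(ref, my_release, 0, GFP_KERNEL) ran at probe */
	static void my_teardown(struct percpu_ref *ref)
	{
		percpu_ref_kill(ref);		/* no new references can be taken */
		wait_for_completion(&cmp);	/* drain in-flight users first... */
		percpu_ref_exit(ref);		/* ...then release the ref's percpu data */
	}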
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 631c977b0da5..180f0a96528c 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
566 566
567 lynx->registers = ioremap_nocache(pci_resource_start(dev, 0), 567 lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
568 PCILYNX_MAX_REGISTER); 568 PCILYNX_MAX_REGISTER);
569 if (lynx->registers == NULL) {
570 dev_err(&dev->dev, "Failed to map registers\n");
571 ret = -ENOMEM;
572 goto fail_deallocate_lynx;
573 }
569 574
570 lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device, 575 lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
571 sizeof(struct pcl), &lynx->rcv_start_pcl_bus); 576 sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
@@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
578 lynx->rcv_buffer == NULL) { 583 lynx->rcv_buffer == NULL) {
579 dev_err(&dev->dev, "Failed to allocate receive buffer\n"); 584 dev_err(&dev->dev, "Failed to allocate receive buffer\n");
580 ret = -ENOMEM; 585 ret = -ENOMEM;
581 goto fail_deallocate; 586 goto fail_deallocate_buffers;
582 } 587 }
583 lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus); 588 lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
584 lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID); 589 lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
@@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
641 dev_err(&dev->dev, 646 dev_err(&dev->dev,
642 "Failed to allocate shared interrupt %d\n", dev->irq); 647 "Failed to allocate shared interrupt %d\n", dev->irq);
643 ret = -EIO; 648 ret = -EIO;
644 goto fail_deallocate; 649 goto fail_deallocate_buffers;
645 } 650 }
646 651
647 lynx->misc.parent = &dev->dev; 652 lynx->misc.parent = &dev->dev;
@@ -668,7 +673,7 @@ fail_free_irq:
668 reg_write(lynx, PCI_INT_ENABLE, 0); 673 reg_write(lynx, PCI_INT_ENABLE, 0);
669 free_irq(lynx->pci_device->irq, lynx); 674 free_irq(lynx->pci_device->irq, lynx);
670 675
671fail_deallocate: 676fail_deallocate_buffers:
672 if (lynx->rcv_start_pcl) 677 if (lynx->rcv_start_pcl)
673 pci_free_consistent(lynx->pci_device, sizeof(struct pcl), 678 pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
674 lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); 679 lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
@@ -679,6 +684,8 @@ fail_deallocate:
679 pci_free_consistent(lynx->pci_device, PAGE_SIZE, 684 pci_free_consistent(lynx->pci_device, PAGE_SIZE,
680 lynx->rcv_buffer, lynx->rcv_buffer_bus); 685 lynx->rcv_buffer, lynx->rcv_buffer_bus);
681 iounmap(lynx->registers); 686 iounmap(lynx->registers);
687
688fail_deallocate_lynx:
682 kfree(lynx); 689 kfree(lynx);
683 690
684fail_disable: 691fail_disable:
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c06945160a41..5e23e2d305e7 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
11 -mno-mmx -mno-sse 11 -mno-mmx -mno-sse
12 12
13cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) 13cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
14cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ 14cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
15 -fno-builtin -fpic -mno-single-pic-base 15 -fno-builtin -fpic -mno-single-pic-base
16 16
17cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt 17cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
@@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@
79# decompressor. So move our .data to .data.efistub, which is preserved 79# decompressor. So move our .data to .data.efistub, which is preserved
80# explicitly by the decompressor linker script. 80# explicitly by the decompressor linker script.
81# 81#
82STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub 82STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
83 -R ___ksymtab+sort -R ___kcrctab+sort
83STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS 84STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 26ee00f6bd58..d011cb89d25e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -284,7 +284,7 @@ config GPIO_MM_LANTIQ
284 284
285config GPIO_MOCKUP 285config GPIO_MOCKUP
286 tristate "GPIO Testing Driver" 286 tristate "GPIO Testing Driver"
287 depends on GPIOLIB 287 depends on GPIOLIB && SYSFS
288 select GPIO_SYSFS 288 select GPIO_SYSFS
289 help 289 help
290 This enables GPIO Testing driver, which provides a way to test GPIO 290 This enables GPIO Testing driver, which provides a way to test GPIO
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 9457e2022bf6..dc37dbe4b46d 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -219,6 +219,7 @@ static const struct of_device_id ath79_gpio_of_match[] = {
219 { .compatible = "qca,ar9340-gpio" }, 219 { .compatible = "qca,ar9340-gpio" },
220 {}, 220 {},
221}; 221};
222MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
222 223
223static int ath79_gpio_probe(struct platform_device *pdev) 224static int ath79_gpio_probe(struct platform_device *pdev)
224{ 225{
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 425501c39527..793518a30afe 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
239 irq_hw_number_t hwirq) 239 irq_hw_number_t hwirq)
240{ 240{
241 irq_set_chip_data(irq, h->host_data); 241 irq_set_chip_data(irq, h->host_data);
242 irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq); 242 irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
243 243
244 return 0; 244 return 0;
245} 245}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b9daa0bf32a4..ee1724806f46 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -308,8 +308,10 @@ static int mxs_gpio_probe(struct platform_device *pdev)
308 writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR); 308 writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
309 309
310 irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id()); 310 irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
311 if (irq_base < 0) 311 if (irq_base < 0) {
312 return irq_base; 312 err = irq_base;
313 goto out_iounmap;
314 }
313 315
314 port->domain = irq_domain_add_legacy(np, 32, irq_base, 0, 316 port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
315 &irq_domain_simple_ops, NULL); 317 &irq_domain_simple_ops, NULL);
@@ -349,6 +351,8 @@ out_irqdomain_remove:
349 irq_domain_remove(port->domain); 351 irq_domain_remove(port->domain);
350out_irqdesc_free: 352out_irqdesc_free:
351 irq_free_descs(irq_base, 32); 353 irq_free_descs(irq_base, 32);
354out_iounmap:
355 iounmap(port->base);
352 return err; 356 return err;
353} 357}
354 358
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e7d422a6b90b..5b0042776ec7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -409,7 +409,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
 		 * 801/1801/1600, bits are cleared when read.
 		 * Edge detect register is not present on 801/1600/1801
 		 */
-		if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 ||
+		if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
 		    stmpe->partnum != STMPE1801) {
 			stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
 			stmpe_reg_write(stmpe,
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 99256115bea5..c2a80b4cbf32 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -66,6 +66,7 @@ static const struct of_device_id ts4800_gpio_of_match[] = {
 	{ .compatible = "technologic,ts4800-gpio", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);
 
 static struct platform_driver ts4800_gpio_driver = {
 	.driver = {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 58ece201b8e6..72a4b326fd0d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -653,14 +653,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
 {
 	int idx, i;
 	unsigned int irq_flags;
+	int ret = -ENOENT;
 
 	for (i = 0, idx = 0; idx <= index; i++) {
 		struct acpi_gpio_info info;
 		struct gpio_desc *desc;
 
 		desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
-		if (IS_ERR(desc))
+		if (IS_ERR(desc)) {
+			ret = PTR_ERR(desc);
 			break;
+		}
 		if (info.gpioint && idx++ == index) {
 			int irq = gpiod_to_irq(desc);
 
@@ -679,7 +682,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
 		}
 
 	}
-	return -ENOENT;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f0fc3a0d37c8..20e09b7c2de3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -333,6 +333,13 @@ struct linehandle_state {
 	u32 numdescs;
 };
 
+#define GPIOHANDLE_REQUEST_VALID_FLAGS \
+	(GPIOHANDLE_REQUEST_INPUT | \
+	GPIOHANDLE_REQUEST_OUTPUT | \
+	GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+	GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+	GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
 static long linehandle_ioctl(struct file *filep, unsigned int cmd,
 			     unsigned long arg)
 {
@@ -344,6 +351,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
 	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
 		int val;
 
+		memset(&ghd, 0, sizeof(ghd));
+
 		/* TODO: check if descriptors are really input */
 		for (i = 0; i < lh->numdescs; i++) {
 			val = gpiod_get_value_cansleep(lh->descs[i]);
@@ -444,6 +453,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		u32 lflags = handlereq.flags;
 		struct gpio_desc *desc;
 
+		if (offset >= gdev->ngpio) {
+			ret = -EINVAL;
+			goto out_free_descs;
+		}
+
+		/* Return an error if a unknown flag is set */
+		if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
+			ret = -EINVAL;
+			goto out_free_descs;
+		}
+
 		desc = &gdev->descs[offset];
 		ret = gpiod_request(desc, lh->label);
 		if (ret)
@@ -536,6 +556,10 @@ struct lineevent_state {
 	struct mutex read_lock;
 };
 
+#define GPIOEVENT_REQUEST_VALID_FLAGS \
+	(GPIOEVENT_REQUEST_RISING_EDGE | \
+	GPIOEVENT_REQUEST_FALLING_EDGE)
+
 static unsigned int lineevent_poll(struct file *filep,
 				   struct poll_table_struct *wait)
 {
@@ -623,6 +647,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
 	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
 		int val;
 
+		memset(&ghd, 0, sizeof(ghd));
+
 		val = gpiod_get_value_cansleep(le->desc);
 		if (val < 0)
 			return val;
@@ -726,6 +752,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	lflags = eventreq.handleflags;
 	eflags = eventreq.eventflags;
 
+	if (offset >= gdev->ngpio) {
+		ret = -EINVAL;
+		goto out_free_label;
+	}
+
+	/* Return an error if a unknown flag is set */
+	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+	    (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
+		ret = -EINVAL;
+		goto out_free_label;
+	}
+
 	/* This is just wrong: we don't look for events on output lines */
 	if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
 		ret = -EINVAL;
@@ -823,6 +861,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
 		struct gpiochip_info chipinfo;
 
+		memset(&chipinfo, 0, sizeof(chipinfo));
+
 		strncpy(chipinfo.name, dev_name(&gdev->dev),
 			sizeof(chipinfo.name));
 		chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
@@ -839,7 +879,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 		if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
 			return -EFAULT;
-		if (lineinfo.line_offset > gdev->ngpio)
+		if (lineinfo.line_offset >= gdev->ngpio)
 			return -EINVAL;
 
 		desc = &gdev->descs[lineinfo.line_offset];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2e3a0543760d..e3281d4e3e41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
 	return ret;
 }
 
-static void amdgpu_connector_destroy(struct drm_connector *connector)
+static void amdgpu_connector_unregister(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
@@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
 		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
 		amdgpu_connector->ddc_bus->has_aux = false;
 	}
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
 	amdgpu_connector_free_edid(connector);
 	kfree(amdgpu_connector->con_priv);
 	drm_connector_unregister(connector);
@@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = amdgpu_connector_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.set_property = amdgpu_connector_set_lcd_property,
 };
@@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = amdgpu_connector_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.set_property = amdgpu_connector_set_property,
 };
@@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
 	.detect = amdgpu_connector_dvi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
 };
@@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
 	.detect = amdgpu_connector_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
 };
@@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
 	.detect = amdgpu_connector_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = amdgpu_connector_set_lcd_property,
+	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e203e5561107..a5e2fcbef0f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 		ctx->rings[i].sequence = 1;
 		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
 	}
+
+	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
 	/* create context entity for each ring */
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7dbe85d67d26..b4f4a9239069 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1408,16 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_block_status[i].valid)
 			continue;
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
-			adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
-			continue;
-		/* enable clockgating to save power */
-		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-								    AMD_CG_STATE_GATE);
-		if (r) {
-			DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
-			return r;
-		}
 		if (adev->ip_blocks[i].funcs->late_init) {
 			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
 			if (r) {
@@ -1426,6 +1416,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 			}
 			adev->ip_block_status[i].late_initialized = true;
 		}
+		/* skip CG for VCE/UVD, it's handled specially */
+		if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+			/* enable clockgating to save power */
+			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+									    AMD_CG_STATE_GATE);
+			if (r) {
+				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+				return r;
+			}
+		}
 	}
 
 	return 0;
@@ -1435,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	/* need to disable SMC first */
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].hw)
+			continue;
+		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
+			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+									    AMD_CG_STATE_UNGATE);
+			if (r) {
+				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+				return r;
+			}
+			r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+			/* XXX handle errors */
+			if (r) {
+				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].funcs->name, r);
+			}
+			adev->ip_block_status[i].hw = false;
+			break;
+		}
+	}
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_block_status[i].hw)
 			continue;
@@ -2073,7 +2099,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 		if (!adev->ip_block_status[i].valid)
 			continue;
 		if (adev->ip_blocks[i].funcs->check_soft_reset)
-			adev->ip_blocks[i].funcs->check_soft_reset(adev);
+			adev->ip_block_status[i].hang =
+				adev->ip_blocks[i].funcs->check_soft_reset(adev);
 		if (adev->ip_block_status[i].hang) {
 			DRM_INFO("IP block:%d is hang!\n", i);
 			asic_hang = true;
@@ -2102,12 +2129,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 
 static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
 {
-	if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
-		DRM_INFO("Some block need full reset!\n");
-		return true;
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+			if (adev->ip_block_status[i].hang) {
+				DRM_INFO("Some block need full reset!\n");
+				return true;
+			}
+		}
 	}
 	return false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf1b7d7..14f57d9915e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 	printk("\n");
 }
 
+
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
 	struct drm_device *dev = adev->ddev;
 	struct drm_crtc *crtc;
 	struct amdgpu_crtc *amdgpu_crtc;
-	u32 line_time_us, vblank_lines;
+	u32 vblank_in_pixels;
 	u32 vblank_time_us = 0xffffffff;	/* if the displays are off, vblank time is max */
 
 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			amdgpu_crtc = to_amdgpu_crtc(crtc);
 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
-				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-					amdgpu_crtc->hw_mode.clock;
-				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
-					amdgpu_crtc->hw_mode.crtc_vdisplay +
-					(amdgpu_crtc->v_border * 2);
-				vblank_time_us = vblank_lines * line_time_us;
+				vblank_in_pixels =
+					amdgpu_crtc->hw_mode.crtc_htotal *
+					(amdgpu_crtc->hw_mode.crtc_vblank_end -
+					amdgpu_crtc->hw_mode.crtc_vdisplay +
+					(amdgpu_crtc->v_border * 2));
+
+				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
 				break;
 			}
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aa074fac0c7f..f3efb1c5dae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -754,6 +754,10 @@ static const char *amdgpu_vram_names[] = {
 
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
+	/* reserve PAT memory space to WC for VRAM */
+	arch_io_reserve_memtype_wc(adev->mc.aper_base,
+				   adev->mc.aper_size);
+
 	/* Add an MTRR for the VRAM */
 	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
 					      adev->mc.aper_size);
@@ -769,6 +773,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
 	amdgpu_ttm_fini(adev);
 	arch_phys_wc_del(adev->mc.vram_mtrr);
+	arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
 }
 
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e1fa8731d1e2..3cb5e903cd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
 	ent = debugfs_create_file(name,
 				  S_IFREG | S_IRUGO, root,
 				  ring, &amdgpu_debugfs_ring_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
+	if (!ent)
+		return -ENOMEM;
 
 	i_size_write(ent->d_inode, ring->ring_size + 12);
 	ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 887483b8b818..dcaf691f56b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	unsigned int flags = 0;
 	unsigned pinned = 0;
 	int r;
 
+	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+		flags |= FOLL_WRITE;
+
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 		/* check that we only use anonymous memory
 		   to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 	list_add(&guptask.list, &gtt->guptasks);
 	spin_unlock(&gtt->guptasklock);
 
-	r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+	r = get_user_pages(userptr, num_pages, flags, p, NULL);
 
 	spin_lock(&gtt->guptasklock);
 	list_del(&guptask.list);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f80a0834e889..3c082e143730 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle,
 	return 0;
 }
 
-/* borrowed from KV, need future unify */
 static int cz_dpm_get_temperature(struct amdgpu_device *adev)
 {
 	int actual_temp = 0;
-	uint32_t temp = RREG32_SMC(0xC0300E0C);
+	uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+	uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
 
-	if (temp)
+	if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
 		actual_temp = 1000 * ((temp / 8) - 49);
+	else
+		actual_temp = 1000 * (temp / 8);
 
 	return actual_temp;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 613ebb7ed50f..4108c686aa7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle)
 	return 0;
 }
 
-static int dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (dce_v10_0_is_display_hung(adev))
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
-	else
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
-
-	return 0;
+	return dce_v10_0_is_display_hung(adev);
 }
 
 static int dce_v10_0_soft_reset(void *handle)
@@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle)
 	u32 srbm_soft_reset = 0, tmp;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
-		return 0;
-
 	if (dce_v10_0_is_display_hung(adev))
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6c6ff57b1c95..ee6a48a09214 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
 	int r;
+	u32 tmp;
 
 	gfx_v8_0_rlc_stop(adev);
 
 	/* disable CG */
-	WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+	tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+		 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
 	if (adev->asic_type == CHIP_POLARIS11 ||
-	    adev->asic_type == CHIP_POLARIS10)
-		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+	    adev->asic_type == CHIP_POLARIS10) {
+		tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+		tmp &= ~0x3;
+		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+	}
 
 	/* disable PG */
 	WREG32(mmRLC_PG_CNTL, 0);
@@ -5137,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
 		return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -5189,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle)
 						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
 
 	if (grbm_soft_reset || srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
 		adev->gfx.grbm_soft_reset = grbm_soft_reset;
 		adev->gfx.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
 		adev->gfx.grbm_soft_reset = 0;
 		adev->gfx.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
@@ -5226,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5264,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle)
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 	u32 tmp;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5334,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1b319f5bc696..c22ef140a542 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle)
 
 }
 
-static int gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle)
 						SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
 		adev->mc.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
 		adev->mc.srbm_soft_reset = 0;
+		return false;
 	}
-	return 0;
 }
 
 static int gmc_v8_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
 	gmc_v8_0_mc_stop(adev, &adev->mc.save);
@@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 	srbm_soft_reset = adev->mc.srbm_soft_reset;
 
@@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
 	gmc_v8_0_mc_resume(adev, &adev->mc.save);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index f325fd86430b..a9d10941fb53 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle)
 	}
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
 		adev->sdma.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
 		adev->sdma.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static int sdma_v3_0_pre_soft_reset(void *handle)
@@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle)
 	u32 srbm_soft_reset = 0;
 	u32 tmp;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8bd08925b370..3de7bca5854b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		max_sclk = 75000;
 		max_mclk = 80000;
 	}
+	/* Limit clocks for some HD8600 parts */
+	if (adev->pdev->device == 0x6660 &&
+	    adev->pdev->revision == 0x83) {
+		max_sclk = 75000;
+		max_mclk = 80000;
+	}
 
 	if (rps->vce_active) {
 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index d127d59f953a..b4ea229bb449 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle)
 						SOFT_RESET_IH, 1);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
 		adev->irq.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
 		adev->irq.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static int tonga_ih_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 
 	return tonga_ih_hw_fini(adev);
@@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 
 	return tonga_ih_hw_init(adev);
@@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 	srbm_soft_reset = adev->irq.srbm_soft_reset;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0fd9f21ed95..ab3df6d75656 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle)
 }
 
 #define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
-static int uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle)
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
 		adev->uvd.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
 		adev->uvd.srbm_soft_reset = 0;
+		return false;
 	}
-	return 0;
 }
+
 static int uvd_v6_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 
 	uvd_v6_0_stop(adev);
@@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 	srbm_soft_reset = adev->uvd.srbm_soft_reset;
 
@@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3f6db4ec0102..8533269ec160 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle)
 #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
 				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
 
-static int vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle)
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
 	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
 		adev->vce.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
 		adev->vce.srbm_soft_reset = 0;
+		return false;
 	}
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return 0;
 }
 
 static int vce_v3_0_soft_reset(void *handle)
@@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 	srbm_soft_reset = adev->vce.srbm_soft_reset;
 
@@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
@@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c934b78c9e2f..bec8125bceb0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -165,7 +165,7 @@ struct amd_ip_funcs {
 	/* poll for idle */
 	int (*wait_for_idle)(void *handle);
 	/* check soft reset the IP block */
-	int (*check_soft_reset)(void *handle);
+	bool (*check_soft_reset)(void *handle);
 	/* pre soft reset the IP block */
 	int (*pre_soft_reset)(void *handle);
 	/* soft reset the IP block */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 92b117843875..8cee4e0f9fde 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = {
 	uninitialize_display_phy_access_tasks,
 	disable_gfx_voltage_island_power_gating_tasks,
 	disable_gfx_clock_gating_tasks,
+	uninitialize_thermal_controller_tasks,
 	set_boot_state_tasks,
 	adjust_power_state_tasks,
 	disable_dynamic_state_management_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 7e4fcbbbe086..960424913496 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
 	return 0;
 }
 
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+	int actual_temp = 0;
+	uint32_t val = cgs_read_ind_register(hwmgr->device,
+					     CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+	uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+	if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+		actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	else
+		actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+	return actual_temp;
+}
+
 static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
 {
 	struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
 	case AMDGPU_PP_SENSOR_VCE_POWER:
 		*value = cz_hwmgr->vce_power_gated ? 0 : 1;
 		return 0;
+	case AMDGPU_PP_SENSOR_GPU_TEMP:
+		*value = cz_thermal_get_temperature(hwmgr);
+		return 0;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 508245d49d33..609996c84ad5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1030 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1030 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1031 1031
1032 /* disable SCLK dpm */ 1032 /* disable SCLK dpm */
1033 if (!data->sclk_dpm_key_disabled) 1033 if (!data->sclk_dpm_key_disabled) {
1034 PP_ASSERT_WITH_CODE( 1034 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1035 (smum_send_msg_to_smc(hwmgr->smumgr, 1035 "Trying to disable SCLK DPM when DPM is disabled",
1036 PPSMC_MSG_DPM_Disable) == 0), 1036 return 0);
1037 "Failed to disable SCLK DPM!", 1037 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
1038 return -EINVAL); 1038 }
1039 1039
1040 /* disable MCLK dpm */ 1040 /* disable MCLK dpm */
1041 if (!data->mclk_dpm_key_disabled) { 1041 if (!data->mclk_dpm_key_disabled) {
1042 PP_ASSERT_WITH_CODE( 1042 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1043 (smum_send_msg_to_smc(hwmgr->smumgr, 1043 "Trying to disable MCLK DPM when DPM is disabled",
1044 PPSMC_MSG_MCLKDPM_Disable) == 0), 1044 return 0);
1045 "Failed to disable MCLK DPM!", 1045 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
1046 return -EINVAL);
1047 } 1046 }
1048 1047
1049 return 0; 1048 return 0;
@@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 	}
 
-	if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
-		return -EINVAL;
-	}
+	smu7_disable_sclk_mclk_dpm(hwmgr);
+
+	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+			"Trying to disable voltage DPM when DPM is disabled",
+			return 0);
+
+	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
 
 	return 0;
 }
@@ -1226,7 +1228,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
-	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
 
 	tmp_result = smu7_enable_sclk_control(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to disable thermal auto throttle!", result = tmp_result);
 
+	if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+				"Failed to disable AVFS!",
+				return -EINVAL);
+	}
+
 	tmp_result = smu7_stop_dpm(hwmgr);
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to stop DPM!", result = tmp_result);
@@ -1452,8 +1460,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
 
 
-	if (table_info != NULL)
-		sclk_table = table_info->vdd_dep_on_sclk;
+	if (table_info == NULL)
+		return -EINVAL;
+
+	sclk_table = table_info->vdd_dep_on_sclk;
 
 	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
@@ -3802,13 +3812,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
 
 int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
 {
-	const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
-	const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+	const struct smu7_power_state *psa;
+	const struct smu7_power_state *psb;
 	int i;
 
 	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
 		return -EINVAL;
 
+	psa = cast_const_phw_smu7_power_state(pstate1);
+	psb = cast_const_phw_smu7_power_state(pstate2);
 	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
 	if (psa->performance_level_count != psb->performance_level_count) {
 		*equal = false;
@@ -4324,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
 	.set_mclk_od = smu7_set_mclk_od,
 	.get_clock_by_type = smu7_get_clock_by_type,
 	.read_sensor = smu7_read_sensor,
+	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
 };
 
 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index eda802bc63c8..8c889caba420 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
 	PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
 		"Invalid VramInfo table.", return -EINVAL);
 
-	if (!data->is_memory_gddr5) {
+	if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
 		table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
 		table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
 		for (k = 0; k < table->num_entries; k++) {
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2f58e9e2a59c..a51f8cbcfe26 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
-	if (dcrtc->dpms != dpms) {
-		dcrtc->dpms = dpms;
-		if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
-			WARN_ON(clk_prepare_enable(dcrtc->clk));
-		armada_drm_crtc_update(dcrtc);
-		if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
-			clk_disable_unprepare(dcrtc->clk);
+	if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
 		if (dpms_blanked(dpms))
 			armada_drm_vblank_off(dcrtc);
-		else
+		else if (!IS_ERR(dcrtc->clk))
+			WARN_ON(clk_prepare_enable(dcrtc->clk));
+		dcrtc->dpms = dpms;
+		armada_drm_crtc_update(dcrtc);
+		if (!dpms_blanked(dpms))
 			drm_crtc_vblank_on(&dcrtc->crtc);
+		else if (!IS_ERR(dcrtc->clk))
+			clk_disable_unprepare(dcrtc->clk);
+	} else if (dcrtc->dpms != dpms) {
+		dcrtc->dpms = dpms;
 	}
 }
 
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 608df4c90520..0743e65cb240 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -267,6 +267,8 @@ int ast_mm_init(struct ast_private *ast)
 		return ret;
 	}
 
+	arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+				   pci_resource_len(dev->pdev, 0));
 	ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
 					pci_resource_len(dev->pdev, 0));
 
@@ -275,11 +277,15 @@ int ast_mm_init(struct ast_private *ast)
 
 void ast_mm_fini(struct ast_private *ast)
 {
+	struct drm_device *dev = ast->dev;
+
 	ttm_bo_device_release(&ast->ttm.bdev);
 
 	ast_ttm_global_release(ast);
 
 	arch_phys_wc_del(ast->fb_mtrr);
+	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+				pci_resource_len(dev->pdev, 0));
 }
 
 void ast_ttm_placement(struct ast_bo *bo, int domain)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bb2438dd8733..5e7e63ce7bce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -267,6 +267,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 		return ret;
 	}
 
+	arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+				   pci_resource_len(dev->pdev, 0));
+
 	cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
 					   pci_resource_len(dev->pdev, 0));
 
@@ -276,6 +279,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
+	struct drm_device *dev = cirrus->dev;
+
 	if (!cirrus->mm_inited)
 		return;
 
@@ -285,6 +290,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
 
 	arch_phys_wc_del(cirrus->fb_mtrr);
 	cirrus->fb_mtrr = 0;
+	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+				pci_resource_len(dev->pdev, 0));
 }
 
 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 1df2d33d0b40..ffb2ab389d1d 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data)
 
 	mutex_lock(&dev->master_mutex);
 	master = dev->master;
-	if (!master)
-		goto out_unlock;
-
 	seq_printf(m, "%s", dev->driver->name);
 	if (dev->dev)
 		seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data)
 	if (dev->unique)
 		seq_printf(m, " unique=%s", dev->unique);
 	seq_printf(m, "\n");
-out_unlock:
 	mutex_unlock(&dev->master_mutex);
 
 	return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index cb86c7e5495c..d9230132dfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	/*
 	 * Append a LINK to the submitted command buffer to return to
 	 * the ring buffer. return_target is the ring target address.
-	 * We need three dwords: event, wait, link.
+	 * We need at most 7 dwords in the return target: 2 cache flush +
+	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
 	 */
-	return_dwords = 3;
+	return_dwords = 7;
 	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
 	CMD_LINK(cmdbuf, return_dwords, return_target);
 
 	/*
-	 * Append event, wait and link pointing back to the wait
-	 * command to the ring buffer.
+	 * Append a cache flush, stall, event, wait and link pointing back to
+	 * the wait command to the ring buffer.
 	 */
+	if (gpu->exec_state == ETNA_PIPE_2D) {
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+			       VIVS_GL_FLUSH_CACHE_PE2D);
+	} else {
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+			       VIVS_GL_FLUSH_CACHE_DEPTH |
+			       VIVS_GL_FLUSH_CACHE_COLOR);
+		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+			       VIVS_TS_FLUSH_CACHE_FLUSH);
+	}
+	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, return_target + 8);
+	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+		 buffer->user_size - 4);
 
 	if (drm_debug & DRM_UT_DRIVER)
 		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603e6eac..0370b842d9cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	uintptr_t ptr;
+	unsigned int flags = 0;
 
 	pvec = drm_malloc_ab(npages, sizeof(struct page *));
 	if (!pvec)
 		return ERR_PTR(-ENOMEM);
 
+	if (!etnaviv_obj->userptr.ro)
+		flags |= FOLL_WRITE;
+
 	pinned = 0;
 	ptr = etnaviv_obj->userptr.ptr;
 
 	down_read(&mm->mmap_sem);
 	while (pinned < npages) {
 		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-					    !etnaviv_obj->userptr.ro, 0,
-					    pvec + pinned, NULL);
+					    flags, pvec + pinned, NULL);
 		if (ret < 0)
 			break;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d3796ed8d8c5..169ac96e8f08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
 		return (u32)buf->vram_node.start;
 
 	mutex_lock(&mmu->lock);
-	ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+	ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+				      buf->size + SZ_64K);
 	if (ret < 0) {
 		mutex_unlock(&mmu->lock);
 		return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index aa92decf4233..fbd13fabdf2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 		goto err_free;
 	}
 
-	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+	ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+			       g2d_userptr->vec);
 	if (ret != npages) {
 		DRM_ERROR("failed to get user pages from userptr.\n");
 		if (ret < 0)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 3371635cd4d7..b2d5e188b1b8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
 			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
 	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
 		     DCU_UPDATE_MODE_READREG);
+	clk_disable_unprepare(fsl_dev->pix_clk);
 }
 
 static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 
+	clk_prepare_enable(fsl_dev->pix_clk);
 	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
 			   DCU_MODE_DCU_MODE_MASK,
 			   DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		     DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
 		     DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
 		     DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
-	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-		     DCU_UPDATE_MODE_READREG);
 	return;
 }
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0884c45aefe8..e04efbed1a54 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
 		return ret;
 	}
 
-	ret = clk_prepare_enable(fsl_dev->pix_clk);
-	if (ret < 0) {
-		dev_err(dev, "failed to enable pix clk\n");
-		goto disable_dcu_clk;
-	}
-
+	if (fsl_dev->tcon)
+		fsl_tcon_bypass_enable(fsl_dev->tcon);
 	fsl_dcu_drm_init_planes(fsl_dev->drm);
 	drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
 
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
 	enable_irq(fsl_dev->irq);
 
 	return 0;
-
-disable_dcu_clk:
-	clk_disable_unprepare(fsl_dev->clk);
-	return ret;
 }
 #endif
 
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
 		goto disable_clk;
 	}
 
-	ret = clk_prepare_enable(fsl_dev->pix_clk);
-	if (ret < 0) {
-		dev_err(dev, "failed to enable pix clk\n");
-		goto unregister_pix_clk;
-	}
-
 	fsl_dev->tcon = fsl_tcon_init(dev);
 
 	drm = drm_dev_alloc(driver, dev);
 	if (IS_ERR(drm)) {
 		ret = PTR_ERR(drm);
-		goto disable_pix_clk;
+		goto unregister_pix_clk;
 	}
 
 	fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
 
 unref:
 	drm_dev_unref(drm);
-disable_pix_clk:
-	clk_disable_unprepare(fsl_dev->pix_clk);
 unregister_pix_clk:
 	clk_unregister(fsl_dev->pix_clk);
 disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
 	struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
 
 	clk_disable_unprepare(fsl_dev->clk);
-	clk_disable_unprepare(fsl_dev->pix_clk);
 	clk_unregister(fsl_dev->pix_clk);
 	drm_put_dev(fsl_dev->drm);
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index a7e5486bd1e9..9e6f7d8112b3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
 		for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
 			regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
 	}
-	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-			   DCU_MODE_DCU_MODE_MASK,
-			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
-	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-		     DCU_UPDATE_MODE_READREG);
 }
 
 struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 26edcc899712..e1dd75b18118 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -20,38 +20,6 @@
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_tcon.h"
 
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
-				 struct drm_crtc_state *crtc_state,
-				 struct drm_connector_state *conn_state)
-{
-	return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-	if (fsl_dev->tcon)
-		fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-	if (fsl_dev->tcon)
-		fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-	.atomic_check = fsl_dcu_drm_encoder_atomic_check,
-	.disable = fsl_dcu_drm_encoder_disable,
-	.enable = fsl_dcu_drm_encoder_enable,
-};
-
 static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
 	int ret;
 
 	encoder->possible_crtcs = 1;
+
+	/* Use bypass mode for parallel RGB/LVDS encoder */
+	if (fsl_dev->tcon)
+		fsl_tcon_bypass_enable(fsl_dev->tcon);
+
 	ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
 			       DRM_MODE_ENCODER_LVDS, NULL);
 	if (ret < 0)
 		return ret;
 
-	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
 	return 0;
 }
 
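[Editor's note] Taken together, the fsl-dcu hunks above move pixel-clock management out of probe/resume and into the CRTC enable/disable path, and set TCON bypass once at encoder creation instead of toggling it per enable. A minimal sketch of the clock-gating half, with hypothetical demo_* names standing in for the driver's own types:

#include <linux/clk.h>

struct demo_crtc_dev {
	struct clk *pix_clk;
};

/* The pixel clock runs only while the CRTC is actually scanning out. */
static void demo_crtc_enable(struct demo_crtc_dev *d)
{
	clk_prepare_enable(d->pix_clk);
	/* ... switch the controller to NORMAL mode here ... */
}

static void demo_crtc_disable(struct demo_crtc_dev *d)
{
	/* ... switch the controller OFF here ... */
	clk_disable_unprepare(d->pix_clk);
}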
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930c64b5..c6f780f5abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
+		unsigned int flags = 0;
+
+		if (!obj->userptr.read_only)
+			flags |= FOLL_WRITE;
 
 		ret = -EFAULT;
 		if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 				(work->task, mm,
 				 obj->userptr.ptr + pinned * PAGE_SIZE,
 				 npages - pinned,
-				 !obj->userptr.read_only, 0,
+				 flags,
 				 pvec + pinned, NULL);
 			if (ret < 0)
 				break;
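[Editor's note] The etnaviv, exynos and i915 hunks above, together with the radeon and via ones further down, are all one API migration: get_user_pages() and its relatives dropped their separate write/force boolean parameters in favor of a single gup_flags bitmask. A minimal sketch of the translation, assuming only the standard FOLL_* flags; build_gup_flags() is a hypothetical helper, not part of the patch:

#include <linux/mm.h>	/* get_user_pages(), FOLL_WRITE, FOLL_FORCE */

/*
 * Hypothetical helper: translate the old (write, force) boolean
 * parameters into the single gup_flags bitmask the new API takes.
 */
static unsigned int build_gup_flags(bool write, bool force)
{
	unsigned int flags = 0;

	if (write)
		flags |= FOLL_WRITE;	/* caller will write to the pages */
	if (force)
		flags |= FOLL_FORCE;	/* override VMA protection checks */

	return flags;
}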
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 919b35f2ad24..dcf7d11ac380 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -266,6 +266,9 @@ int mgag200_mm_init(struct mga_device *mdev)
 		return ret;
 	}
 
+	arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+				   pci_resource_len(dev->pdev, 0));
+
 	mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
 					 pci_resource_len(dev->pdev, 0));
 
@@ -274,10 +277,14 @@ int mgag200_mm_init(struct mga_device *mdev)
 
 void mgag200_mm_fini(struct mga_device *mdev)
 {
+	struct drm_device *dev = mdev->dev;
+
 	ttm_bo_device_release(&mdev->ttm.bdev);
 
 	mgag200_ttm_global_release(mdev);
 
+	arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+				pci_resource_len(dev->pdev, 0));
 	arch_phys_wc_del(mdev->fb_mtrr);
 	mdev->fb_mtrr = 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1825dbc33192..a6dbe8258040 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	/* VRAM init */
 	drm->gem.vram_available = drm->device.info.ram_user;
 
+	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+				   device->func->resource_size(device, 1));
+
 	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
 			     drm->gem.vram_available >> PAGE_SHIFT);
 	if (ret) {
@@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 void
 nouveau_ttm_fini(struct nouveau_drm *drm)
 {
+	struct nvkm_device *device = nvxx_device(&drm->device);
+
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
 
@@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
 
 	arch_phys_wc_del(drm->ttm.mtrr);
 	drm->ttm.mtrr = 0;
+	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+				device->func->resource_size(device, 1));
+
 }
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020dd0b4..5a26eb4545aa 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
 	struct drm_device *dev = rdev->ddev;
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
-	u32 line_time_us, vblank_lines;
+	u32 vblank_in_pixels;
 	u32 vblank_time_us = 0xffffffff;	/* if the displays are off, vblank time is max */
 
 	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 			radeon_crtc = to_radeon_crtc(crtc);
 			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-				line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
-					radeon_crtc->hw_mode.clock;
-				vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
-					radeon_crtc->hw_mode.crtc_vdisplay +
-					(radeon_crtc->v_border * 2);
-				vblank_time_us = vblank_lines * line_time_us;
+				vblank_in_pixels =
+					radeon_crtc->hw_mode.crtc_htotal *
+					(radeon_crtc->hw_mode.crtc_vblank_end -
+					 radeon_crtc->hw_mode.crtc_vdisplay +
+					 (radeon_crtc->v_border * 2));
+
+				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
 				break;
 			}
 		}
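[Editor's note] The r600_dpm.c change above fixes an integer-precision problem: computing the per-line time first truncates the division, and the error is then multiplied by the line count. Counting vblank pixels first and dividing once keeps the precision. A standalone illustration with invented mode numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented mode: ~4k timing, pixel clock in kHz as radeon stores it. */
	uint32_t htotal = 4400, vblank_lines = 90, clock_khz = 594000;

	/* Old order of operations: 4400 * 1000 / 594000 = 7.4, truncated to 7. */
	uint32_t line_time_us = (htotal * 1000) / clock_khz;
	uint32_t old_vblank_us = vblank_lines * line_time_us;	/* 90 * 7 = 630 */

	/* New order: count vblank pixels first, divide once at the end. */
	uint32_t vblank_in_pixels = htotal * vblank_lines;	/* 396000 */
	uint32_t new_vblank_us = vblank_in_pixels * 1000 / clock_khz;	/* 666 */

	printf("old=%u us, new=%u us\n", old_vblank_us, new_vblank_us);
	return 0;
}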
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 50e96d2c593d..e18839d52e3e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
 	return ret;
 }
 
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->ddc_bus->has_aux) {
+		drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+		radeon_connector->ddc_bus->has_aux = false;
+	}
+}
+
 static void radeon_connector_destroy(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.set_property = radeon_lvds_set_property,
 };
@@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_vga_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.set_property = radeon_connector_set_property,
 };
@@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_tv_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.set_property = radeon_connector_set_property,
 };
@@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
 	.detect = radeon_dvi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = radeon_connector_set_property,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
 };
@@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
 	.detect = radeon_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = radeon_connector_set_property,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
 };
@@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
 	.detect = radeon_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = radeon_lvds_set_property,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
 };
@@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
 	.detect = radeon_dp_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = radeon_lvds_set_property,
+	.early_unregister = radeon_connector_unregister,
 	.destroy = radeon_connector_destroy,
 	.force = radeon_dvi_force,
 };
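[Editor's note] The radeon hunks above move DP AUX teardown from ->destroy into the connector's ->early_unregister hook, so the AUX character device disappears when the connector is unregistered rather than only when the last reference is dropped. A sketch of the hook placement, with hypothetical demo_* names:

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

static void demo_connector_early_unregister(struct drm_connector *connector)
{
	/* Unregister userspace-visible pieces (e.g. a DP AUX chardev)
	 * here, while the rest of the device is still alive. */
}

static const struct drm_connector_funcs demo_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.early_unregister = demo_connector_early_unregister,
	/* ->destroy now only frees; no userspace teardown is left here. */
	.destroy = drm_connector_cleanup,
};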
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8ab30a7dd6d..cdb8cb568c15 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev)
 
 void radeon_modeset_fini(struct radeon_device *rdev)
 {
-	radeon_fbdev_fini(rdev);
-	kfree(rdev->mode_info.bios_hardcoded_edid);
-
-	/* free i2c buses */
-	radeon_i2c_fini(rdev);
-
 	if (rdev->mode_info.mode_config_initialized) {
-		radeon_afmt_fini(rdev);
 		drm_kms_helper_poll_fini(rdev->ddev);
 		radeon_hpd_fini(rdev);
 		drm_crtc_force_disable_all(rdev->ddev);
+		radeon_fbdev_fini(rdev);
+		radeon_afmt_fini(rdev);
 		drm_mode_config_cleanup(rdev->ddev);
 		rdev->mode_info.mode_config_initialized = false;
 	}
+
+	kfree(rdev->mode_info.bios_hardcoded_edid);
+
+	/* free i2c buses */
+	radeon_i2c_fini(rdev);
 }
 
 static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 91c8f4339566..00ea0002b539 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -96,9 +96,10 @@
  * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
  * 2.46.0 - Add PFP_SYNC_ME support on evergreen
  * 2.47.0 - Add UVD_NO_OP register support
+ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	47
+#define KMS_DRIVER_MINOR	48
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 021aa005623f..29f7817af821 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
 {
 	if (!i2c)
 		return;
+	WARN_ON(i2c->has_aux);
 	i2c_del_adapter(&i2c->adapter);
-	if (i2c->has_aux)
-		drm_dp_aux_unregister(&i2c->aux);
 	kfree(i2c);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index be30861afae9..41b72ce6613f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 
 int radeon_bo_init(struct radeon_device *rdev)
 {
+	/* reserve PAT memory space to WC for VRAM */
+	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+				   rdev->mc.aper_size);
+
 	/* Add an MTRR for the VRAM */
 	if (!rdev->fastfb_working) {
 		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
 	arch_phys_wc_del(rdev->mc.vram_mtrr);
+	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
 }
 
 /* Returns how many bytes TTM can move per IB.
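[Editor's note] The ast, cirrus, mgag200, nouveau and radeon hunks all follow the same recipe: reserve the VRAM aperture as write-combined in the PAT memtype tracking before adding the MTRR, and release both in reverse order on teardown. A minimal sketch of the pairing, under the assumption of a driver that keeps aper_base/aper_size fields; demo_* names are invented:

#include <linux/io.h>	/* arch_io_reserve_memtype_wc(), arch_phys_wc_add() */

struct demo_vram {
	resource_size_t aper_base;
	resource_size_t aper_size;
	int mtrr;
};

static void demo_vram_init(struct demo_vram *v)
{
	/* Reserve the aperture as WC in the PAT memtype tracker first... */
	arch_io_reserve_memtype_wc(v->aper_base, v->aper_size);
	/* ...then add the (optional, x86-only) MTRR on top of it. */
	v->mtrr = arch_phys_wc_add(v->aper_base, v->aper_size);
}

static void demo_vram_fini(struct demo_vram *v)
{
	/* Tear down in reverse order. */
	arch_phys_wc_del(v->mtrr);
	arch_io_free_memtype_wc(v->aper_base, v->aper_size);
}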
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 455268214b89..3de5e6e21662 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 		struct page **pages = ttm->pages + pinned;
 
-		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+				   pages, NULL);
 		if (r < 0)
 			goto release_pages;
 
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7ee9aafbdf74..e402be8821c4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg)
 	case SPI_CONFIG_CNTL:
 	case SPI_CONFIG_CNTL_1:
 	case TA_CNTL_AUX:
+	case TA_CS_BC_BASE_ADDR:
 		return true;
 	default:
 		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index eb220eecba78..65a911ddd509 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1145,6 +1145,7 @@
 #define	SPI_LB_CU_MASK					0x9354
 
 #define	TA_CNTL_AUX					0x9508
+#define	TA_CS_BC_BASE_ADDR				0x950C
 
 #define CC_RB_BACKEND_DISABLE				0x98F4
 #define	BACKEND_DISABLE(x)				((x) << 16)
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c4fed2..1a3ad769f8c8 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages((unsigned long)xfer->mem_addr,
 			     vsg->num_pages,
-			     (vsg->direction == DMA_FROM_DEVICE),
-			     0, vsg->pages, NULL);
+			     (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+			     vsg->pages, NULL);
 
 	up_read(&current->mm->mmap_sem);
 	if (ret != vsg->num_pages) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e8ae3dc476d1..18061a4bc2f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 		       void *ptr);
 
 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 070d750af16d..1e59a486bba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,7 +43,7 @@
 
 #define VMWGFX_DRIVER_DATE "20160210"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b4aa..c7b53d987f06 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,17 +35,37 @@
 #define VMW_RES_HT_ORDER 12
 
 /**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+	vmw_res_rel_normal,
+	vmw_res_rel_nop,
+	vmw_res_rel_cond_nop,
+	vmw_res_rel_max
+};
+
+/**
  * struct vmw_resource_relocation - Relocation info for resources
  *
  * @head: List head for the software context's relocation list.
  * @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Offset of single byte entries into the command buffer where the
  * id that needs fixup is located.
+ * @rel_type: Type of relocation.
  */
 struct vmw_resource_relocation {
 	struct list_head head;
 	const struct vmw_resource *res;
-	unsigned long offset;
+	u32 offset:29;
+	enum vmw_resource_relocation_type rel_type:3;
 };
 
 /**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				struct vmw_dma_buffer *vbo,
 				bool validate_as_mob,
 				uint32_t *p_val_node);
-
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+	return (unsigned long) b - (unsigned long) a;
+}
 
 /**
  * vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
  * @list: Pointer to head of relocation list.
  * @res: The resource.
  * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
  */
 static int vmw_resource_relocation_add(struct list_head *list,
 				       const struct vmw_resource *res,
-				       unsigned long offset)
+				       unsigned long offset,
+				       enum vmw_resource_relocation_type
+				       rel_type)
 {
 	struct vmw_resource_relocation *rel;
 
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 
 	rel->res = res;
 	rel->offset = offset;
+	rel->rel_type = rel_type;
 	list_add_tail(&rel->head, list);
 
 	return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
 	struct vmw_resource_relocation *rel;
 
+	/* Validate the struct vmw_resource_relocation member size */
+	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
 	list_for_each_entry(rel, list, head) {
-		if (likely(rel->res != NULL))
-			cb[rel->offset] = rel->res->id;
-		else
-			cb[rel->offset] = SVGA_3D_CMD_NOP;
+		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+		switch (rel->rel_type) {
+		case vmw_res_rel_normal:
+			*addr = rel->res->id;
+			break;
+		case vmw_res_rel_nop:
+			*addr = SVGA_3D_CMD_NOP;
+			break;
+		default:
+			if (rel->res->id == -1)
+				*addr = SVGA_3D_CMD_NOP;
+			break;
+		}
 	}
 }
 
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 	*p_val = NULL;
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
-					  id_loc - sw_context->buf_start);
+					  vmw_ptr_diff(sw_context->buf_start,
+						       id_loc),
+					  vmw_res_rel_normal);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id_loc - sw_context->buf_start);
+			 vmw_ptr_diff(sw_context->buf_start, id_loc),
+			 vmw_res_rel_normal);
 	}
 
 	ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
 		return ret;
 
 	return vmw_resource_relocation_add(&sw_context->res_relocations,
-					   NULL, &cmd->header.id -
-					   sw_context->buf_start);
-
-	return 0;
+					   NULL,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_nop);
 }
 
 /**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
 		return ret;
 
 	return vmw_resource_relocation_add(&sw_context->res_relocations,
-					   NULL, &cmd->header.id -
-					   sw_context->buf_start);
-
-	return 0;
+					   NULL,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_nop);
 }
 
 /**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
  * @header: Pointer to the command header in the command stream.
  *
  * Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
  */
 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
 				  struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
 		return ret;
 
 	/*
-	 * Add view to the validate list iff it was not created using this
-	 * command batch.
+	 * If the view wasn't created during this command batch, it might
+	 * have been removed due to a context swapout, so add a
+	 * relocation to conditionally make this command a NOP to avoid
+	 * device errors.
 	 */
-	return vmw_view_res_val_add(sw_context, view);
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   view,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_cond_nop);
 }
 
 /**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
 			       cmd->body.shaderResourceViewId);
 }
 
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+					   struct vmw_sw_context *sw_context,
+					   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXTransferFromBuffer body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.destSid, NULL);
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    &vmw_cmd_buffer_copy_check, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
 		    &vmw_cmd_pred_copy_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+		    &vmw_cmd_dx_transfer_from_buffer,
+		    true, false, true),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
 	int ret;
 
 	*header = NULL;
-	if (!dev_priv->cman || kernel_commands)
-		return kernel_commands;
-
 	if (command_size > SVGA_CB_MAX_SIZE) {
 		DRM_ERROR("Command buffer is too large.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (!dev_priv->cman || kernel_commands)
+		return kernel_commands;
+
 	/* If possible, add a little space for fencing. */
 	cmdbuf_size = command_size + 512;
 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
-	DRM_INFO("Dummy query bo pin count: %d\n",
-		 dev_priv->dummy_query_bo->pin_count);
-
 out_unlock:
 	return;
 
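[Editor's note] The vmwgfx_execbuf.c rework above packs a byte-granular offset and a relocation type into one 32-bit word (29 + 3 bits) and uses BUILD_BUG_ON to prove at compile time that both fields are wide enough. The same guard pattern, reduced to a self-contained sketch with invented demo_* names (the unused static function is deliberate, it exists only to host the checks):

#include <linux/bug.h>		/* BUILD_BUG_ON() */
#include <linux/types.h>	/* u32 */

#define DEMO_MAX_BUF_SIZE (2 * 1024 * 1024)	/* stand-in for SVGA_CB_MAX_SIZE */

enum demo_rel_type {
	demo_rel_normal,
	demo_rel_nop,
	demo_rel_cond_nop,
	demo_rel_max
};

struct demo_relocation {
	u32 offset:29;			/* byte-granular, must hold DEMO_MAX_BUF_SIZE */
	enum demo_rel_type rel_type:3;	/* must hold every enumerator */
};

static void __maybe_unused demo_check_relocation_layout(void)
{
	/* Compile-time proof that the bitfields cannot overflow. */
	BUILD_BUG_ON(DEMO_MAX_BUF_SIZE >= (1 << 29));
	BUILD_BUG_ON(demo_rel_max >= (1 << 3));
}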
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6a328d507a28..52ca1c9d070e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 	long lret;
 
-	if (nonblock)
-		return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
-	lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+	lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+						   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 	if (!lret)
 		return -EBUSY;
 	else if (lret < 0)
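[Editor's note] The hunk above relies on reservation_object_wait_timeout_rcu() treating a zero timeout as a non-blocking poll that returns 0 while the fences are still busy, so the blocking and non-blocking paths collapse into one call. Sketched usage, assuming the 4.9-era reservation API; demo_wait_bo_idle() is an invented wrapper:

#include <linux/reservation.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

static int demo_wait_bo_idle(struct reservation_object *resv, bool nonblock)
{
	long lret;

	/* timeout == 0 turns the wait into a poll; > 0 blocks (in jiffies). */
	lret = reservation_object_wait_timeout_rcu(resv, true, true,
						   nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
	if (!lret)
		return -EBUSY;	/* timed out, i.e. still busy */
	if (lret < 0)
		return lret;	/* interrupted or other error */

	return 0;
}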
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c2a721a8cef9..b445ce9b9757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 	if (res->id != -1) {
 
 		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
-		if (unlikely(cmd == NULL)) {
+		if (unlikely(!cmd)) {
 			DRM_ERROR("Failed reserving FIFO space for surface "
 				  "destruction.\n");
 			return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
 
 	submit_size = vmw_surface_define_size(srf);
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
 		ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
 	uint8_t *cmd;
 	struct vmw_private *dev_priv = res->dev_priv;
 
-	BUG_ON(val_buf->bo == NULL);
-
+	BUG_ON(!val_buf->bo);
 	submit_size = vmw_surface_dma_size(srf);
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "DMA.\n");
 		return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
 
 	submit_size = vmw_surface_destroy_size();
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "eviction.\n");
 		return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 	int ret;
 	struct vmw_resource *res = &srf->res;
 
-	BUG_ON(res_free == NULL);
+	BUG_ON(!res_free);
 	if (!dev_priv->has_mob)
 		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_surface_create_req *req = &arg->req;
 	struct drm_vmw_surface_arg *rep = &arg->rep;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct drm_vmw_size __user *user_sizes;
 	int ret;
 	int i, j;
 	uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	}
 
 	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
+	if (unlikely(!user_srf)) {
 		ret = -ENOMEM;
 		goto out_no_user_srf;
 	}
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
 	srf->num_sizes = num_sizes;
 	user_srf->size = size;
-
-	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
-	if (unlikely(srf->sizes == NULL)) {
-		ret = -ENOMEM;
+	srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+				 req->size_addr,
+				 sizeof(*srf->sizes) * srf->num_sizes);
+	if (IS_ERR(srf->sizes)) {
+		ret = PTR_ERR(srf->sizes);
 		goto out_no_sizes;
 	}
-	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
-			       GFP_KERNEL);
-	if (unlikely(srf->offsets == NULL)) {
+	srf->offsets = kmalloc_array(srf->num_sizes,
+				     sizeof(*srf->offsets),
+				     GFP_KERNEL);
+	if (unlikely(!srf->offsets)) {
 		ret = -ENOMEM;
 		goto out_no_offsets;
 	}
 
-	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-	    req->size_addr;
-
-	ret = copy_from_user(srf->sizes, user_sizes,
-			     srf->num_sizes * sizeof(*srf->sizes));
-	if (unlikely(ret != 0)) {
-		ret = -EFAULT;
-		goto out_no_copy;
-	}
-
 	srf->base_size = *srf->sizes;
 	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
 	srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
 	ret = -EINVAL;
 	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
-	if (unlikely(base == NULL)) {
+	if (unlikely(!base)) {
 		DRM_ERROR("Could not find surface to reference.\n");
 		goto out_no_lookup;
 	}
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 
 	cmd = vmw_fifo_reserve(dev_priv, submit_len);
 	cmd2 = (typeof(cmd2))cmd;
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
 		ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
 	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
 
 	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd1 == NULL)) {
+	if (unlikely(!cmd1)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "binding.\n");
 		return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 
 	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "unbinding.\n");
 		return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
1244 vmw_binding_res_list_scrub(&res->binding_head); 1234 vmw_binding_res_list_scrub(&res->binding_head);
1245 1235
1246 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1236 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
1247 if (unlikely(cmd == NULL)) { 1237 if (unlikely(!cmd)) {
1248 DRM_ERROR("Failed reserving FIFO space for surface " 1238 DRM_ERROR("Failed reserving FIFO space for surface "
1249 "destruction.\n"); 1239 "destruction.\n");
1250 mutex_unlock(&dev_priv->binding_mutex); 1240 mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1410 1400
1411 user_srf = container_of(base, struct vmw_user_surface, prime.base); 1401 user_srf = container_of(base, struct vmw_user_surface, prime.base);
1412 srf = &user_srf->srf; 1402 srf = &user_srf->srf;
1413 if (srf->res.backup == NULL) { 1403 if (!srf->res.backup) {
1414 DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); 1404 DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
1415 goto out_bad_resource; 1405 goto out_bad_resource;
1416 } 1406 }
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
1524 } 1514 }
1525 1515
1526 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); 1516 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
1527 if (unlikely(user_srf == NULL)) { 1517 if (unlikely(!user_srf)) {
1528 ret = -ENOMEM; 1518 ret = -ENOMEM;
1529 goto out_no_user_srf; 1519 goto out_no_user_srf;
1530 } 1520 }
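
A note on the vmw_surface_define_ioctl hunk above: the kmalloc()+copy_from_user() pair becomes a single memdup_user() call, which allocates, copies and reports failure as an ERR_PTR in one step, and the open-coded size multiplication becomes an overflow-checked kmalloc_array(). A minimal kernel-style sketch of the two shapes (copy_blob_old/copy_blob_new, ubuf and len are hypothetical names):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static void *copy_blob_old(const void __user *ubuf, size_t len)
{
	void *blob = kmalloc(len, GFP_KERNEL);

	if (!blob)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(blob, ubuf, len)) {
		kfree(blob);
		return ERR_PTR(-EFAULT);
	}
	return blob;
}

static void *copy_blob_new(const void __user *ubuf, size_t len)
{
	/* allocates, copies and encodes failure as ERR_PTR in one call */
	return memdup_user(ubuf, len);
}
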
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 8fd4bf77f264..818ea7d93533 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = {
234 0xC0 /* End Collection */ 234 0xC0 /* End Collection */
235}; 235};
236 236
237static __u8 pid0006_rdesc_fixed[] = {
238 0x05, 0x01, /* Usage Page (Generic Desktop) */
239 0x09, 0x04, /* Usage (Joystick) */
240 0xA1, 0x01, /* Collection (Application) */
241 0xA1, 0x02, /* Collection (Logical) */
242 0x75, 0x08, /* Report Size (8) */
243 0x95, 0x05, /* Report Count (5) */
244 0x15, 0x00, /* Logical Minimum (0) */
245 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
246 0x35, 0x00, /* Physical Minimum (0) */
247 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
248 0x09, 0x30, /* Usage (X) */
249 0x09, 0x33, /* Usage (Rx) */
250 0x09, 0x32, /* Usage (Z) */
251 0x09, 0x31, /* Usage (Y) */
252 0x09, 0x34, /* Usage (Ry) */
253 0x81, 0x02, /* Input (Variable) */
254 0x75, 0x04, /* Report Size (4) */
255 0x95, 0x01, /* Report Count (1) */
256 0x25, 0x07, /* Logical Maximum (7) */
257 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
259 0x65, 0x14, /* Unit (Degrees) */
259 0x09, 0x39, /* Usage (Hat switch) */
260 0x81, 0x42, /* Input (Variable) */
261 0x65, 0x00, /* Unit (None) */
262 0x75, 0x01, /* Report Size (1) */
263 0x95, 0x0C, /* Report Count (12) */
264 0x25, 0x01, /* Logical Maximum (1) */
265 0x45, 0x01, /* Physical Maximum (1) */
266 0x05, 0x09, /* Usage Page (Button) */
267 0x19, 0x01, /* Usage Minimum (0x01) */
268 0x29, 0x0C, /* Usage Maximum (0x0C) */
269 0x81, 0x02, /* Input (Variable) */
270 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined) */
271 0x75, 0x01, /* Report Size (1) */
272 0x95, 0x08, /* Report Count (8) */
273 0x25, 0x01, /* Logical Maximum (1) */
274 0x45, 0x01, /* Physical Maximum (1) */
275 0x09, 0x01, /* Usage (0x01) */
276 0x81, 0x02, /* Input (Variable) */
277 0xC0, /* End Collection */
278 0xA1, 0x02, /* Collection (Logical) */
279 0x75, 0x08, /* Report Size (8) */
280 0x95, 0x07, /* Report Count (7) */
281 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
282 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
283 0x09, 0x02, /* Usage (0x02) */
284 0x91, 0x02, /* Output (Variable) */
285 0xC0, /* End Collection */
286 0xC0 /* End Collection */
287};
288
289static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, 237static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
290 unsigned int *rsize) 238 unsigned int *rsize)
291{ 239{
@@ -296,16 +244,34 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
296 *rsize = sizeof(pid0011_rdesc_fixed); 244 *rsize = sizeof(pid0011_rdesc_fixed);
297 } 245 }
298 break; 246 break;
299 case 0x0006:
300 if (*rsize == sizeof(pid0006_rdesc_fixed)) {
301 rdesc = pid0006_rdesc_fixed;
302 *rsize = sizeof(pid0006_rdesc_fixed);
303 }
304 break;
305 } 247 }
306 return rdesc; 248 return rdesc;
307} 249}
308 250
251#define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c))
252#define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c))
253
254static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
255 struct hid_field *field, struct hid_usage *usage,
256 unsigned long **bit, int *max)
257{
258 switch (usage->hid) {
259 /*
260 * revert to the old hid-input behavior where axes
261 * can be randomly assigned when hid->usage is reused.
262 */
263 case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
264 case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
265 if (field->flags & HID_MAIN_ITEM_RELATIVE)
266 map_rel(usage->hid & 0xf);
267 else
268 map_abs(usage->hid & 0xf);
269 return 1;
270 }
271
272 return 0;
273}
274
309static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) 275static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
310{ 276{
311 int ret; 277 int ret;
@@ -352,6 +318,7 @@ static struct hid_driver dr_driver = {
352 .id_table = dr_devices, 318 .id_table = dr_devices,
353 .report_fixup = dr_report_fixup, 319 .report_fixup = dr_report_fixup,
354 .probe = dr_probe, 320 .probe = dr_probe,
321 .input_mapping = dr_input_mapping,
355}; 322};
356module_hid_driver(dr_driver); 323module_hid_driver(dr_driver);
357 324
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index cd59c79eebdd..6cfb5cacc253 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -64,6 +64,9 @@
64#define USB_VENDOR_ID_AKAI 0x2011 64#define USB_VENDOR_ID_AKAI 0x2011
65#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715 65#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
66 66
67#define USB_VENDOR_ID_AKAI_09E8 0x09E8
68#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031
69
67#define USB_VENDOR_ID_ALCOR 0x058f 70#define USB_VENDOR_ID_ALCOR 0x058f
68#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720 71#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
69 72
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index d8d55f37b4f5..d3e1ab162f7c 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -100,6 +100,7 @@ struct hidled_device {
100 const struct hidled_config *config; 100 const struct hidled_config *config;
101 struct hid_device *hdev; 101 struct hid_device *hdev;
102 struct hidled_rgb *rgb; 102 struct hidled_rgb *rgb;
103 u8 *buf;
103 struct mutex lock; 104 struct mutex lock;
104}; 105};
105 106
@@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf)
118 119
119 mutex_lock(&ldev->lock); 120 mutex_lock(&ldev->lock);
120 121
122 /*
123 * buffer provided to hid_hw_raw_request must not be on the stack
124 * and must not be part of a data structure
125 */
126 memcpy(ldev->buf, buf, ldev->config->report_size);
127
121 if (ldev->config->report_type == RAW_REQUEST) 128 if (ldev->config->report_type == RAW_REQUEST)
122 ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, 129 ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
123 ldev->config->report_size, 130 ldev->config->report_size,
124 HID_FEATURE_REPORT, 131 HID_FEATURE_REPORT,
125 HID_REQ_SET_REPORT); 132 HID_REQ_SET_REPORT);
126 else if (ldev->config->report_type == OUTPUT_REPORT) 133 else if (ldev->config->report_type == OUTPUT_REPORT)
127 ret = hid_hw_output_report(ldev->hdev, buf, 134 ret = hid_hw_output_report(ldev->hdev, ldev->buf,
128 ldev->config->report_size); 135 ldev->config->report_size);
129 else 136 else
130 ret = -EINVAL; 137 ret = -EINVAL;
@@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf)
147 154
148 mutex_lock(&ldev->lock); 155 mutex_lock(&ldev->lock);
149 156
150 ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, 157 memcpy(ldev->buf, buf, ldev->config->report_size);
158
159 ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
151 ldev->config->report_size, 160 ldev->config->report_size,
152 HID_FEATURE_REPORT, 161 HID_FEATURE_REPORT,
153 HID_REQ_SET_REPORT); 162 HID_REQ_SET_REPORT);
154 if (ret < 0) 163 if (ret < 0)
155 goto err; 164 goto err;
156 165
157 ret = hid_hw_raw_request(ldev->hdev, buf[0], buf, 166 ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
158 ldev->config->report_size, 167 ldev->config->report_size,
159 HID_FEATURE_REPORT, 168 HID_FEATURE_REPORT,
160 HID_REQ_GET_REPORT); 169 HID_REQ_GET_REPORT);
170
171 memcpy(buf, ldev->buf, ldev->config->report_size);
161err: 172err:
162 mutex_unlock(&ldev->lock); 173 mutex_unlock(&ldev->lock);
163 174
@@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id)
447 if (!ldev) 458 if (!ldev)
448 return -ENOMEM; 459 return -ENOMEM;
449 460
461 ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
462 if (!ldev->buf)
463 return -ENOMEM;
464
450 ret = hid_parse(hdev); 465 ret = hid_parse(hdev);
451 if (ret) 466 if (ret)
452 return ret; 467 return ret;
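
The new ldev->buf above exists because buffers handed to hid_hw_raw_request()/hid_hw_output_report() may be used for DMA by the transport driver, so they must come from the heap rather than the stack or the middle of a larger structure. A minimal sketch of the bounce-buffer idea (send_report() and its arguments are hypothetical):

#include <linux/hid.h>
#include <linux/slab.h>
#include <linux/string.h>

static int send_report(struct hid_device *hdev, const u8 *report, size_t len)
{
	/* kmemdup() gives a heap copy that is safe to hand to DMA */
	u8 *buf = kmemdup(report, len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = hid_hw_raw_request(hdev, buf[0], buf, len,
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	kfree(buf);
	return ret;
}
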
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0a0eca5da47d..354d49ea36dd 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -56,6 +56,7 @@ static const struct hid_blacklist {
56 56
57 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, 57 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
58 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, 58 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
59 { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
59 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, 60 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
60 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, 61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, 62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 98114cef1e43..2fe1828bd10b 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -194,10 +194,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
194 * 0.5'C per two measurement cycles thus ignore possible 194 * 0.5'C per two measurement cycles thus ignore possible
195 * but unlikely aliasing error on lsb reading. --Grant 195 * but unlikely aliasing error on lsb reading. --Grant
196 */ 196 */
197 data->temp = ((i2c_smbus_read_byte_data(client, 197 data->temp = (i2c_smbus_read_byte_data(client,
198 ADM9240_REG_TEMP) << 8) | 198 ADM9240_REG_TEMP) << 8) |
199 i2c_smbus_read_byte_data(client, 199 i2c_smbus_read_byte_data(client,
200 ADM9240_REG_TEMP_CONF)) / 128; 200 ADM9240_REG_TEMP_CONF);
201 201
202 for (i = 0; i < 2; i++) { /* read fans */ 202 for (i = 0; i < 2; i++) { /* read fans */
203 data->fan[i] = i2c_smbus_read_byte_data(client, 203 data->fan[i] = i2c_smbus_read_byte_data(client,
@@ -263,7 +263,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
263 char *buf) 263 char *buf)
264{ 264{
265 struct adm9240_data *data = adm9240_update_device(dev); 265 struct adm9240_data *data = adm9240_update_device(dev);
266 return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */ 266 return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
267} 267}
268 268
269static ssize_t show_max(struct device *dev, struct device_attribute *devattr, 269static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
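
The adm9240 change above stores the raw 16-bit register pair and defers the scaling to the sysfs read, so the reported unit is unchanged: the 9-bit reading sits in the top bits, dividing by 128 recovers it, and each step is worth 0.5 degC (500 millidegrees). A small standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* MSB register 0x19, LSB (config) register 0x80 */
	short raw = (0x19 << 8) | 0x80;		/* 6528 */
	int temp9 = raw / 128;			/* 9-bit value: 51 */

	/* signed division truncates toward zero, unlike a plain >> 7 */
	printf("%d mdegC\n", temp9 * 500);	/* 25500, i.e. 25.5 degC */
	return 0;
}
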
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index bef84e085973..c1b9275978f9 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -268,11 +268,13 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
268 long *val) 268 long *val)
269{ 269{
270 struct max31790_data *data = max31790_update_device(dev); 270 struct max31790_data *data = max31790_update_device(dev);
271 u8 fan_config = data->fan_config[channel]; 271 u8 fan_config;
272 272
273 if (IS_ERR(data)) 273 if (IS_ERR(data))
274 return PTR_ERR(data); 274 return PTR_ERR(data);
275 275
276 fan_config = data->fan_config[channel];
277
276 switch (attr) { 278 switch (attr) {
277 case hwmon_pwm_input: 279 case hwmon_pwm_input:
278 *val = data->pwm[channel] >> 8; 280 *val = data->pwm[channel] >> 8;
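
The max31790 fix above is an ordering bug: max31790_update_device() can return an ERR_PTR, so reading data->fan_config[channel] before the IS_ERR() test dereferences an error-encoded pointer. A minimal sketch of the safe shape (struct state and get_state() are hypothetical):

#include <linux/err.h>

struct state {
	int cfg;
};

static int read_cfg(struct state *(*get_state)(void))
{
	struct state *s = get_state();

	if (IS_ERR(s))
		return PTR_ERR(s);	/* bail out before any field access */

	return s->cfg;
}
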
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6d94e2ec5b4f..d252276feadf 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,12 +79,12 @@ config I2C_AMD8111
79 79
80config I2C_HIX5HD2 80config I2C_HIX5HD2
81 tristate "Hix5hd2 high-speed I2C driver" 81 tristate "Hix5hd2 high-speed I2C driver"
82 depends on ARCH_HIX5HD2 || COMPILE_TEST 82 depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
83 help 83 help
84 Say Y here to include support for high-speed I2C controller in the 84 Say Y here to include support for the high-speed I2C controller
85 Hisilicon based hix5hd2 SoCs. 85 used in HiSilicon hix5hd2 SoCs.
86 86
87 This driver can also be built as a module. If so, the module 87 This driver can also be built as a module. If so, the module
88 will be called i2c-hix5hd2. 88 will be called i2c-hix5hd2.
89 89
90config I2C_I801 90config I2C_I801
@@ -589,10 +589,10 @@ config I2C_IMG
589 589
590config I2C_IMX 590config I2C_IMX
591 tristate "IMX I2C interface" 591 tristate "IMX I2C interface"
592 depends on ARCH_MXC || ARCH_LAYERSCAPE 592 depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
593 help 593 help
594 Say Y here if you want to use the IIC bus controller on 594 Say Y here if you want to use the IIC bus controller on
595 the Freescale i.MX/MXC or Layerscape processors. 595 the Freescale i.MX/MXC, Layerscape or ColdFire processors.
596 596
597 This driver can also be built as a module. If so, the module 597 This driver can also be built as a module. If so, the module
598 will be called i2c-imx. 598 will be called i2c-imx.
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 1fe93c43215c..11e866d05368 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -95,6 +95,9 @@
95#define DW_IC_STATUS_TFE BIT(2) 95#define DW_IC_STATUS_TFE BIT(2)
96#define DW_IC_STATUS_MST_ACTIVITY BIT(5) 96#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
97 97
98#define DW_IC_SDA_HOLD_RX_SHIFT 16
99#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
100
98#define DW_IC_ERR_TX_ABRT 0x1 101#define DW_IC_ERR_TX_ABRT 0x1
99 102
100#define DW_IC_TAR_10BITADDR_MASTER BIT(12) 103#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
@@ -420,12 +423,20 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
420 /* Configure SDA Hold Time if required */ 423 /* Configure SDA Hold Time if required */
421 reg = dw_readl(dev, DW_IC_COMP_VERSION); 424 reg = dw_readl(dev, DW_IC_COMP_VERSION);
422 if (reg >= DW_IC_SDA_HOLD_MIN_VERS) { 425 if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
423 if (dev->sda_hold_time) { 426 if (!dev->sda_hold_time) {
424 dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
425 } else {
426 /* Keep previous hold time setting if no one set it */ 427 /* Keep previous hold time setting if no one set it */
427 dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD); 428 dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
428 } 429 }
430 /*
431 * Workaround to avoid TX arbitration loss in case the I2C
432 * slave pulls SDA down "too quickly" after the falling edge of
433 * SCL, by enabling a non-zero SDA RX hold. The specification says
434 * it extends the incoming SDA low-to-high transition while SCL is
435 * high, but it appears to help with the above issue as well.
436 */
437 if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
438 dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
439 dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
429 } else { 440 } else {
430 dev_warn(dev->dev, 441 dev_warn(dev->dev,
431 "Hardware too old to adjust SDA hold time.\n"); 442 "Hardware too old to adjust SDA hold time.\n");
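
For reference, the new DW_IC_SDA_HOLD_RX_MASK above selects bits 23:16 of the SDA hold register, and the workaround programs a minimum RX hold of 1 only when that field is still zero. A small standalone illustration of the bit manipulation (the initial hold value is made up):

#include <stdio.h>
#include <stdint.h>

#define SDA_HOLD_RX_SHIFT	16
#define SDA_HOLD_RX_MASK	(0xffu << SDA_HOLD_RX_SHIFT)	/* bits 23:16 */

int main(void)
{
	uint32_t hold = 0x0000001e;	/* TX hold programmed, RX field zero */

	if (!(hold & SDA_HOLD_RX_MASK))
		hold |= 1u << SDA_HOLD_RX_SHIFT;

	printf("0x%08x\n", hold);	/* 0x0001001e */
	return 0;
}
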
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 9604024e0eb0..49f2084f7bb5 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -368,6 +368,7 @@ static const struct of_device_id dc_i2c_match[] = {
368 { .compatible = "cnxt,cx92755-i2c" }, 368 { .compatible = "cnxt,cx92755-i2c" },
369 { }, 369 { },
370}; 370};
371MODULE_DEVICE_TABLE(of, dc_i2c_match);
371 372
372static struct platform_driver dc_i2c_driver = { 373static struct platform_driver dc_i2c_driver = {
373 .probe = dc_i2c_probe, 374 .probe = dc_i2c_probe,
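
The one-line MODULE_DEVICE_TABLE(of, ...) addition above (repeated below for jz4780, xlp9xx and xlr) exports the OF match table into the module's alias section so userspace can autoload the driver when a matching device-tree node appears; without it the driver binds only if already loaded. A sketch of the pattern (the compatible string is hypothetical):

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-i2c" },	/* hypothetical */
	{ /* sentinel */ }
};
/* emits "of:..." aliases into the module so modprobe can autoload it */
MODULE_DEVICE_TABLE(of, demo_of_match);
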
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 08847e8b8998..eb3627f35d12 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -146,6 +146,7 @@
146#define SMBHSTCFG_HST_EN 1 146#define SMBHSTCFG_HST_EN 1
147#define SMBHSTCFG_SMB_SMI_EN 2 147#define SMBHSTCFG_SMB_SMI_EN 2
148#define SMBHSTCFG_I2C_EN 4 148#define SMBHSTCFG_I2C_EN 4
149#define SMBHSTCFG_SPD_WD 0x10
149 150
150/* TCO configuration bits for TCOCTL */ 151/* TCO configuration bits for TCOCTL */
151#define TCOCTL_EN 0x0100 152#define TCOCTL_EN 0x0100
@@ -865,9 +866,16 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
865 block = 1; 866 block = 1;
866 break; 867 break;
867 case I2C_SMBUS_I2C_BLOCK_DATA: 868 case I2C_SMBUS_I2C_BLOCK_DATA:
868 /* NB: page 240 of ICH5 datasheet shows that the R/#W 869 /*
869 * bit should be cleared here, even when reading */ 870 * NB: page 240 of ICH5 datasheet shows that the R/#W
870 outb_p((addr & 0x7f) << 1, SMBHSTADD(priv)); 871 * bit should be cleared here, even when reading.
872 * However, if SPD Write Disable is set (Lynx Point and later),
873 * the read will fail if we don't set the R/#W bit.
874 */
875 outb_p(((addr & 0x7f) << 1) |
876 ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?
877 (read_write & 0x01) : 0),
878 SMBHSTADD(priv));
871 if (read_write == I2C_SMBUS_READ) { 879 if (read_write == I2C_SMBUS_READ) {
872 /* NB: page 240 of ICH5 datasheet also shows 880 /* NB: page 240 of ICH5 datasheet also shows
873 * that DATA1 is the cmd field when reading */ 881 * that DATA1 is the cmd field when reading */
@@ -1573,6 +1581,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1573 /* Disable SMBus interrupt feature if SMBus using SMI# */ 1581 /* Disable SMBus interrupt feature if SMBus using SMI# */
1574 priv->features &= ~FEATURE_IRQ; 1582 priv->features &= ~FEATURE_IRQ;
1575 } 1583 }
1584 if (temp & SMBHSTCFG_SPD_WD)
1585 dev_info(&dev->dev, "SPD Write Disable is set\n");
1576 1586
1577 /* Clear special mode bits */ 1587 /* Clear special mode bits */
1578 if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER)) 1588 if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
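
To make the i801 address-byte change above concrete: the host address register takes the 7-bit slave address shifted left one, with bit 0 acting as R/#W, and the patch sets that bit on reads only when SPD Write Disable is active. A standalone example with a typical SPD EEPROM address:

#include <stdio.h>

int main(void)
{
	unsigned int addr = 0x50;	/* typical SPD EEPROM address */
	unsigned int read = 1;		/* I2C_SMBUS_READ */
	unsigned int spd_wd = 1;	/* SMBHSTCFG_SPD_WD set */
	unsigned int hstadd;

	hstadd = ((addr & 0x7f) << 1) | (spd_wd ? (read & 0x01) : 0);
	printf("0x%02x\n", hstadd);	/* 0xa1: address 0x50, R/#W = 1 */
	return 0;
}
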
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 592a8f26a708..47fc1f1acff7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1009,10 +1009,13 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
1009 rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0); 1009 rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0);
1010 rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0); 1010 rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0);
1011 1011
1012 if (!gpio_is_valid(rinfo->sda_gpio) || 1012 if (rinfo->sda_gpio == -EPROBE_DEFER ||
1013 !gpio_is_valid(rinfo->scl_gpio) || 1013 rinfo->scl_gpio == -EPROBE_DEFER) {
1014 IS_ERR(i2c_imx->pinctrl_pins_default) || 1014 return -EPROBE_DEFER;
1015 IS_ERR(i2c_imx->pinctrl_pins_gpio)) { 1015 } else if (!gpio_is_valid(rinfo->sda_gpio) ||
1016 !gpio_is_valid(rinfo->scl_gpio) ||
1017 IS_ERR(i2c_imx->pinctrl_pins_default) ||
1018 IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
1016 dev_dbg(&pdev->dev, "recovery information incomplete\n"); 1019 dev_dbg(&pdev->dev, "recovery information incomplete\n");
1017 return 0; 1020 return 0;
1018 } 1021 }
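
The i2c-imx change above distinguishes -EPROBE_DEFER, which means the GPIO controller is not bound yet and the whole probe should be retried, from a genuinely absent GPIO, which merely disables bus recovery. A minimal sketch of that split (get_optional_gpio() is a hypothetical helper):

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

static int get_optional_gpio(struct device_node *np, const char *prop)
{
	int gpio = of_get_named_gpio(np, prop, 0);

	if (gpio == -EPROBE_DEFER)
		return gpio;	/* controller not ready: retry probe later */
	if (!gpio_is_valid(gpio))
		return -ENOENT;	/* feature simply absent, not fatal */
	return gpio;
}
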
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index b8ea62105f42..30132c3957cd 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -729,6 +729,7 @@ static const struct of_device_id jz4780_i2c_of_matches[] = {
729 { .compatible = "ingenic,jz4780-i2c", }, 729 { .compatible = "ingenic,jz4780-i2c", },
730 { /* sentinel */ } 730 { /* sentinel */ }
731}; 731};
732MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
732 733
733static int jz4780_i2c_probe(struct platform_device *pdev) 734static int jz4780_i2c_probe(struct platform_device *pdev)
734{ 735{
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 50702c7bb244..df220666d627 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
694 t_calc->div_low--; 694 t_calc->div_low--;
695 t_calc->div_high--; 695 t_calc->div_high--;
696 696
697 /* Give the tuning value 0, which will not update the con register */
698 t_calc->tuning = 0;
697 /* Maximum divider supported by hw is 0xffff */ 699 /* Maximum divider supported by hw is 0xffff */
698 if (t_calc->div_low > 0xffff) { 700 if (t_calc->div_low > 0xffff) {
699 t_calc->div_low = 0xffff; 701 t_calc->div_low = 0xffff;
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 263685c7a512..05cf192ef1ac 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
105 struct mbox_chan *mbox_chan; 105 struct mbox_chan *mbox_chan;
106 struct mbox_client mbox_client; 106 struct mbox_client mbox_client;
107 struct completion rd_complete; 107 struct completion rd_complete;
108 u8 dma_buffer[I2C_SMBUS_BLOCK_MAX]; 108 u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
109 u32 *resp_msg; 109 u32 *resp_msg;
110}; 110};
111 111
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 2a972ed7aa0d..e29ff37a43bd 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -426,6 +426,7 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = {
426 { .compatible = "netlogic,xlp980-i2c", }, 426 { .compatible = "netlogic,xlp980-i2c", },
427 { /* sentinel */ }, 427 { /* sentinel */ },
428}; 428};
429MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
429 430
430#ifdef CONFIG_ACPI 431#ifdef CONFIG_ACPI
431static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = { 432static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 0968f59b6df5..ad17d88d8573 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -358,6 +358,7 @@ static const struct of_device_id xlr_i2c_dt_ids[] = {
358 }, 358 },
359 { } 359 { }
360}; 360};
361MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
361 362
362static int xlr_i2c_probe(struct platform_device *pdev) 363static int xlr_i2c_probe(struct platform_device *pdev)
363{ 364{
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 5ab67219f71e..1704fc84d647 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1681,6 +1681,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
1681static void of_i2c_register_devices(struct i2c_adapter *adap) 1681static void of_i2c_register_devices(struct i2c_adapter *adap)
1682{ 1682{
1683 struct device_node *bus, *node; 1683 struct device_node *bus, *node;
1684 struct i2c_client *client;
1684 1685
1685 /* Only register child devices if the adapter has a node pointer set */ 1686 /* Only register child devices if the adapter has a node pointer set */
1686 if (!adap->dev.of_node) 1687 if (!adap->dev.of_node)
@@ -1695,7 +1696,14 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
1695 for_each_available_child_of_node(bus, node) { 1696 for_each_available_child_of_node(bus, node) {
1696 if (of_node_test_and_set_flag(node, OF_POPULATED)) 1697 if (of_node_test_and_set_flag(node, OF_POPULATED))
1697 continue; 1698 continue;
1698 of_i2c_register_device(adap, node); 1699
1700 client = of_i2c_register_device(adap, node);
1701 if (IS_ERR(client)) {
1702 dev_warn(&adap->dev,
1703 "Failed to create I2C device for %s\n",
1704 node->full_name);
1705 of_node_clear_flag(node, OF_POPULATED);
1706 }
1699 } 1707 }
1700 1708
1701 of_node_put(bus); 1709 of_node_put(bus);
@@ -2299,6 +2307,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
2299 if (IS_ERR(client)) { 2307 if (IS_ERR(client)) {
2300 dev_err(&adap->dev, "failed to create client for '%s'\n", 2308 dev_err(&adap->dev, "failed to create client for '%s'\n",
2301 rd->dn->full_name); 2309 rd->dn->full_name);
2310 of_node_clear_flag(rd->dn, OF_POPULATED);
2302 return notifier_from_errno(PTR_ERR(client)); 2311 return notifier_from_errno(PTR_ERR(client));
2303 } 2312 }
2304 break; 2313 break;
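
Both i2c-core hunks above revolve around the OF_POPULATED node flag: it is set before a device is created for a DT node, and must be cleared again on failure so a later registration pass or the OF notifier can retry. A sketch of the pattern (populate_node() and create() are hypothetical):

#include <linux/of.h>

static int populate_node(struct device_node *np,
			 int (*create)(struct device_node *))
{
	int ret;

	if (of_node_test_and_set_flag(np, OF_POPULATED))
		return 0;	/* someone already created this device */

	ret = create(np);
	if (ret)
		of_node_clear_flag(np, OF_POPULATED);	/* allow a retry */
	return ret;
}
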
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c68746ce6624..224ad274ea0b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
94 unsigned long dma_attrs = 0; 94 unsigned long dma_attrs = 0;
95 struct scatterlist *sg, *sg_list_start; 95 struct scatterlist *sg, *sg_list_start;
96 int need_release = 0; 96 int need_release = 0;
97 unsigned int gup_flags = FOLL_WRITE;
97 98
98 if (dmasync) 99 if (dmasync)
99 dma_attrs |= DMA_ATTR_WRITE_BARRIER; 100 dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
183 if (ret) 184 if (ret)
184 goto out; 185 goto out;
185 186
187 if (!umem->writable)
188 gup_flags |= FOLL_FORCE;
189
186 need_release = 1; 190 need_release = 1;
187 sg_list_start = umem->sg_head.sgl; 191 sg_list_start = umem->sg_head.sgl;
188 192
@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
190 ret = get_user_pages(cur_base, 194 ret = get_user_pages(cur_base,
191 min_t(unsigned long, npages, 195 min_t(unsigned long, npages,
192 PAGE_SIZE / sizeof (struct page *)), 196 PAGE_SIZE / sizeof (struct page *)),
193 1, !umem->writable, page_list, vma_list); 197 gup_flags, page_list, vma_list);
194 198
195 if (ret < 0) 199 if (ret < 0)
196 goto out; 200 goto out;
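
This hunk and the following get_user_pages() callers reflect an API consolidation: the separate write/force booleans became a single gup_flags argument built from FOLL_WRITE and FOLL_FORCE. A sketch of the mechanical conversion rule:

#include <linux/mm.h>

/*
 * old: get_user_pages(start, n, write, force, pages, vmas)
 * new: get_user_pages(start, n, gup_flags, pages, vmas)
 */
static unsigned int gup_flags_from_bools(int write, int force)
{
	unsigned int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;
	return flags;
}
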
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 75077a018675..1f0fe3217f23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
527 u64 off; 527 u64 off;
528 int j, k, ret = 0, start_idx, npages = 0; 528 int j, k, ret = 0, start_idx, npages = 0;
529 u64 base_virt_addr; 529 u64 base_virt_addr;
530 unsigned int flags = 0;
530 531
531 if (access_mask == 0) 532 if (access_mask == 0)
532 return -EINVAL; 533 return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
556 goto out_put_task; 557 goto out_put_task;
557 } 558 }
558 559
560 if (access_mask & ODP_WRITE_ALLOWED_BIT)
561 flags |= FOLL_WRITE;
562
559 start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT; 563 start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
560 k = start_idx; 564 k = start_idx;
561 565
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
574 */ 578 */
575 npages = get_user_pages_remote(owning_process, owning_mm, 579 npages = get_user_pages_remote(owning_process, owning_mm,
576 user_virt, gup_num_pages, 580 user_virt, gup_num_pages,
577 access_mask & ODP_WRITE_ALLOWED_BIT, 581 flags, local_page_list, NULL);
578 0, local_page_list, NULL);
579 up_read(&owning_mm->mmap_sem); 582 up_read(&owning_mm->mmap_sem);
580 583
581 if (npages < 0) 584 if (npages < 0)
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6c00d04b8b28..c6fe89d79248 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
472 goto out; 472 goto out;
473 } 473 }
474 474
475 ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL); 475 ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
476 if (ret < 0) 476 if (ret < 0)
477 goto out; 477 goto out;
478 478
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2d2b94fd3633..75f08624ac05 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
67 67
68 for (got = 0; got < num_pages; got += ret) { 68 for (got = 0; got < num_pages; got += ret) {
69 ret = get_user_pages(start_page + got * PAGE_SIZE, 69 ret = get_user_pages(start_page + got * PAGE_SIZE,
70 num_pages - got, 1, 1, 70 num_pages - got,
71 FOLL_WRITE | FOLL_FORCE,
71 p + got, NULL); 72 p + got, NULL);
72 if (ret < 0) 73 if (ret < 0)
73 goto bail_release; 74 goto bail_release;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index a0b6ebee4d8a..1ccee6ea5bc3 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
111 int i; 111 int i;
112 int flags; 112 int flags;
113 dma_addr_t pa; 113 dma_addr_t pa;
114 unsigned int gup_flags;
114 115
115 if (!can_do_mlock()) 116 if (!can_do_mlock())
116 return -EPERM; 117 return -EPERM;
@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
135 136
136 flags = IOMMU_READ | IOMMU_CACHE; 137 flags = IOMMU_READ | IOMMU_CACHE;
137 flags |= (writable) ? IOMMU_WRITE : 0; 138 flags |= (writable) ? IOMMU_WRITE : 0;
139 gup_flags = FOLL_WRITE;
140 gup_flags |= (writable) ? 0 : FOLL_FORCE;
138 cur_base = addr & PAGE_MASK; 141 cur_base = addr & PAGE_MASK;
139 ret = 0; 142 ret = 0;
140 143
@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
142 ret = get_user_pages(cur_base, 145 ret = get_user_pages(cur_base,
143 min_t(unsigned long, npages, 146 min_t(unsigned long, npages,
144 PAGE_SIZE / sizeof(struct page *)), 147 PAGE_SIZE / sizeof(struct page *)),
145 1, !writable, page_list, NULL); 148 gup_flags, page_list, NULL);
146 149
147 if (ret < 0) 150 if (ret < 0)
148 goto out; 151 goto out;
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index c0e7b624ce54..12102448fddd 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
178 idev->id_vendor, idev->id_device); 178 idev->id_vendor, idev->id_device);
179} 179}
180 180
181ipack_device_attr(id_format, "0x%hhu\n"); 181ipack_device_attr(id_format, "0x%hhx\n");
182 182
183static DEVICE_ATTR_RO(id); 183static DEVICE_ATTR_RO(id);
184static DEVICE_ATTR_RO(id_device); 184static DEVICE_ATTR_RO(id_device);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 82b0b5daf3f5..bc0af3307bbf 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -158,8 +158,8 @@ config PIC32_EVIC
158 select IRQ_DOMAIN 158 select IRQ_DOMAIN
159 159
160config JCORE_AIC 160config JCORE_AIC
161 bool "J-Core integrated AIC" 161 bool "J-Core integrated AIC" if COMPILE_TEST
162 depends on OF && (SUPERH || COMPILE_TEST) 162 depends on OF
163 select IRQ_DOMAIN 163 select IRQ_DOMAIN
164 help 164 help
165 Support for the J-Core integrated AIC. 165 Support for the J-Core integrated AIC.
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
index efbf0e4304b7..2a7a38830a8d 100644
--- a/drivers/irqchip/irq-eznps.c
+++ b/drivers/irqchip/irq-eznps.c
@@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd)
85 nps_ack_gic(); 85 nps_ack_gic();
86} 86}
87 87
88static void nps400_irq_eoi(struct irq_data *irqd) 88static void nps400_irq_ack(struct irq_data *irqd)
89{ 89{
90 unsigned int __maybe_unused irq = irqd_to_hwirq(irqd); 90 unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
91 91
@@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = {
103 .name = "NPS400 IC", 103 .name = "NPS400 IC",
104 .irq_mask = nps400_irq_mask, 104 .irq_mask = nps400_irq_mask,
105 .irq_unmask = nps400_irq_unmask, 105 .irq_unmask = nps400_irq_unmask,
106 .irq_eoi = nps400_irq_eoi, 106 .irq_ack = nps400_irq_ack,
107}; 107};
108 108
109static int nps400_irq_map(struct irq_domain *d, unsigned int virq, 109static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
@@ -135,7 +135,7 @@ static const struct irq_domain_ops nps400_irq_ops = {
135static int __init nps400_of_init(struct device_node *node, 135static int __init nps400_of_init(struct device_node *node,
136 struct device_node *parent) 136 struct device_node *parent)
137{ 137{
138 static struct irq_domain *nps400_root_domain; 138 struct irq_domain *nps400_root_domain;
139 139
140 if (parent) { 140 if (parent) {
141 pr_err("DeviceTree incore ic not a root irq controller\n"); 141 pr_err("DeviceTree incore ic not a root irq controller\n");
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 003495d91f9c..c5dee300e8a3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1023,7 +1023,7 @@ static void its_free_tables(struct its_node *its)
1023 1023
1024static int its_alloc_tables(struct its_node *its) 1024static int its_alloc_tables(struct its_node *its)
1025{ 1025{
1026 u64 typer = readq_relaxed(its->base + GITS_TYPER); 1026 u64 typer = gic_read_typer(its->base + GITS_TYPER);
1027 u32 ids = GITS_TYPER_DEVBITS(typer); 1027 u32 ids = GITS_TYPER_DEVBITS(typer);
1028 u64 shr = GITS_BASER_InnerShareable; 1028 u64 shr = GITS_BASER_InnerShareable;
1029 u64 cache = GITS_BASER_WaWb; 1029 u64 cache = GITS_BASER_WaWb;
@@ -1198,7 +1198,7 @@ static void its_cpu_init_collection(void)
1198 * We now have to bind each collection to its target 1198 * We now have to bind each collection to its target
1199 * redistributor. 1199 * redistributor.
1200 */ 1200 */
1201 if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) { 1201 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
1202 /* 1202 /*
1203 * This ITS wants the physical address of the 1203 * This ITS wants the physical address of the
1204 * redistributor. 1204 * redistributor.
@@ -1208,7 +1208,7 @@ static void its_cpu_init_collection(void)
1208 /* 1208 /*
1209 * This ITS wants a linear CPU number. 1209 * This ITS wants a linear CPU number.
1210 */ 1210 */
1211 target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); 1211 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
1212 target = GICR_TYPER_CPU_NUMBER(target) << 16; 1212 target = GICR_TYPER_CPU_NUMBER(target) << 16;
1213 } 1213 }
1214 1214
@@ -1691,7 +1691,7 @@ static int __init its_probe_one(struct resource *res,
1691 INIT_LIST_HEAD(&its->its_device_list); 1691 INIT_LIST_HEAD(&its->its_device_list);
1692 its->base = its_base; 1692 its->base = its_base;
1693 its->phys_base = res->start; 1693 its->phys_base = res->start;
1694 its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; 1694 its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
1695 its->numa_node = numa_node; 1695 its->numa_node = numa_node;
1696 1696
1697 its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); 1697 its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1763,7 +1763,7 @@ out_unmap:
1763 1763
1764static bool gic_rdists_supports_plpis(void) 1764static bool gic_rdists_supports_plpis(void)
1765{ 1765{
1766 return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); 1766 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
1767} 1767}
1768 1768
1769int its_cpu_init(void) 1769int its_cpu_init(void)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9b81bd8b929c..19d642eae096 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable)
153 return; /* No PM support in this redistributor */ 153 return; /* No PM support in this redistributor */
154 } 154 }
155 155
156 while (count--) { 156 while (--count) {
157 val = readl_relaxed(rbase + GICR_WAKER); 157 val = readl_relaxed(rbase + GICR_WAKER);
158 if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) 158 if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
159 break; 159 break;
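
The single-character gic_enable_redist() fix above matters on timeout: with while (count--) the counter wraps past zero when it exhausts, which defeats the common follow-up timeout test of the form if (!count), whereas while (--count) leaves the counter at exactly zero. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int count = 3;

	while (count--)
		;			/* body runs 3 times */
	printf("post: %u\n", count);	/* wrapped to UINT_MAX */

	count = 3;
	while (--count)
		;			/* body runs 2 times */
	printf("pre:  %u\n", count);	/* exactly 0 after exhaustion */
	return 0;
}
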
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 58e5b4e87056..d6c404b3584d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1279,7 +1279,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
1279 */ 1279 */
1280 *base += 0xf000; 1280 *base += 0xf000;
1281 cpuif_res.start += 0xf000; 1281 cpuif_res.start += 0xf000;
1282 pr_warn("GIC: Adjusting CPU interface base to %pa", 1282 pr_warn("GIC: Adjusting CPU interface base to %pa\n",
1283 &cpuif_res.start); 1283 &cpuif_res.start);
1284 } 1284 }
1285 1285
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 84b01dec277d..033bccb41455 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -25,12 +25,30 @@
25 25
26static struct irq_chip jcore_aic; 26static struct irq_chip jcore_aic;
27 27
28/*
29 * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
30 * not distinguish or use distinct irq number ranges for per-cpu event
31 * interrupts (timer, IPI). Since information to determine whether a
32 * particular irq number should be treated as per-cpu is not available
33 * at mapping time, we use a wrapper handler function which chooses
34 * the right handler at runtime based on whether IRQF_PERCPU was used
35 * when requesting the irq.
36 */
37
38static void handle_jcore_irq(struct irq_desc *desc)
39{
40 if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
41 handle_percpu_irq(desc);
42 else
43 handle_simple_irq(desc);
44}
45
28static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq, 46static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
29 irq_hw_number_t hwirq) 47 irq_hw_number_t hwirq)
30{ 48{
31 struct irq_chip *aic = d->host_data; 49 struct irq_chip *aic = d->host_data;
32 50
33 irq_set_chip_and_handler(irq, aic, handle_simple_irq); 51 irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
34 52
35 return 0; 53 return 0;
36} 54}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8abde6b8cedc..6d53810963f7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -266,7 +266,7 @@ static struct raid_type {
266 {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET}, 266 {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
267 {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR}, 267 {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
268 {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT}, 268 {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
269 {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */ 269 {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
270 {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N}, 270 {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
271 {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, 271 {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
272 {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, 272 {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2087 /* 2087 /*
2088 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata 2088 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
2089 */ 2089 */
2090 if (le32_to_cpu(sb->level) != mddev->level) { 2090 if (le32_to_cpu(sb->level) != mddev->new_level) {
2091 DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)"); 2091 DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
2092 return -EINVAL; 2092 return -EINVAL;
2093 } 2093 }
2094 if (le32_to_cpu(sb->layout) != mddev->layout) { 2094 if (le32_to_cpu(sb->layout) != mddev->new_layout) {
2095 DMERR("Reshaping raid sets not yet supported. (raid layout change)"); 2095 DMERR("Reshaping raid sets not yet supported. (raid layout change)");
2096 DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout); 2096 DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
2097 DMERR(" Old layout: %s w/ %d copies", 2097 DMERR(" Old layout: %s w/ %d copies",
@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2102 raid10_md_layout_to_copies(mddev->layout)); 2102 raid10_md_layout_to_copies(mddev->layout));
2103 return -EINVAL; 2103 return -EINVAL;
2104 } 2104 }
2105 if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) { 2105 if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
2106 DMERR("Reshaping raid sets not yet supported. (stripe sectors change)"); 2106 DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
2107 return -EINVAL; 2107 return -EINVAL;
2108 } 2108 }
@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2115 return -EINVAL; 2115 return -EINVAL;
2116 } 2116 }
2117 2117
2118 DMINFO("Discovered old metadata format; upgrading to extended metadata format");
2119
2118 /* Table line is checked vs. authoritative superblock */ 2120 /* Table line is checked vs. authoritative superblock */
2119 rs_set_new(rs); 2121 rs_set_new(rs);
2120 } 2122 }
@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2258 if (!mddev->events && super_init_validation(rs, rdev)) 2260 if (!mddev->events && super_init_validation(rs, rdev))
2259 return -EINVAL; 2261 return -EINVAL;
2260 2262
2261 if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) { 2263 if (le32_to_cpu(sb->compat_features) &&
2264 le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2262 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; 2265 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
2263 return -EINVAL; 2266 return -EINVAL;
2264 } 2267 }
@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
3646 3649
3647static struct target_type raid_target = { 3650static struct target_type raid_target = {
3648 .name = "raid", 3651 .name = "raid",
3649 .version = {1, 9, 0}, 3652 .version = {1, 9, 1},
3650 .module = THIS_MODULE, 3653 .module = THIS_MODULE,
3651 .ctr = raid_ctr, 3654 .ctr = raid_ctr,
3652 .dtr = raid_dtr, 3655 .dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bdf1606f67bc..9a8b71067c6e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,6 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
145 145
146struct dm_raid1_bio_record { 146struct dm_raid1_bio_record {
147 struct mirror *m; 147 struct mirror *m;
148 /* if details->bi_bdev == NULL, details were not saved */
149 struct dm_bio_details details; 148 struct dm_bio_details details;
150 region_t write_region; 149 region_t write_region;
151}; 150};
@@ -1200,8 +1199,6 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
1200 struct dm_raid1_bio_record *bio_record = 1199 struct dm_raid1_bio_record *bio_record =
1201 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); 1200 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
1202 1201
1203 bio_record->details.bi_bdev = NULL;
1204
1205 if (rw == WRITE) { 1202 if (rw == WRITE) {
1206 /* Save region for mirror_end_io() handler */ 1203 /* Save region for mirror_end_io() handler */
1207 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); 1204 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1260,22 +1257,12 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
1260 } 1257 }
1261 1258
1262 if (error == -EOPNOTSUPP) 1259 if (error == -EOPNOTSUPP)
1263 goto out; 1260 return error;
1264 1261
1265 if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) 1262 if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
1266 goto out; 1263 return error;
1267 1264
1268 if (unlikely(error)) { 1265 if (unlikely(error)) {
1269 if (!bio_record->details.bi_bdev) {
1270 /*
1271 * There wasn't enough memory to record necessary
1272 * information for a retry or there was no other
1273 * mirror in-sync.
1274 */
1275 DMERR_LIMIT("Mirror read failed.");
1276 return -EIO;
1277 }
1278
1279 m = bio_record->m; 1266 m = bio_record->m;
1280 1267
1281 DMERR("Mirror read failed from %s. Trying alternative device.", 1268 DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1291,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
1291 bd = &bio_record->details; 1278 bd = &bio_record->details;
1292 1279
1293 dm_bio_restore(bd, bio); 1280 dm_bio_restore(bd, bio);
1294 bio_record->details.bi_bdev = NULL; 1281 bio->bi_error = 0;
1295 1282
1296 queue_bio(ms, bio, rw); 1283 queue_bio(ms, bio, rw);
1297 return DM_ENDIO_INCOMPLETE; 1284 return DM_ENDIO_INCOMPLETE;
@@ -1299,9 +1286,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
1299 DMERR("All replicated volumes dead, failing I/O"); 1286 DMERR("All replicated volumes dead, failing I/O");
1300 } 1287 }
1301 1288
1302out:
1303 bio_record->details.bi_bdev = NULL;
1304
1305 return error; 1289 return error;
1306} 1290}
1307 1291
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index dc75bea0d541..1d0d2adc050a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -856,8 +856,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
856 kthread_init_worker(&md->kworker); 856 kthread_init_worker(&md->kworker);
857 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 857 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
858 "kdmwork-%s", dm_device_name(md)); 858 "kdmwork-%s", dm_device_name(md));
859 if (IS_ERR(md->kworker_task)) 859 if (IS_ERR(md->kworker_task)) {
860 return PTR_ERR(md->kworker_task); 860 int error = PTR_ERR(md->kworker_task);
861 md->kworker_task = NULL;
862 return error;
863 }
861 864
862 elv_register_queue(md->queue); 865 elv_register_queue(md->queue);
863 866
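
The dm-rq fix above is about what the field holds after a failure: IS_ERR() means md->kworker_task contains an error-encoded pointer, and later teardown code that tests the field for non-NULL would hand that ERR_PTR to kthread_stop(). A minimal sketch of the pattern (demo_thread() and start_worker() are hypothetical):

#include <linux/err.h>
#include <linux/kthread.h>

static int demo_thread(void *data)
{
	return 0;	/* hypothetical worker body */
}

static int start_worker(struct task_struct **task_out)
{
	struct task_struct *task = kthread_run(demo_thread, NULL, "demo");

	if (IS_ERR(task)) {
		int err = PTR_ERR(task);

		*task_out = NULL;	/* never leave an ERR_PTR behind */
		return err;
	}
	*task_out = task;
	return 0;
}
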
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3e407a9cde1f..c4b53b332607 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
695 695
696 tgt->type = dm_get_target_type(type); 696 tgt->type = dm_get_target_type(type);
697 if (!tgt->type) { 697 if (!tgt->type) {
698 DMERR("%s: %s: unknown target type", dm_device_name(t->md), 698 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
699 type);
700 return -EINVAL; 699 return -EINVAL;
701 } 700 }
702 701
703 if (dm_target_needs_singleton(tgt->type)) { 702 if (dm_target_needs_singleton(tgt->type)) {
704 if (t->num_targets) { 703 if (t->num_targets) {
705 DMERR("%s: target type %s must appear alone in table", 704 tgt->error = "singleton target type must appear alone in table";
706 dm_device_name(t->md), type); 705 goto bad;
707 return -EINVAL;
708 } 706 }
709 t->singleton = true; 707 t->singleton = true;
710 } 708 }
711 709
712 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { 710 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
713 DMERR("%s: target type %s may not be included in read-only tables", 711 tgt->error = "target type may not be included in a read-only table";
714 dm_device_name(t->md), type); 712 goto bad;
715 return -EINVAL;
716 } 713 }
717 714
718 if (t->immutable_target_type) { 715 if (t->immutable_target_type) {
719 if (t->immutable_target_type != tgt->type) { 716 if (t->immutable_target_type != tgt->type) {
720 DMERR("%s: immutable target type %s cannot be mixed with other target types", 717 tgt->error = "immutable target type cannot be mixed with other target types";
721 dm_device_name(t->md), t->immutable_target_type->name); 718 goto bad;
722 return -EINVAL;
723 } 719 }
724 } else if (dm_target_is_immutable(tgt->type)) { 720 } else if (dm_target_is_immutable(tgt->type)) {
725 if (t->num_targets) { 721 if (t->num_targets) {
726 DMERR("%s: immutable target type %s cannot be mixed with other target types", 722 tgt->error = "immutable target type cannot be mixed with other target types";
727 dm_device_name(t->md), tgt->type->name); 723 goto bad;
728 return -EINVAL;
729 } 724 }
730 t->immutable_target_type = tgt->type; 725 t->immutable_target_type = tgt->type;
731 } 726 }
@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
740 */ 735 */
741 if (!adjoin(t, tgt)) { 736 if (!adjoin(t, tgt)) {
742 tgt->error = "Gap in table"; 737 tgt->error = "Gap in table";
743 r = -EINVAL;
744 goto bad; 738 goto bad;
745 } 739 }
746 740
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 147af9536d0c..ef7bf1dd6900 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
1423 if (md->bs) 1423 if (md->bs)
1424 bioset_free(md->bs); 1424 bioset_free(md->bs);
1425 1425
1426 cleanup_srcu_struct(&md->io_barrier);
1427
1428 if (md->disk) { 1426 if (md->disk) {
1429 spin_lock(&_minor_lock); 1427 spin_lock(&_minor_lock);
1430 md->disk->private_data = NULL; 1428 md->disk->private_data = NULL;
@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
1436 if (md->queue) 1434 if (md->queue)
1437 blk_cleanup_queue(md->queue); 1435 blk_cleanup_queue(md->queue);
1438 1436
1437 cleanup_srcu_struct(&md->io_barrier);
1438
1439 if (md->bdev) { 1439 if (md->bdev) {
1440 bdput(md->bdev); 1440 bdput(md->bdev);
1441 md->bdev = NULL; 1441 md->bdev = NULL;
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 4769469fe842..2c9232ef7baa 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
124 } 124 }
125 125
126 /* Get user pages for DMA Xfer */ 126 /* Get user pages for DMA Xfer */
127 err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0, 127 err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
128 1, dma->map); 128 dma->map, FOLL_FORCE);
129 129
130 if (user_dma.page_count != err) { 130 if (user_dma.page_count != err) {
131 IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n", 131 IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index b094054cda6e..f7299d3d8244 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
76 76
77 /* Get user pages for DMA Xfer */ 77 /* Get user pages for DMA Xfer */
78 y_pages = get_user_pages_unlocked(y_dma.uaddr, 78 y_pages = get_user_pages_unlocked(y_dma.uaddr,
79 y_dma.page_count, 0, 1, &dma->map[0]); 79 y_dma.page_count, &dma->map[0], FOLL_FORCE);
80 uv_pages = 0; /* silence gcc. value is set and consumed only if: */ 80 uv_pages = 0; /* silence gcc. value is set and consumed only if: */
81 if (y_pages == y_dma.page_count) { 81 if (y_pages == y_dma.page_count) {
82 uv_pages = get_user_pages_unlocked(uv_dma.uaddr, 82 uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
83 uv_dma.page_count, 0, 1, &dma->map[y_pages]); 83 uv_dma.page_count, &dma->map[y_pages],
84 FOLL_FORCE);
84 } 85 }
85 86
86 if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) { 87 if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index e668dde6d857..a31b95cb3b09 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
214 if (!vec) 214 if (!vec)
215 return -ENOMEM; 215 return -ENOMEM;
216 216
217 ret = get_vaddr_frames(virtp, 1, true, false, vec); 217 ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
218 if (ret != 1) { 218 if (ret != 1) {
219 frame_vector_destroy(vec); 219 frame_vector_destroy(vec);
220 return -EINVAL; 220 return -EINVAL;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f300f060b3f3..1db0af6c7f94 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
156{ 156{
157 unsigned long first, last; 157 unsigned long first, last;
158 int err, rw = 0; 158 int err, rw = 0;
159 unsigned int flags = FOLL_FORCE;
159 160
160 dma->direction = direction; 161 dma->direction = direction;
161 switch (dma->direction) { 162 switch (dma->direction) {
@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
178 if (NULL == dma->pages) 179 if (NULL == dma->pages)
179 return -ENOMEM; 180 return -ENOMEM;
180 181
182 if (rw == READ)
183 flags |= FOLL_WRITE;
184
181 dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", 185 dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
182 data, size, dma->nr_pages); 186 data, size, dma->nr_pages);
183 187
184 err = get_user_pages(data & PAGE_MASK, dma->nr_pages, 188 err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
185 rw == READ, 1, /* force */ 189 flags, dma->pages, NULL);
186 dma->pages, NULL);
187 190
188 if (err != dma->nr_pages) { 191 if (err != dma->nr_pages) {
189 dma->nr_pages = (err >= 0) ? err : 0; 192 dma->nr_pages = (err >= 0) ? err : 0;
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517f1d1c..1cd322e939c7 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
42 unsigned long first, last; 42 unsigned long first, last;
43 unsigned long nr; 43 unsigned long nr;
44 struct frame_vector *vec; 44 struct frame_vector *vec;
45 unsigned int flags = FOLL_FORCE;
46
47 if (write)
48 flags |= FOLL_WRITE;
45 49
46 first = start >> PAGE_SHIFT; 50 first = start >> PAGE_SHIFT;
47 last = (start + length - 1) >> PAGE_SHIFT; 51 last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
49 vec = frame_vector_create(nr); 53 vec = frame_vector_create(nr);
50 if (!vec) 54 if (!vec)
51 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
52 ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec); 56 ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
53 if (ret < 0) 57 if (ret < 0)
54 goto out_destroy; 58 goto out_destroy;
55 /* We accept only complete set of PFNs */ 59 /* We accept only complete set of PFNs */
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index d34bc3530385..2e3cf012ef48 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
524 int rc; 524 int rc;
525 525
526 if (!host->req) { 526 if (!host->req) {
527 pm_runtime_get_sync(ms_dev(host));
527 do { 528 do {
528 rc = memstick_next_req(msh, &host->req); 529 rc = memstick_next_req(msh, &host->req);
529 dev_dbg(ms_dev(host), "next req %d\n", rc); 530 dev_dbg(ms_dev(host), "next req %d\n", rc);
@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
544 host->req->error); 545 host->req->error);
545 } 546 }
546 } while (!rc); 547 } while (!rc);
548 pm_runtime_put(ms_dev(host));
547 } 549 }
548 550
549} 551}
@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
570 dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", 572 dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
571 __func__, param, value); 573 __func__, param, value);
572 574
575 pm_runtime_get_sync(ms_dev(host));
573 mutex_lock(&ucr->dev_mutex); 576 mutex_lock(&ucr->dev_mutex);
574 577
575 err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD); 578 err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
635 } 638 }
636out: 639out:
637 mutex_unlock(&ucr->dev_mutex); 640 mutex_unlock(&ucr->dev_mutex);
641 pm_runtime_put(ms_dev(host));
638 642
639 /* power-on delay */ 643 /* power-on delay */
640 if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON) 644 if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
681 int err; 685 int err;
682 686
683 for (;;) { 687 for (;;) {
688 pm_runtime_get_sync(ms_dev(host));
684 mutex_lock(&ucr->dev_mutex); 689 mutex_lock(&ucr->dev_mutex);
685 690
686 /* Check pending MS card changes */ 691 /* Check pending MS card changes */
@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
703 } 708 }
704 709
705poll_again: 710poll_again:
711 pm_runtime_put(ms_dev(host));
706 if (host->eject) 712 if (host->eject)
707 break; 713 break;
708 714
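The rtsx_usb_ms hunks all add the usual runtime-PM bracket: take a usage-count reference (resuming the USB reader if it is suspended) before touching the hardware, and drop it when done. A hedged sketch of the shape, with the card-handling details elided:

    pm_runtime_get_sync(ms_dev(host));      /* resume the reader if suspended */
    mutex_lock(&ucr->dev_mutex);

    /* ... talk to the card ... */

    mutex_unlock(&ucr->dev_mutex);
    pm_runtime_put(ms_dev(host));           /* allow autosuspend again */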
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index f3d34b941f85..2e5233b60971 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
229 if (ctx->status == STARTED) 229 if (ctx->status == STARTED)
230 goto out; /* already started */ 230 goto out; /* already started */
231 231
232 /*
233 * Increment the mapped context count for the adapter. This also
234 * checks whether the adapter_context_lock is taken.
235 */
236 rc = cxl_adapter_context_get(ctx->afu->adapter);
237 if (rc)
238 goto out;
239
232 if (task) { 240 if (task) {
233 ctx->pid = get_task_pid(task, PIDTYPE_PID); 241 ctx->pid = get_task_pid(task, PIDTYPE_PID);
234 ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID); 242 ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
@@ -239,7 +247,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
239 cxl_ctx_get(); 247 cxl_ctx_get();
240 248
241 if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { 249 if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
250 put_pid(ctx->glpid);
242 put_pid(ctx->pid); 251 put_pid(ctx->pid);
252 ctx->glpid = ctx->pid = NULL;
253 cxl_adapter_context_put(ctx->afu->adapter);
243 cxl_ctx_put(); 254 cxl_ctx_put();
244 goto out; 255 goto out;
245 } 256 }
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index c466ee2b0c97..5e506c19108a 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx)
238 put_pid(ctx->glpid); 238 put_pid(ctx->glpid);
239 239
240 cxl_ctx_put(); 240 cxl_ctx_put();
241
242 /* Decrease the attached context count on the adapter */
243 cxl_adapter_context_put(ctx->afu->adapter);
241 return 0; 244 return 0;
242} 245}
243 246
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 01d372aba131..a144073593fa 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -618,6 +618,14 @@ struct cxl {
618 bool perst_select_user; 618 bool perst_select_user;
619 bool perst_same_image; 619 bool perst_same_image;
620 bool psl_timebase_synced; 620 bool psl_timebase_synced;
621
622 /*
623 * Number of contexts mapped onto this card. Possible values are:
624 * >0: Contexts are mapped and new ones can be mapped.
625 * 0: No active contexts and new ones can be mapped.
626 * -1: No contexts mapped and new ones cannot be mapped.
627 */
628 atomic_t contexts_num;
621}; 629};
622 630
623int cxl_pci_alloc_one_irq(struct cxl *adapter); 631int cxl_pci_alloc_one_irq(struct cxl *adapter);
@@ -944,4 +952,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev);
944 952
945/* decode AFU error bits in the PSL register PSL_SERR_An */ 953/* decode AFU error bits in the PSL register PSL_SERR_An */
946void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr); 954void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
955
956/*
957 * Increments the number of attached contexts on an adapter.
958 * If the adapter_context_lock is taken, returns -EBUSY.
959 */
960int cxl_adapter_context_get(struct cxl *adapter);
961
962/* Decrements the number of attached contexts on an adapter */
963void cxl_adapter_context_put(struct cxl *adapter);
964
966 /* If no contexts are active, prevent new contexts from being attached */
966int cxl_adapter_context_lock(struct cxl *adapter);
967
968/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
969void cxl_adapter_context_unlock(struct cxl *adapter);
970
947#endif 971#endif
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 5fb9894b157f..77080cc5fa0a 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -194,6 +194,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
194 ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF); 194 ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
195 195
196 /* 196 /*
197 * Increment the mapped context count for the adapter. This also
198 * checks whether the adapter_context_lock is taken.
199 */
200 rc = cxl_adapter_context_get(ctx->afu->adapter);
201 if (rc) {
202 afu_release_irqs(ctx, ctx);
203 goto out;
204 }
205
206 /*
197 * We grab the PID here and not in the file open to allow for the case 207 * We grab the PID here and not in the file open to allow for the case
198 * where a process (master, some daemon, etc) has opened the chardev on 208 * where a process (master, some daemon, etc) has opened the chardev on
199 * behalf of another process, so the AFU's mm gets bound to the process 209 * behalf of another process, so the AFU's mm gets bound to the process
@@ -205,11 +215,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
205 ctx->pid = get_task_pid(current, PIDTYPE_PID); 215 ctx->pid = get_task_pid(current, PIDTYPE_PID);
206 ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID); 216 ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
207 217
218
208 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); 219 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
209 220
210 if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, 221 if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
211 amr))) { 222 amr))) {
212 afu_release_irqs(ctx, ctx); 223 afu_release_irqs(ctx, ctx);
224 cxl_adapter_context_put(ctx->afu->adapter);
225 put_pid(ctx->glpid);
226 put_pid(ctx->pid);
227 ctx->glpid = ctx->pid = NULL;
213 goto out; 228 goto out;
214 } 229 }
215 230
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 9aa58a77a24d..3e102cd6ed91 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
1152 if ((rc = cxl_sysfs_adapter_add(adapter))) 1152 if ((rc = cxl_sysfs_adapter_add(adapter)))
1153 goto err_put1; 1153 goto err_put1;
1154 1154
1155 /* release the context lock as the adapter is configured */
1156 cxl_adapter_context_unlock(adapter);
1157
1155 return adapter; 1158 return adapter;
1156 1159
1157err_put1: 1160err_put1:
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index d9be23b24aa3..62e0dfb5f15b 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void)
243 if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) 243 if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
244 goto err2; 244 goto err2;
245 245
246 return adapter; 246 /* start with context lock taken */
247 atomic_set(&adapter->contexts_num, -1);
247 248
249 return adapter;
248err2: 250err2:
249 cxl_remove_adapter_nr(adapter); 251 cxl_remove_adapter_nr(adapter);
250err1: 252err1:
@@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu)
286 return 0; 288 return 0;
287} 289}
288 290
291int cxl_adapter_context_get(struct cxl *adapter)
292{
293 int rc;
294
295 rc = atomic_inc_unless_negative(&adapter->contexts_num);
296 return rc >= 0 ? 0 : -EBUSY;
297}
298
299void cxl_adapter_context_put(struct cxl *adapter)
300{
301 atomic_dec_if_positive(&adapter->contexts_num);
302}
303
304int cxl_adapter_context_lock(struct cxl *adapter)
305{
306 int rc;
307 /* no active contexts -> contexts_num == 0 */
308 rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
309 return rc ? -EBUSY : 0;
310}
311
312void cxl_adapter_context_unlock(struct cxl *adapter)
313{
314 int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
315
316 /*
317 * contexts lock taken -> contexts_num == -1
318 * If that is not the case, warn and force-reset the lock.
319 * This happens when context_unlock is requested without
320 * a prior context_lock.
321 */
322 if (val != -1) {
323 atomic_set(&adapter->contexts_num, 0);
324 WARN(1, "Adapter context unlocked with %d active contexts",
325 val);
326 }
327}
328
289static int __init init_cxl(void) 329static int __init init_cxl(void)
290{ 330{
291 int rc = 0; 331 int rc = 0;
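Taken together, contexts_num implements a small lock-or-count protocol: get/put adjust the count only while it is non-negative, and lock/unlock flip between 0 (idle, unlocked) and -1 (locked). A sketch of how the callers in this series compose, using only the functions added above:

    /* attach path (api.c / file.c) */
    if (cxl_adapter_context_get(adapter))   /* -EBUSY while locked */
            return -EBUSY;
    /* ... attach the context; cxl_adapter_context_put() runs on detach ... */

    /* reset path (sysfs.c): only succeeds with zero attached contexts */
    if (!cxl_adapter_context_lock(adapter)) {
            rc = cxl_ops->adapter_reset(adapter);
            if (rc)
                    cxl_adapter_context_unlock(adapter);
    }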
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 7afad8477ad5..e96be9ca4e60 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1487,6 +1487,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
1487 if ((rc = cxl_native_register_psl_err_irq(adapter))) 1487 if ((rc = cxl_native_register_psl_err_irq(adapter)))
1488 goto err; 1488 goto err;
1489 1489
1490 /* Release the context lock as the adapter is configured */
1491 cxl_adapter_context_unlock(adapter);
1490 return 0; 1492 return 0;
1491 1493
1492err: 1494err:
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index b043c20f158f..a8b6d6a635e9 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device,
75 int val; 75 int val;
76 76
77 rc = sscanf(buf, "%i", &val); 77 rc = sscanf(buf, "%i", &val);
78 if ((rc != 1) || (val != 1)) 78 if ((rc != 1) || (val != 1 && val != -1))
79 return -EINVAL; 79 return -EINVAL;
80 80
81 if ((rc = cxl_ops->adapter_reset(adapter))) 81 /*
82 return rc; 82 * Try to take the context lock, which is only possible
83 return count; 83 * when no contexts are attached to the adapter. Once
84 * taken, it also prevents any new context from being activated.
85 */
86 if (val == 1) {
87 rc = cxl_adapter_context_lock(adapter);
88 if (rc)
89 goto out;
90
91 rc = cxl_ops->adapter_reset(adapter);
92 /* If the reset failed, release the context lock */
93 if (rc)
94 cxl_adapter_context_unlock(adapter);
95
96 } else if (val == -1) {
97 /* Perform a forced adapter reset */
98 rc = cxl_ops->adapter_reset(adapter);
99 }
100
101out:
102 return rc ? rc : count;
84} 103}
85 104
86static ssize_t load_image_on_perst_show(struct device *device, 105static ssize_t load_image_on_perst_show(struct device *device,
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index e0203b1a20fd..f806a4471eb9 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1396,8 +1396,7 @@ retry:
1396 pinned_pages->nr_pages = get_user_pages( 1396 pinned_pages->nr_pages = get_user_pages(
1397 (u64)addr, 1397 (u64)addr,
1398 nr_pages, 1398 nr_pages,
1399 !!(prot & SCIF_PROT_WRITE), 1399 (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
1400 0,
1401 pinned_pages->pages, 1400 pinned_pages->pages,
1402 NULL); 1401 NULL);
1403 up_write(&mm->mmap_sem); 1402 up_write(&mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index a2d97b9b17e3..6fb773dbcd0c 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
198#else 198#else
199 *pageshift = PAGE_SHIFT; 199 *pageshift = PAGE_SHIFT;
200#endif 200#endif
201 if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0) 201 if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
202 return -EFAULT; 202 return -EFAULT;
203 *paddr = page_to_phys(page); 203 *paddr = page_to_phys(page);
204 put_page(page); 204 put_page(page);
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 1525870f460a..33741ad4a74a 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -283,7 +283,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
283 spin_lock(&gru->gs_asid_lock); 283 spin_lock(&gru->gs_asid_lock);
284 BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); 284 BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
285 asids->mt_ctxbitmap ^= ctxbitmap; 285 asids->mt_ctxbitmap ^= ctxbitmap;
286 gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n", 286 gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
287 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]); 287 gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
288 spin_unlock(&gru->gs_asid_lock); 288 spin_unlock(&gru->gs_asid_lock);
289 spin_unlock(&gms->ms_asid_lock); 289 spin_unlock(&gms->ms_asid_lock);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c3335112e68c..709a872ed484 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -46,6 +46,7 @@
46#include <asm/uaccess.h> 46#include <asm/uaccess.h>
47 47
48#include "queue.h" 48#include "queue.h"
49#include "block.h"
49 50
50MODULE_ALIAS("mmc:block"); 51MODULE_ALIAS("mmc:block");
51#ifdef MODULE_PARAM_PREFIX 52#ifdef MODULE_PARAM_PREFIX
@@ -1786,7 +1787,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1786 struct mmc_blk_data *md = mq->data; 1787 struct mmc_blk_data *md = mq->data;
1787 struct mmc_packed *packed = mqrq->packed; 1788 struct mmc_packed *packed = mqrq->packed;
1788 bool do_rel_wr, do_data_tag; 1789 bool do_rel_wr, do_data_tag;
1789 u32 *packed_cmd_hdr; 1790 __le32 *packed_cmd_hdr;
1790 u8 hdr_blocks; 1791 u8 hdr_blocks;
1791 u8 i = 1; 1792 u8 i = 1;
1792 1793
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 3c15a75bae86..342f1e3f301e 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -31,7 +31,7 @@ enum mmc_packed_type {
31 31
32struct mmc_packed { 32struct mmc_packed {
33 struct list_head list; 33 struct list_head list;
34 u32 cmd_hdr[1024]; 34 __le32 cmd_hdr[1024];
35 unsigned int blocks; 35 unsigned int blocks;
36 u8 nr_entries; 36 u8 nr_entries;
37 u8 retries; 37 u8 retries;
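The __le32 change records that the packed command header is stored little-endian on the medium, so writers must go through cpu_to_le32(). Roughly what the (unshown) header fill in mmc_blk_packed_hdr_wrq_prep() is assumed to look like after the conversion:

    /* hypothetical first header word: entry count, direction, version, LE */
    packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
                                    (PACKED_CMD_WR << 8) | PACKED_CMD_VER);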
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3486bc7fbb64..39fc5b2b96c5 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1263,6 +1263,16 @@ static int mmc_select_hs400es(struct mmc_card *card)
1263 goto out_err; 1263 goto out_err;
1264 } 1264 }
1265 1265
1266 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
1267 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1268
1269 if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
1270 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1271
1272 /* If this fails, try again during the next card power cycle */
1273 if (err)
1274 goto out_err;
1275
1266 err = mmc_select_bus_width(card); 1276 err = mmc_select_bus_width(card);
1267 if (err < 0) 1277 if (err < 0)
1268 goto out_err; 1278 goto out_err;
@@ -1272,6 +1282,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
1272 if (err) 1282 if (err)
1273 goto out_err; 1283 goto out_err;
1274 1284
1285 mmc_set_clock(host, card->ext_csd.hs_max_dtr);
1286
1275 err = mmc_switch_status(card); 1287 err = mmc_switch_status(card);
1276 if (err) 1288 if (err)
1277 goto out_err; 1289 goto out_err;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 4106295527b9..6e9c0f8fddb1 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1138 dev_dbg(sdmmc_dev(host), "%s\n", __func__); 1138 dev_dbg(sdmmc_dev(host), "%s\n", __func__);
1139 mutex_lock(&ucr->dev_mutex); 1139 mutex_lock(&ucr->dev_mutex);
1140 1140
1141 if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
1142 mutex_unlock(&ucr->dev_mutex);
1143 return;
1144 }
1145
1146 sd_set_power_mode(host, ios->power_mode); 1141 sd_set_power_mode(host, ios->power_mode);
1147 sd_set_bus_width(host, ios->bus_width); 1142 sd_set_bus_width(host, ios->bus_width);
1148 sd_set_timing(host, ios->timing, &host->ddr_mode); 1143 sd_set_timing(host, ios->timing, &host->ddr_mode);
@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
1314 container_of(work, struct rtsx_usb_sdmmc, led_work); 1309 container_of(work, struct rtsx_usb_sdmmc, led_work);
1315 struct rtsx_ucr *ucr = host->ucr; 1310 struct rtsx_ucr *ucr = host->ucr;
1316 1311
1312 pm_runtime_get_sync(sdmmc_dev(host));
1317 mutex_lock(&ucr->dev_mutex); 1313 mutex_lock(&ucr->dev_mutex);
1318 1314
1319 if (host->led.brightness == LED_OFF) 1315 if (host->led.brightness == LED_OFF)
@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
1322 rtsx_usb_turn_on_led(ucr); 1318 rtsx_usb_turn_on_led(ucr);
1323 1319
1324 mutex_unlock(&ucr->dev_mutex); 1320 mutex_unlock(&ucr->dev_mutex);
1321 pm_runtime_put(sdmmc_dev(host));
1325} 1322}
1326#endif 1323#endif
1327 1324
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1f54fd8755c8..7123ef96ed18 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -346,7 +346,8 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
346 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); 346 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
347 u32 data; 347 u32 data;
348 348
349 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { 349 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE ||
350 reg == SDHCI_INT_STATUS)) {
350 if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) { 351 if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
351 /* 352 /*
352 * Clear and then set D3CD bit to avoid missing the 353 * Clear and then set D3CD bit to avoid missing the
@@ -555,6 +556,25 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
555 esdhc_clrset_le(host, 0xffff, val, reg); 556 esdhc_clrset_le(host, 0xffff, val, reg);
556} 557}
557 558
559static u8 esdhc_readb_le(struct sdhci_host *host, int reg)
560{
561 u8 ret;
562 u32 val;
563
564 switch (reg) {
565 case SDHCI_HOST_CONTROL:
566 val = readl(host->ioaddr + reg);
567
568 ret = val & SDHCI_CTRL_LED;
569 ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK;
570 ret |= (val & ESDHC_CTRL_4BITBUS);
571 ret |= (val & ESDHC_CTRL_8BITBUS) << 3;
572 return ret;
573 }
574
575 return readb(host->ioaddr + reg);
576}
577
558static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) 578static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
559{ 579{
560 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 580 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -947,6 +967,7 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
947static struct sdhci_ops sdhci_esdhc_ops = { 967static struct sdhci_ops sdhci_esdhc_ops = {
948 .read_l = esdhc_readl_le, 968 .read_l = esdhc_readl_le,
949 .read_w = esdhc_readw_le, 969 .read_w = esdhc_readw_le,
970 .read_b = esdhc_readb_le,
950 .write_l = esdhc_writel_le, 971 .write_l = esdhc_writel_le,
951 .write_w = esdhc_writew_le, 972 .write_w = esdhc_writew_le,
952 .write_b = esdhc_writeb_le, 973 .write_b = esdhc_writeb_le,
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index da8e40af6f85..410a55b1c25f 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -250,7 +250,7 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
250 writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER); 250 writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
251} 251}
252 252
253void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) 253static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
254{ 254{
255 u8 ctrl; 255 u8 ctrl;
256 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 256 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -265,6 +265,28 @@ void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
265 } 265 }
266} 266}
267 267
268static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
269 struct mmc_ios *ios)
270{
271 switch (ios->signal_voltage) {
272 case MMC_SIGNAL_VOLTAGE_180:
273 /*
274 * Please don't switch to 1V8: arasan,5.1 doesn't
275 * actually use this setting to indicate the signal
276 * voltage, and the state machine breaks if we force
277 * 1V8 on. That's effectively a broken quirk, but we
278 * can work around it here.
279 */
280 return 0;
281 case MMC_SIGNAL_VOLTAGE_330:
282 case MMC_SIGNAL_VOLTAGE_120:
283 /* We don't support 3V3 and 1V2 */
284 break;
285 }
286
287 return -EINVAL;
288}
289
268static struct sdhci_ops sdhci_arasan_ops = { 290static struct sdhci_ops sdhci_arasan_ops = {
269 .set_clock = sdhci_arasan_set_clock, 291 .set_clock = sdhci_arasan_set_clock,
270 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 292 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -661,6 +683,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
661 683
662 host->mmc_host_ops.hs400_enhanced_strobe = 684 host->mmc_host_ops.hs400_enhanced_strobe =
663 sdhci_arasan_hs400_enhanced_strobe; 685 sdhci_arasan_hs400_enhanced_strobe;
686 host->mmc_host_ops.start_signal_voltage_switch =
687 sdhci_arasan_voltage_switch;
664 } 688 }
665 689
666 ret = sdhci_add_host(host); 690 ret = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 72a1f1f5180a..1d9e00a00e9f 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,6 +32,14 @@
32#include "sdhci-pci.h" 32#include "sdhci-pci.h"
33#include "sdhci-pci-o2micro.h" 33#include "sdhci-pci-o2micro.h"
34 34
35static int sdhci_pci_enable_dma(struct sdhci_host *host);
36static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width);
37static void sdhci_pci_hw_reset(struct sdhci_host *host);
38static int sdhci_pci_select_drive_strength(struct sdhci_host *host,
39 struct mmc_card *card,
40 unsigned int max_dtr, int host_drv,
41 int card_drv, int *drv_type);
42
35/*****************************************************************************\ 43/*****************************************************************************\
36 * * 44 * *
37 * Hardware specific quirk handling * 45 * Hardware specific quirk handling *
@@ -390,6 +398,45 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
390 return 0; 398 return 0;
391} 399}
392 400
401#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
402#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
403
404static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
405 unsigned short vdd)
406{
407 int cntr;
408 u8 reg;
409
410 sdhci_set_power(host, mode, vdd);
411
412 if (mode == MMC_POWER_OFF)
413 return;
414
415 /*
416 * Bus power might not enable after D3 -> D0 transition due to the
417 * present state not yet having propagated. Retry for up to 2ms.
418 */
419 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
420 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
421 if (reg & SDHCI_POWER_ON)
422 break;
423 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
424 reg |= SDHCI_POWER_ON;
425 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
426 }
427}
428
429static const struct sdhci_ops sdhci_intel_byt_ops = {
430 .set_clock = sdhci_set_clock,
431 .set_power = sdhci_intel_set_power,
432 .enable_dma = sdhci_pci_enable_dma,
433 .set_bus_width = sdhci_pci_set_bus_width,
434 .reset = sdhci_reset,
435 .set_uhs_signaling = sdhci_set_uhs_signaling,
436 .hw_reset = sdhci_pci_hw_reset,
437 .select_drive_strength = sdhci_pci_select_drive_strength,
438};
439
393static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { 440static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
394 .allow_runtime_pm = true, 441 .allow_runtime_pm = true,
395 .probe_slot = byt_emmc_probe_slot, 442 .probe_slot = byt_emmc_probe_slot,
@@ -397,6 +444,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
397 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 444 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
398 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | 445 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
399 SDHCI_QUIRK2_STOP_WITH_TC, 446 SDHCI_QUIRK2_STOP_WITH_TC,
447 .ops = &sdhci_intel_byt_ops,
400}; 448};
401 449
402static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { 450static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
@@ -405,6 +453,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
405 SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 453 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
406 .allow_runtime_pm = true, 454 .allow_runtime_pm = true,
407 .probe_slot = byt_sdio_probe_slot, 455 .probe_slot = byt_sdio_probe_slot,
456 .ops = &sdhci_intel_byt_ops,
408}; 457};
409 458
410static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { 459static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
@@ -415,6 +464,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
415 .allow_runtime_pm = true, 464 .allow_runtime_pm = true,
416 .own_cd_for_runtime_pm = true, 465 .own_cd_for_runtime_pm = true,
417 .probe_slot = byt_sd_probe_slot, 466 .probe_slot = byt_sd_probe_slot,
467 .ops = &sdhci_intel_byt_ops,
418}; 468};
419 469
420/* Define Host controllers for Intel Merrifield platform */ 470/* Define Host controllers for Intel Merrifield platform */
@@ -1648,7 +1698,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
1648 } 1698 }
1649 1699
1650 host->hw_name = "PCI"; 1700 host->hw_name = "PCI";
1651 host->ops = &sdhci_pci_ops; 1701 host->ops = chip->fixes && chip->fixes->ops ?
1702 chip->fixes->ops :
1703 &sdhci_pci_ops;
1652 host->quirks = chip->quirks; 1704 host->quirks = chip->quirks;
1653 host->quirks2 = chip->quirks2; 1705 host->quirks2 = chip->quirks2;
1654 1706
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 9c7c08b93223..6bccf56bc5ff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -65,6 +65,8 @@ struct sdhci_pci_fixes {
65 65
66 int (*suspend) (struct sdhci_pci_chip *); 66 int (*suspend) (struct sdhci_pci_chip *);
67 int (*resume) (struct sdhci_pci_chip *); 67 int (*resume) (struct sdhci_pci_chip *);
68
69 const struct sdhci_ops *ops;
68}; 70};
69 71
70struct sdhci_pci_slot { 72struct sdhci_pci_slot {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index dd1938d341f7..d0f5c05fbc19 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -315,7 +315,7 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
315 struct mmc_host *mmc = host->mmc; 315 struct mmc_host *mmc = host->mmc;
316 u8 pwr = host->pwr; 316 u8 pwr = host->pwr;
317 317
318 sdhci_set_power(host, mode, vdd); 318 sdhci_set_power_noreg(host, mode, vdd);
319 319
320 if (host->pwr == pwr) 320 if (host->pwr == pwr)
321 return; 321 return;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 48055666c655..71654b90227f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
687 * host->clock is in Hz. target_timeout is in us. 687 * host->clock is in Hz. target_timeout is in us.
688 * Hence, us = 1000000 * cycles / Hz. Round up. 688 * Hence, us = 1000000 * cycles / Hz. Round up.
689 */ 689 */
690 val = 1000000 * data->timeout_clks; 690 val = 1000000ULL * data->timeout_clks;
691 if (do_div(val, host->clock)) 691 if (do_div(val, host->clock))
692 target_timeout++; 692 target_timeout++;
693 target_timeout += val; 693 target_timeout += val;
@@ -1077,6 +1077,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1077 /* Initially, a command has no error */ 1077 /* Initially, a command has no error */
1078 cmd->error = 0; 1078 cmd->error = 0;
1079 1079
1080 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1081 cmd->opcode == MMC_STOP_TRANSMISSION)
1082 cmd->flags |= MMC_RSP_BUSY;
1083
1080 /* Wait max 10 ms */ 1084 /* Wait max 10 ms */
1081 timeout = 10; 1085 timeout = 10;
1082 1086
@@ -1390,8 +1394,8 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1390 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1394 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1391} 1395}
1392 1396
1393void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 1397void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1394 unsigned short vdd) 1398 unsigned short vdd)
1395{ 1399{
1396 u8 pwr = 0; 1400 u8 pwr = 0;
1397 1401
@@ -1455,20 +1459,17 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1455 mdelay(10); 1459 mdelay(10);
1456 } 1460 }
1457} 1461}
1458EXPORT_SYMBOL_GPL(sdhci_set_power); 1462EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1459 1463
1460static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode, 1464void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1461 unsigned short vdd) 1465 unsigned short vdd)
1462{ 1466{
1463 struct mmc_host *mmc = host->mmc; 1467 if (IS_ERR(host->mmc->supply.vmmc))
1464 1468 sdhci_set_power_noreg(host, mode, vdd);
1465 if (host->ops->set_power)
1466 host->ops->set_power(host, mode, vdd);
1467 else if (!IS_ERR(mmc->supply.vmmc))
1468 sdhci_set_power_reg(host, mode, vdd);
1469 else 1469 else
1470 sdhci_set_power(host, mode, vdd); 1470 sdhci_set_power_reg(host, mode, vdd);
1471} 1471}
1472EXPORT_SYMBOL_GPL(sdhci_set_power);
1472 1473
1473/*****************************************************************************\ 1474/*****************************************************************************\
1474 * * 1475 * *
@@ -1609,7 +1610,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1609 } 1610 }
1610 } 1611 }
1611 1612
1612 __sdhci_set_power(host, ios->power_mode, ios->vdd); 1613 if (host->ops->set_power)
1614 host->ops->set_power(host, ios->power_mode, ios->vdd);
1615 else
1616 sdhci_set_power(host, ios->power_mode, ios->vdd);
1613 1617
1614 if (host->ops->platform_send_init_74_clocks) 1618 if (host->ops->platform_send_init_74_clocks)
1615 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1619 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -2409,7 +2413,7 @@ static void sdhci_timeout_data_timer(unsigned long data)
2409 * * 2413 * *
2410\*****************************************************************************/ 2414\*****************************************************************************/
2411 2415
2412static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) 2416static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2413{ 2417{
2414 if (!host->cmd) { 2418 if (!host->cmd) {
2415 /* 2419 /*
@@ -2453,11 +2457,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
2453 return; 2457 return;
2454 } 2458 }
2455 2459
2456 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
2457 !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
2458 host->cmd->opcode == MMC_STOP_TRANSMISSION)
2459 *mask &= ~SDHCI_INT_DATA_END;
2460
2461 if (intmask & SDHCI_INT_RESPONSE) 2460 if (intmask & SDHCI_INT_RESPONSE)
2462 sdhci_finish_command(host); 2461 sdhci_finish_command(host);
2463} 2462}
@@ -2680,8 +2679,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
2680 } 2679 }
2681 2680
2682 if (intmask & SDHCI_INT_CMD_MASK) 2681 if (intmask & SDHCI_INT_CMD_MASK)
2683 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, 2682 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2684 &intmask);
2685 2683
2686 if (intmask & SDHCI_INT_DATA_MASK) 2684 if (intmask & SDHCI_INT_DATA_MASK)
2687 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 2685 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c722cd23205c..766df17fb7eb 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -683,6 +683,8 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
683void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); 683void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
684void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 684void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
685 unsigned short vdd); 685 unsigned short vdd);
686void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
687 unsigned short vdd);
686void sdhci_set_bus_width(struct sdhci_host *host, int width); 688void sdhci_set_bus_width(struct sdhci_host *host, int width);
687void sdhci_reset(struct sdhci_host *host, u8 mask); 689void sdhci_reset(struct sdhci_host *host, u8 mask);
688void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); 690void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
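With sdhci_set_power_noreg() now exported, a host driver that needs custom power sequencing can override .set_power and still reuse the register-only helper, as the pxav3 hunk above does. A minimal sketch with hypothetical names:

    static void foo_set_power(struct sdhci_host *host, unsigned char mode,
                              unsigned short vdd)
    {
            /* program SDHCI_POWER_CONTROL without touching a regulator */
            sdhci_set_power_noreg(host, mode, vdd);
            /* ... platform-specific power rail handling ... */
    }

    static const struct sdhci_ops foo_ops = {
            .set_power = foo_set_power,
    };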
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 95c4048a371e..388e46be6ad9 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -741,6 +741,7 @@ static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
741 goto out_put; 741 goto out_put;
742 } 742 }
743 743
744 vid_hdr = ubi_get_vid_hdr(vidb);
744 ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC); 745 ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
745 746
746 mutex_lock(&ubi->buf_mutex); 747 mutex_lock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index d6384d965788..2ff62157d3bb 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -287,7 +287,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
287 287
288 /* new_aeb is newer */ 288 /* new_aeb is newer */
289 if (cmp_res & 1) { 289 if (cmp_res & 1) {
290 victim = ubi_alloc_aeb(ai, aeb->ec, aeb->pnum); 290 victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
291 if (!victim) 291 if (!victim)
292 return -ENOMEM; 292 return -ENOMEM;
293 293
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 8b2b740d6679..124c2432ac9c 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -89,7 +89,7 @@ config NVDIMM_PFN
89 Select Y if unsure 89 Select Y if unsure
90 90
91config NVDIMM_DAX 91config NVDIMM_DAX
92 tristate "NVDIMM DAX: Raw access to persistent memory" 92 bool "NVDIMM DAX: Raw access to persistent memory"
93 default LIBNVDIMM 93 default LIBNVDIMM
94 depends on NVDIMM_PFN 94 depends on NVDIMM_PFN
95 help 95 help
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 3509cff68ef9..abe5c6bc756c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2176,12 +2176,14 @@ static struct device **scan_labels(struct nd_region *nd_region)
2176 return devs; 2176 return devs;
2177 2177
2178 err: 2178 err:
2179 for (i = 0; devs[i]; i++) 2179 if (devs) {
2180 if (is_nd_blk(&nd_region->dev)) 2180 for (i = 0; devs[i]; i++)
2181 namespace_blk_release(devs[i]); 2181 if (is_nd_blk(&nd_region->dev))
2182 else 2182 namespace_blk_release(devs[i]);
2183 namespace_pmem_release(devs[i]); 2183 else
2184 kfree(devs); 2184 namespace_pmem_release(devs[i]);
2185 kfree(devs);
2186 }
2185 return NULL; 2187 return NULL;
2186} 2188}
2187 2189
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 42b3a8217073..24618431a14b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -47,7 +47,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
47 return to_nd_region(to_dev(pmem)->parent); 47 return to_nd_region(to_dev(pmem)->parent);
48} 48}
49 49
50static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, 50static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
51 unsigned int len) 51 unsigned int len)
52{ 52{
53 struct device *dev = to_dev(pmem); 53 struct device *dev = to_dev(pmem);
@@ -62,8 +62,12 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
62 __func__, (unsigned long long) sector, 62 __func__, (unsigned long long) sector,
63 cleared / 512, cleared / 512 > 1 ? "s" : ""); 63 cleared / 512, cleared / 512 > 1 ? "s" : "");
64 badblocks_clear(&pmem->bb, sector, cleared / 512); 64 badblocks_clear(&pmem->bb, sector, cleared / 512);
65 } else {
66 return -EIO;
65 } 67 }
68
66 invalidate_pmem(pmem->virt_addr + offset, len); 69 invalidate_pmem(pmem->virt_addr + offset, len);
70 return 0;
67} 71}
68 72
69static void write_pmem(void *pmem_addr, struct page *page, 73static void write_pmem(void *pmem_addr, struct page *page,
@@ -123,7 +127,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
123 flush_dcache_page(page); 127 flush_dcache_page(page);
124 write_pmem(pmem_addr, page, off, len); 128 write_pmem(pmem_addr, page, off, len);
125 if (unlikely(bad_pmem)) { 129 if (unlikely(bad_pmem)) {
126 pmem_clear_poison(pmem, pmem_off, len); 130 rc = pmem_clear_poison(pmem, pmem_off, len);
127 write_pmem(pmem_addr, page, off, len); 131 write_pmem(pmem_addr, page, off, len);
128 } 132 }
129 } 133 }
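With pmem_clear_poison() now returning an error, a failed badblock clear can propagate out of the write path instead of silently succeeding. A hedged sketch of how the rc presumably reaches the bio in the caller:

    /* hypothetical caller: surface the failed clear as a bio error */
    rc = pmem_do_bvec(pmem, page, len, off, is_write, sector);
    if (rc) {
            bio->bi_error = rc;
            break;
    }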
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 329381a28edf..79e679d12f3b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
554 554
555 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ 555 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
556 c.identify.opcode = nvme_admin_identify; 556 c.identify.opcode = nvme_admin_identify;
557 c.identify.cns = cpu_to_le32(1); 557 c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
558 558
559 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); 559 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
560 if (!*id) 560 if (!*id)
@@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
572 struct nvme_command c = { }; 572 struct nvme_command c = { };
573 573
574 c.identify.opcode = nvme_admin_identify; 574 c.identify.opcode = nvme_admin_identify;
575 c.identify.cns = cpu_to_le32(2); 575 c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
576 c.identify.nsid = cpu_to_le32(nsid); 576 c.identify.nsid = cpu_to_le32(nsid);
577 return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000); 577 return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
578} 578}
@@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
900 return -ENODEV; 900 return -ENODEV;
901 } 901 }
902 902
903 if (ns->ctrl->vs >= NVME_VS(1, 1)) 903 if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
904 memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui)); 904 memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
905 if (ns->ctrl->vs >= NVME_VS(1, 2)) 905 if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
906 memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid)); 906 memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
907 907
908 return 0; 908 return 0;
@@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
1086 int ret; 1086 int ret;
1087 1087
1088 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 1088 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1089 if (csts == ~0)
1090 return -ENODEV;
1089 if ((csts & NVME_CSTS_RDY) == bit) 1091 if ((csts & NVME_CSTS_RDY) == bit)
1090 break; 1092 break;
1091 1093
@@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
1240 } 1242 }
1241 page_shift = NVME_CAP_MPSMIN(cap) + 12; 1243 page_shift = NVME_CAP_MPSMIN(cap) + 12;
1242 1244
1243 if (ctrl->vs >= NVME_VS(1, 1)) 1245 if (ctrl->vs >= NVME_VS(1, 1, 0))
1244 ctrl->subsystem = NVME_CAP_NSSRC(cap); 1246 ctrl->subsystem = NVME_CAP_NSSRC(cap);
1245 1247
1246 ret = nvme_identify_ctrl(ctrl, &id); 1248 ret = nvme_identify_ctrl(ctrl, &id);
@@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
1840 return; 1842 return;
1841 1843
1842 nn = le32_to_cpu(id->nn); 1844 nn = le32_to_cpu(id->nn);
1843 if (ctrl->vs >= NVME_VS(1, 1) && 1845 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1844 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { 1846 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1845 if (!nvme_scan_ns_list(ctrl, nn)) 1847 if (!nvme_scan_ns_list(ctrl, nn))
1846 goto done; 1848 goto done;
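These hunks rely on NVME_VS() growing a third argument for the tertiary version number, matching the layout of the NVMe version register (MJR in bits 31:16, MNR in 15:8, TER in 7:0). The assumed macro shape:

    #define NVME_VS(major, minor, tertiary) \
            (((major) << 16) | ((minor) << 8) | (tertiary))

This also explains the target/core.c hunk below, where the open-coded (1 << 16) | (2 << 8) | 1 becomes NVME_VS(1, 2, 1).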
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0fc99f0f2571..0248d0e21fee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -99,6 +99,7 @@ struct nvme_dev {
99 dma_addr_t cmb_dma_addr; 99 dma_addr_t cmb_dma_addr;
100 u64 cmb_size; 100 u64 cmb_size;
101 u32 cmbsz; 101 u32 cmbsz;
102 u32 cmbloc;
102 struct nvme_ctrl ctrl; 103 struct nvme_ctrl ctrl;
103 struct completion ioq_wait; 104 struct completion ioq_wait;
104}; 105};
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
893 "I/O %d QID %d timeout, reset controller\n", 894 "I/O %d QID %d timeout, reset controller\n",
894 req->tag, nvmeq->qid); 895 req->tag, nvmeq->qid);
895 nvme_dev_disable(dev, false); 896 nvme_dev_disable(dev, false);
896 queue_work(nvme_workq, &dev->reset_work); 897 nvme_reset(dev);
897 898
898 /* 899 /*
899 * Mark the request as handled, since the inline shutdown 900 * Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1214 u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 1215 u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
1215 struct nvme_queue *nvmeq; 1216 struct nvme_queue *nvmeq;
1216 1217
1217 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ? 1218 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
1218 NVME_CAP_NSSRC(cap) : 0; 1219 NVME_CAP_NSSRC(cap) : 0;
1219 1220
1220 if (dev->subsystem && 1221 if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
1291 1292
1292 /* Skip controllers under certain specific conditions. */ 1293 /* Skip controllers under certain specific conditions. */
1293 if (nvme_should_reset(dev, csts)) { 1294 if (nvme_should_reset(dev, csts)) {
1294 if (queue_work(nvme_workq, &dev->reset_work)) 1295 if (!nvme_reset(dev))
1295 dev_warn(dev->dev, 1296 dev_warn(dev->dev,
1296 "Failed status: 0x%x, reset controller.\n", 1297 "Failed status: 0x%x, reset controller.\n",
1297 csts); 1298 csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
1331 return ret >= 0 ? 0 : ret; 1332 return ret >= 0 ? 0 : ret;
1332} 1333}
1333 1334
1335static ssize_t nvme_cmb_show(struct device *dev,
1336 struct device_attribute *attr,
1337 char *buf)
1338{
1339 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
1340
1341 return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
1342 ndev->cmbloc, ndev->cmbsz);
1343}
1344static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
1345
1334static void __iomem *nvme_map_cmb(struct nvme_dev *dev) 1346static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1335{ 1347{
1336 u64 szu, size, offset; 1348 u64 szu, size, offset;
1337 u32 cmbloc;
1338 resource_size_t bar_size; 1349 resource_size_t bar_size;
1339 struct pci_dev *pdev = to_pci_dev(dev->dev); 1350 struct pci_dev *pdev = to_pci_dev(dev->dev);
1340 void __iomem *cmb; 1351 void __iomem *cmb;
1341 dma_addr_t dma_addr; 1352 dma_addr_t dma_addr;
1342 1353
1343 if (!use_cmb_sqes)
1344 return NULL;
1345
1346 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1354 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1347 if (!(NVME_CMB_SZ(dev->cmbsz))) 1355 if (!(NVME_CMB_SZ(dev->cmbsz)))
1348 return NULL; 1356 return NULL;
1357 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
1349 1358
1350 cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 1359 if (!use_cmb_sqes)
1360 return NULL;
1351 1361
1352 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 1362 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
1353 size = szu * NVME_CMB_SZ(dev->cmbsz); 1363 size = szu * NVME_CMB_SZ(dev->cmbsz);
1354 offset = szu * NVME_CMB_OFST(cmbloc); 1364 offset = szu * NVME_CMB_OFST(dev->cmbloc);
1355 bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc)); 1365 bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
1356 1366
1357 if (offset > bar_size) 1367 if (offset > bar_size)
1358 return NULL; 1368 return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1365 if (size > bar_size - offset) 1375 if (size > bar_size - offset)
1366 size = bar_size - offset; 1376 size = bar_size - offset;
1367 1377
1368 dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset; 1378 dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
1369 cmb = ioremap_wc(dma_addr, size); 1379 cmb = ioremap_wc(dma_addr, size);
1370 if (!cmb) 1380 if (!cmb)
1371 return NULL; 1381 return NULL;
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
1511 return 0; 1521 return 0;
1512} 1522}
1513 1523
1514static void nvme_disable_io_queues(struct nvme_dev *dev) 1524static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
1515{ 1525{
1516 int pass, queues = dev->online_queues - 1; 1526 int pass;
1517 unsigned long timeout; 1527 unsigned long timeout;
1518 u8 opcode = nvme_admin_delete_sq; 1528 u8 opcode = nvme_admin_delete_sq;
1519 1529
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1616 dev->q_depth); 1626 dev->q_depth);
1617 } 1627 }
1618 1628
1619 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) 1629 /*
1630 * CMBs can currently only exist on >=1.2 PCIe devices. We only
1631 * populate sysfs if a CMB is implemented. Note that we add the
1632 * CMB attribute to the nvme_ctrl kobj, so there is no need to remove
1633 * it explicitly on exit. Since nvme_dev_attrs_group has no name we can pass
1634 * NULL as final argument to sysfs_add_file_to_group.
1635 */
1636
1637 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
1620 dev->cmb = nvme_map_cmb(dev); 1638 dev->cmb = nvme_map_cmb(dev);
1621 1639
1640 if (dev->cmbsz) {
1641 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
1642 &dev_attr_cmb.attr, NULL))
1643 dev_warn(dev->dev,
1644 "failed to add sysfs attribute for CMB\n");
1645 }
1646 }
1647
1622 pci_enable_pcie_error_reporting(pdev); 1648 pci_enable_pcie_error_reporting(pdev);
1623 pci_save_state(pdev); 1649 pci_save_state(pdev);
1624 return 0; 1650 return 0;
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
1649 1675
1650static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 1676static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1651{ 1677{
1652 int i; 1678 int i, queues;
1653 u32 csts = -1; 1679 u32 csts = -1;
1654 1680
1655 del_timer_sync(&dev->watchdog_timer); 1681 del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1660 csts = readl(dev->bar + NVME_REG_CSTS); 1686 csts = readl(dev->bar + NVME_REG_CSTS);
1661 } 1687 }
1662 1688
1689 queues = dev->online_queues - 1;
1663 for (i = dev->queue_count - 1; i > 0; i--) 1690 for (i = dev->queue_count - 1; i > 0; i--)
1664 nvme_suspend_queue(dev->queues[i]); 1691 nvme_suspend_queue(dev->queues[i]);
1665 1692
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1671 if (dev->queue_count) 1698 if (dev->queue_count)
1672 nvme_suspend_queue(dev->queues[0]); 1699 nvme_suspend_queue(dev->queues[0]);
1673 } else { 1700 } else {
1674 nvme_disable_io_queues(dev); 1701 nvme_disable_io_queues(dev, queues);
1675 nvme_disable_admin_queue(dev, shutdown); 1702 nvme_disable_admin_queue(dev, shutdown);
1676 } 1703 }
1677 nvme_pci_disable(dev); 1704 nvme_pci_disable(dev);
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
1818{ 1845{
1819 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 1846 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
1820 return -ENODEV; 1847 return -ENODEV;
1821 1848 if (work_busy(&dev->reset_work))
1849 return -ENODEV;
1822 if (!queue_work(nvme_workq, &dev->reset_work)) 1850 if (!queue_work(nvme_workq, &dev->reset_work))
1823 return -EBUSY; 1851 return -EBUSY;
1824
1825 flush_work(&dev->reset_work);
1826 return 0; 1852 return 0;
1827} 1853}
1828 1854
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
1846 1872
1847static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl) 1873static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
1848{ 1874{
1849 return nvme_reset(to_nvme_dev(ctrl)); 1875 struct nvme_dev *dev = to_nvme_dev(ctrl);
1876 int ret = nvme_reset(dev);
1877
1878 if (!ret)
1879 flush_work(&dev->reset_work);
1880 return ret;
1850} 1881}
1851 1882
1852static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 1883static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
1940 if (prepare) 1971 if (prepare)
1941 nvme_dev_disable(dev, false); 1972 nvme_dev_disable(dev, false);
1942 else 1973 else
1943 queue_work(nvme_workq, &dev->reset_work); 1974 nvme_reset(dev);
1944} 1975}
1945 1976
1946static void nvme_shutdown(struct pci_dev *pdev) 1977static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
2009 struct pci_dev *pdev = to_pci_dev(dev); 2040 struct pci_dev *pdev = to_pci_dev(dev);
2010 struct nvme_dev *ndev = pci_get_drvdata(pdev); 2041 struct nvme_dev *ndev = pci_get_drvdata(pdev);
2011 2042
2012 queue_work(nvme_workq, &ndev->reset_work); 2043 nvme_reset(ndev);
2013 return 0; 2044 return 0;
2014} 2045}
2015#endif 2046#endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
2048 2079
2049 dev_info(dev->ctrl.device, "restart after slot reset\n"); 2080 dev_info(dev->ctrl.device, "restart after slot reset\n");
2050 pci_restore_state(pdev); 2081 pci_restore_state(pdev);
2051 queue_work(nvme_workq, &dev->reset_work); 2082 nvme_reset(dev);
2052 return PCI_ERS_RESULT_RECOVERED; 2083 return PCI_ERS_RESULT_RECOVERED;
2053} 2084}
2054 2085
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index c2a0a1c7d05d..3eaa4d27801e 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
606 eui = id_ns->eui64; 606 eui = id_ns->eui64;
607 len = sizeof(id_ns->eui64); 607 len = sizeof(id_ns->eui64);
608 608
609 if (ns->ctrl->vs >= NVME_VS(1, 2)) { 609 if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
610 if (bitmap_empty(eui, len * 8)) { 610 if (bitmap_empty(eui, len * 8)) {
611 eui = id_ns->nguid; 611 eui = id_ns->nguid;
612 len = sizeof(id_ns->nguid); 612 len = sizeof(id_ns->nguid);
@@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
679{ 679{
680 int res; 680 int res;
681 681
682 if (ns->ctrl->vs >= NVME_VS(1, 1)) { 682 if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
683 res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len); 683 res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
684 if (res != -EOPNOTSUPP) 684 if (res != -EOPNOTSUPP)
685 return res; 685 return res;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 7ab9c9381b98..6fe4c48a21e4 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
199 */ 199 */
200 200
201 /* we support multiple ports and multiple hosts: */ 201 /* we support multiple ports and multiple hosts: */
202 id->mic = (1 << 0) | (1 << 1); 202 id->cmic = (1 << 0) | (1 << 1);
203 203
204 /* no limit on data transfer sizes for now */ 204 /* no limit on data transfer sizes for now */
205 id->mdts = 0; 205 id->mdts = 0;
@@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
511 case nvme_admin_identify: 511 case nvme_admin_identify:
512 req->data_len = 4096; 512 req->data_len = 4096;
513 switch (le32_to_cpu(cmd->identify.cns)) { 513 switch (le32_to_cpu(cmd->identify.cns)) {
514 case 0x00: 514 case NVME_ID_CNS_NS:
515 req->execute = nvmet_execute_identify_ns; 515 req->execute = nvmet_execute_identify_ns;
516 return 0; 516 return 0;
517 case 0x01: 517 case NVME_ID_CNS_CTRL:
518 req->execute = nvmet_execute_identify_ctrl; 518 req->execute = nvmet_execute_identify_ctrl;
519 return 0; 519 return 0;
520 case 0x02: 520 case NVME_ID_CNS_NS_ACTIVE_LIST:
521 req->execute = nvmet_execute_identify_nslist; 521 req->execute = nvmet_execute_identify_nslist;
522 return 0; 522 return 0;
523 } 523 }
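The magic CNS numbers give way to mnemonics; the values are assumed to be defined along these lines in include/linux/nvme.h, matching the spec's Identify CNS codes:

    enum {
            NVME_ID_CNS_NS             = 0x00,
            NVME_ID_CNS_CTRL           = 0x01,
            NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
    };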
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6559d5afa7bf..b4cacb6f0258 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
882 if (!subsys) 882 if (!subsys)
883 return NULL; 883 return NULL;
884 884
885 subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */ 885 subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
886 886
887 switch (type) { 887 switch (type) {
888 case NVME_NQN_NVME: 888 case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 6f65646e89cf..12f39eea569f 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
54 /* we support only dynamic controllers */ 54 /* we support only dynamic controllers */
55 e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC); 55 e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
56 e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH); 56 e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
57 e->nqntype = type; 57 e->subtype = type;
58 memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); 58 memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
59 memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); 59 memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
60 memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); 60 memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
187 case nvme_admin_identify: 187 case nvme_admin_identify:
188 req->data_len = 4096; 188 req->data_len = 4096;
189 switch (le32_to_cpu(cmd->identify.cns)) { 189 switch (le32_to_cpu(cmd->identify.cns)) {
190 case 0x01: 190 case NVME_ID_CNS_CTRL:
191 req->execute = 191 req->execute =
192 nvmet_execute_identify_disc_ctrl; 192 nvmet_execute_identify_disc_ctrl;
193 return 0; 193 return 0;
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 2cb7315e26d0..653707996342 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -247,6 +247,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
247 247
248 pp = &pcie->pp; 248 pp = &pcie->pp;
249 pp->dev = dev; 249 pp->dev = dev;
250 pcie->drvdata = match->data;
250 pp->ops = pcie->drvdata->ops; 251 pp->ops = pcie->drvdata->ops;
251 252
252 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 253 dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
@@ -256,7 +257,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
256 return PTR_ERR(pcie->pp.dbi_base); 257 return PTR_ERR(pcie->pp.dbi_base);
257 } 258 }
258 259
259 pcie->drvdata = match->data;
260 pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset; 260 pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
261 261
262 if (!ls_pcie_is_bridge(pcie)) 262 if (!ls_pcie_is_bridge(pcie))
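
The layerscape probe fix is pure ordering: `pp->ops = pcie->drvdata->ops` previously ran before `pcie->drvdata = match->data`, dereferencing a still-NULL drvdata pointer. A minimal model of the corrected sequence; the struct names are invented stand-ins for the driver's types:

    #include <stddef.h>
    #include <stdio.h>

    struct ops     { const char *name; };
    struct drvdata { const struct ops *ops; };
    struct pcie    { const struct drvdata *drvdata; const struct ops *ops; };

    static const struct ops ls_ops = { "ls-pcie" };
    static const struct drvdata ls_drvdata = { &ls_ops };

    static int probe(struct pcie *pcie, const struct drvdata *match_data)
    {
        /* Fix: populate pcie->drvdata before anything dereferences it.
         * The broken ordering read pcie->drvdata->ops while drvdata
         * was still NULL. */
        pcie->drvdata = match_data;
        pcie->ops = pcie->drvdata->ops;
        return 0;
    }

    int main(void)
    {
        struct pcie pcie = { 0 };

        probe(&pcie, &ls_drvdata);
        printf("%s\n", pcie.ops->name);
        return 0;
    }
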
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 537f58a664fa..8df6312ed300 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) 4 * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
5 * 5 *
6 * Authors: Joao Pinto <jpinto@synopsys.com> 6 * Authors: Joao Pinto <jpmpinto@gmail.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index bfdd0744b686..ad70507cfb56 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -610,6 +610,7 @@ static int msi_verify_entries(struct pci_dev *dev)
610 * msi_capability_init - configure device's MSI capability structure 610 * msi_capability_init - configure device's MSI capability structure
611 * @dev: pointer to the pci_dev data structure of MSI device function 611 * @dev: pointer to the pci_dev data structure of MSI device function
612 * @nvec: number of interrupts to allocate 612 * @nvec: number of interrupts to allocate
613 * @affinity: flag to indicate cpu irq affinity mask should be set
613 * 614 *
614 * Setup the MSI capability structure of the device with the requested 615 * Setup the MSI capability structure of the device with the requested
615 * number of interrupts. A return value of zero indicates the successful 616 * number of interrupts. A return value of zero indicates the successful
@@ -752,6 +753,7 @@ static void msix_program_entries(struct pci_dev *dev,
752 * @dev: pointer to the pci_dev data structure of MSI-X device function 753 * @dev: pointer to the pci_dev data structure of MSI-X device function
753 * @entries: pointer to an array of struct msix_entry entries 754 * @entries: pointer to an array of struct msix_entry entries
754 * @nvec: number of @entries 755 * @nvec: number of @entries
756 * @affinity: flag to indicate cpu irq affinity mask should be set
755 * 757 *
756 * Setup the MSI-X capability structure of device function with a 758 * Setup the MSI-X capability structure of device function with a
757 * single MSI-X irq. A return of zero indicates the successful setup of 759 * single MSI-X irq. A return of zero indicates the successful setup of
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index c2ac7646b99f..a8ac4bcef2c0 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1011,7 +1011,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
1011 rc = acpi_dev_get_resources(adev, &resource_list, 1011 rc = acpi_dev_get_resources(adev, &resource_list,
1012 acpi_pmu_dev_add_resource, &res); 1012 acpi_pmu_dev_add_resource, &res);
1013 acpi_dev_free_resource_list(&resource_list); 1013 acpi_dev_free_resource_list(&resource_list);
1014 if (rc < 0 || IS_ERR(&res)) { 1014 if (rc < 0) {
1015 dev_err(dev, "PMU type %d: No resource address found\n", type); 1015 dev_err(dev, "PMU type %d: No resource address found\n", type);
1016 goto err; 1016 goto err;
1017 } 1017 }
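
`IS_ERR(&res)` applied the error-pointer test to the address of a stack variable, which can never fall in the error range, so the clause was dead code and only the integer return value needs testing. A small demonstration, assuming IS_ERR's usual definition over the top 4095 values of the address space:

    #include <stdio.h>

    /* Assumed shape of the kernel's IS_ERR(): an error pointer is a
     * small negative errno cast to a pointer, landing in the last page. */
    #define MAX_ERRNO 4095
    #define IS_ERR(ptr) ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        int res = 0;

        /* &res is a valid stack address, never in the errno range, so
         * this branch is dead -- which is why the driver dropped it. */
        if (IS_ERR(&res))
            printf("unreachable\n");
        else
            printf("IS_ERR(&stack_var) is always false\n");
        return 0;
    }
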
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index e1ab864e1a7f..c8c72e8259d3 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -151,21 +151,21 @@ FUNC_GROUP_DECL(GPID0, F19, E21);
151 151
152#define GPID2_DESC SIG_DESC_SET(SCU8C, 9) 152#define GPID2_DESC SIG_DESC_SET(SCU8C, 9)
153 153
154#define D20 26 154#define F20 26
155SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC); 155SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
156SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC); 156SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
157SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC); 157SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
158SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID); 158SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
159MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN); 159MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN);
160 160
161#define D21 27 161#define D20 27
162SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC); 162SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
163SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC); 163SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
164SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC); 164SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
165SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID); 165SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
166MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT); 166MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
167 167
168FUNC_GROUP_DECL(GPID2, D20, D21); 168FUNC_GROUP_DECL(GPID2, F20, D20);
169 169
170#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21) 170#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21)
171#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12) 171#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
@@ -182,28 +182,88 @@ SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
182SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC); 182SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
183SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC); 183SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
184SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE); 184SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
185MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT); 185MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
186 186
187FUNC_GROUP_DECL(GPIE0, B20, C20); 187FUNC_GROUP_DECL(GPIE0, B20, C20);
188 188
189#define SPI1_DESC SIG_DESC_SET(HW_STRAP1, 13) 189#define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 }
190#define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 }
191#define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 }
192
190#define C18 64 193#define C18 64
191SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC); 194SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
195SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
196SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
192SS_PIN_DECL(C18, GPIOI0, SYSCS); 197SS_PIN_DECL(C18, GPIOI0, SYSCS);
193 198
194#define E15 65 199#define E15 65
195SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC); 200SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
201SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
202SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
196SS_PIN_DECL(E15, GPIOI1, SYSCK); 203SS_PIN_DECL(E15, GPIOI1, SYSCK);
197 204
198#define A14 66 205#define B16 66
199SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC); 206SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
200SS_PIN_DECL(A14, GPIOI2, SYSMOSI); 207SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
208SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU);
209SS_PIN_DECL(B16, GPIOI2, SYSMOSI);
201 210
202#define C16 67 211#define C16 67
203SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC); 212SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
213SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
214SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU);
204SS_PIN_DECL(C16, GPIOI3, SYSMISO); 215SS_PIN_DECL(C16, GPIOI3, SYSMISO);
205 216
206FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16); 217#define VB_DESC SIG_DESC_SET(HW_STRAP1, 5)
218
219#define B15 68
220SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC);
221SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
222SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
223SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
224 SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
225 SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
226SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC);
227MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS);
228
229#define C15 69
230SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC);
231SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
232SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
233SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
234 SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
235 SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
236SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC);
237MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK);
238
239#define A14 70
240SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC);
241SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
242SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
243SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1),
244 SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG),
245 SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU));
246SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC);
247MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI);
248
249#define A15 71
250SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC);
251SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
252SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
253SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1),
254 SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG),
255 SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU));
256SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC);
257MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO);
258
259FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15);
260FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15);
261FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15);
262FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15);
263
264#define R2 72
265SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8));
266SS_PIN_DECL(R2, GPIOJ0, SGPMCK);
207 267
208#define L2 73 268#define L2 73
209SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9)); 269SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9));
@@ -580,6 +640,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
580 ASPEED_PINCTRL_PIN(A12), 640 ASPEED_PINCTRL_PIN(A12),
581 ASPEED_PINCTRL_PIN(A13), 641 ASPEED_PINCTRL_PIN(A13),
582 ASPEED_PINCTRL_PIN(A14), 642 ASPEED_PINCTRL_PIN(A14),
643 ASPEED_PINCTRL_PIN(A15),
583 ASPEED_PINCTRL_PIN(A2), 644 ASPEED_PINCTRL_PIN(A2),
584 ASPEED_PINCTRL_PIN(A3), 645 ASPEED_PINCTRL_PIN(A3),
585 ASPEED_PINCTRL_PIN(A4), 646 ASPEED_PINCTRL_PIN(A4),
@@ -592,6 +653,8 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
592 ASPEED_PINCTRL_PIN(B12), 653 ASPEED_PINCTRL_PIN(B12),
593 ASPEED_PINCTRL_PIN(B13), 654 ASPEED_PINCTRL_PIN(B13),
594 ASPEED_PINCTRL_PIN(B14), 655 ASPEED_PINCTRL_PIN(B14),
656 ASPEED_PINCTRL_PIN(B15),
657 ASPEED_PINCTRL_PIN(B16),
595 ASPEED_PINCTRL_PIN(B2), 658 ASPEED_PINCTRL_PIN(B2),
596 ASPEED_PINCTRL_PIN(B20), 659 ASPEED_PINCTRL_PIN(B20),
597 ASPEED_PINCTRL_PIN(B3), 660 ASPEED_PINCTRL_PIN(B3),
@@ -603,6 +666,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
603 ASPEED_PINCTRL_PIN(C12), 666 ASPEED_PINCTRL_PIN(C12),
604 ASPEED_PINCTRL_PIN(C13), 667 ASPEED_PINCTRL_PIN(C13),
605 ASPEED_PINCTRL_PIN(C14), 668 ASPEED_PINCTRL_PIN(C14),
669 ASPEED_PINCTRL_PIN(C15),
606 ASPEED_PINCTRL_PIN(C16), 670 ASPEED_PINCTRL_PIN(C16),
607 ASPEED_PINCTRL_PIN(C18), 671 ASPEED_PINCTRL_PIN(C18),
608 ASPEED_PINCTRL_PIN(C2), 672 ASPEED_PINCTRL_PIN(C2),
@@ -614,7 +678,6 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
614 ASPEED_PINCTRL_PIN(D10), 678 ASPEED_PINCTRL_PIN(D10),
615 ASPEED_PINCTRL_PIN(D2), 679 ASPEED_PINCTRL_PIN(D2),
616 ASPEED_PINCTRL_PIN(D20), 680 ASPEED_PINCTRL_PIN(D20),
617 ASPEED_PINCTRL_PIN(D21),
618 ASPEED_PINCTRL_PIN(D4), 681 ASPEED_PINCTRL_PIN(D4),
619 ASPEED_PINCTRL_PIN(D5), 682 ASPEED_PINCTRL_PIN(D5),
620 ASPEED_PINCTRL_PIN(D6), 683 ASPEED_PINCTRL_PIN(D6),
@@ -630,6 +693,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
630 ASPEED_PINCTRL_PIN(E7), 693 ASPEED_PINCTRL_PIN(E7),
631 ASPEED_PINCTRL_PIN(E9), 694 ASPEED_PINCTRL_PIN(E9),
632 ASPEED_PINCTRL_PIN(F19), 695 ASPEED_PINCTRL_PIN(F19),
696 ASPEED_PINCTRL_PIN(F20),
633 ASPEED_PINCTRL_PIN(F9), 697 ASPEED_PINCTRL_PIN(F9),
634 ASPEED_PINCTRL_PIN(H20), 698 ASPEED_PINCTRL_PIN(H20),
635 ASPEED_PINCTRL_PIN(L1), 699 ASPEED_PINCTRL_PIN(L1),
@@ -691,11 +755,14 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
691 ASPEED_PINCTRL_GROUP(RMII2), 755 ASPEED_PINCTRL_GROUP(RMII2),
692 ASPEED_PINCTRL_GROUP(SD1), 756 ASPEED_PINCTRL_GROUP(SD1),
693 ASPEED_PINCTRL_GROUP(SPI1), 757 ASPEED_PINCTRL_GROUP(SPI1),
758 ASPEED_PINCTRL_GROUP(SPI1DEBUG),
759 ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
694 ASPEED_PINCTRL_GROUP(TIMER4), 760 ASPEED_PINCTRL_GROUP(TIMER4),
695 ASPEED_PINCTRL_GROUP(TIMER5), 761 ASPEED_PINCTRL_GROUP(TIMER5),
696 ASPEED_PINCTRL_GROUP(TIMER6), 762 ASPEED_PINCTRL_GROUP(TIMER6),
697 ASPEED_PINCTRL_GROUP(TIMER7), 763 ASPEED_PINCTRL_GROUP(TIMER7),
698 ASPEED_PINCTRL_GROUP(TIMER8), 764 ASPEED_PINCTRL_GROUP(TIMER8),
765 ASPEED_PINCTRL_GROUP(VGABIOSROM),
699}; 766};
700 767
701static const struct aspeed_pin_function aspeed_g5_functions[] = { 768static const struct aspeed_pin_function aspeed_g5_functions[] = {
@@ -733,11 +800,14 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
733 ASPEED_PINCTRL_FUNC(RMII2), 800 ASPEED_PINCTRL_FUNC(RMII2),
734 ASPEED_PINCTRL_FUNC(SD1), 801 ASPEED_PINCTRL_FUNC(SD1),
735 ASPEED_PINCTRL_FUNC(SPI1), 802 ASPEED_PINCTRL_FUNC(SPI1),
803 ASPEED_PINCTRL_FUNC(SPI1DEBUG),
804 ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
736 ASPEED_PINCTRL_FUNC(TIMER4), 805 ASPEED_PINCTRL_FUNC(TIMER4),
737 ASPEED_PINCTRL_FUNC(TIMER5), 806 ASPEED_PINCTRL_FUNC(TIMER5),
738 ASPEED_PINCTRL_FUNC(TIMER6), 807 ASPEED_PINCTRL_FUNC(TIMER6),
739 ASPEED_PINCTRL_FUNC(TIMER7), 808 ASPEED_PINCTRL_FUNC(TIMER7),
740 ASPEED_PINCTRL_FUNC(TIMER8), 809 ASPEED_PINCTRL_FUNC(TIMER8),
810 ASPEED_PINCTRL_FUNC(VGABIOSROM),
741}; 811};
742 812
743static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = { 813static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
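
The SPI1 rework above turns one single-bit descriptor into three descriptors over the same two-bit strap field, HW_STRAP1[13:12], with field values 1 (SPI1), 2 (debug) and 3 (passthrough). A sketch of the descriptor shape those `{ reg, mask, enable, disable }` initializers imply; the field names and the register offset here are guesses modeled on the initializers, not the driver's actual definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define HW_STRAP1 0x70 /* illustrative register offset */

    /* Guessed layout matching "{ HW_STRAP1, GENMASK(13, 12), 1, 0 }":
     * a register, a bitfield mask, and the field values meaning
     * "function enabled" / "function disabled". */
    struct sig_desc {
        uint32_t reg;
        uint32_t mask;
        uint32_t enable;
        uint32_t disable;
    };

    static const struct sig_desc spi1_desc         = { HW_STRAP1, GENMASK(13, 12), 1, 0 };
    static const struct sig_desc spi1debug_desc    = { HW_STRAP1, GENMASK(13, 12), 2, 0 };
    static const struct sig_desc spi1passthru_desc = { HW_STRAP1, GENMASK(13, 12), 3, 0 };

    static int desc_active(const struct sig_desc *d, uint32_t regval)
    {
        int shift = __builtin_ctz(d->mask);

        return ((regval & d->mask) >> shift) == d->enable;
    }

    int main(void)
    {
        uint32_t strap = 2u << 12; /* SPI1 strapped to debug mode */

        printf("debug=%d passthru=%d plain=%d\n",
               desc_active(&spi1debug_desc, strap),
               desc_active(&spi1passthru_desc, strap),
               desc_active(&spi1_desc, strap));
        return 0;
    }
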
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 0391f9f13f3e..49aeba912531 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -166,13 +166,9 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
166 bool enable, struct regmap *map) 166 bool enable, struct regmap *map)
167{ 167{
168 int i; 168 int i;
169 bool ret;
170
171 ret = aspeed_sig_expr_eval(expr, enable, map);
172 if (ret)
173 return ret;
174 169
175 for (i = 0; i < expr->ndescs; i++) { 170 for (i = 0; i < expr->ndescs; i++) {
171 bool ret;
176 const struct aspeed_sig_desc *desc = &expr->descs[i]; 172 const struct aspeed_sig_desc *desc = &expr->descs[i];
177 u32 pattern = enable ? desc->enable : desc->disable; 173 u32 pattern = enable ? desc->enable : desc->disable;
178 174
@@ -199,12 +195,18 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
199static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr, 195static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
200 struct regmap *map) 196 struct regmap *map)
201{ 197{
198 if (aspeed_sig_expr_eval(expr, true, map))
199 return true;
200
202 return aspeed_sig_expr_set(expr, true, map); 201 return aspeed_sig_expr_set(expr, true, map);
203} 202}
204 203
205static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr, 204static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
206 struct regmap *map) 205 struct regmap *map)
207{ 206{
207 if (!aspeed_sig_expr_eval(expr, true, map))
208 return true;
209
208 return aspeed_sig_expr_set(expr, false, map); 210 return aspeed_sig_expr_set(expr, false, map);
209} 211}
210 212
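
This refactor moves the "already in the requested state?" evaluation out of aspeed_sig_expr_set() and into the two wrappers, and in doing so changes what disable checks: both wrappers now evaluate the *enabled* state, so disabling becomes a no-op when the mux is already off, rather than when the disable pattern happens to be present. A condensed behavioral model, with the register plumbing reduced to one flag:

    #include <stdbool.h>
    #include <stdio.h>

    static bool signal_enabled; /* stands in for decoding the mux registers */

    /* eval(): does the expression currently evaluate to "enabled"? */
    static bool expr_eval(bool enabled_query)
    {
        return enabled_query ? signal_enabled : !signal_enabled;
    }

    static bool expr_set(bool enable)
    {
        signal_enabled = enable; /* stands in for the regmap writes */
        return true;
    }

    /* New shape: both wrappers ask "is it currently enabled?", so
     * disable short-circuits when the mux is already off, instead of
     * testing the target state as the old expr_set() prologue did. */
    static bool expr_enable(void)
    {
        if (expr_eval(true))
            return true; /* already enabled, nothing to write */
        return expr_set(true);
    }

    static bool expr_disable(void)
    {
        if (!expr_eval(true))
            return true; /* already disabled, nothing to write */
        return expr_set(false);
    }

    int main(void)
    {
        printf("enable: %d, disable: %d\n", expr_enable(), expr_disable());
        return 0;
    }
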
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d22a9fe2e6df..71bbeb9321ba 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
1808 return PTR_ERR(vg->pctl_dev); 1808 return PTR_ERR(vg->pctl_dev);
1809 } 1809 }
1810 1810
1811 raw_spin_lock_init(&vg->lock);
1812
1811 ret = byt_gpio_probe(vg); 1813 ret = byt_gpio_probe(vg);
1812 if (ret) { 1814 if (ret) {
1813 pinctrl_unregister(vg->pctl_dev); 1815 pinctrl_unregister(vg->pctl_dev);
@@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
1815 } 1817 }
1816 1818
1817 platform_set_drvdata(pdev, vg); 1819 platform_set_drvdata(pdev, vg);
1818 raw_spin_lock_init(&vg->lock);
1819 pm_runtime_enable(&pdev->dev); 1820 pm_runtime_enable(&pdev->dev);
1820 1821
1821 return 0; 1822 return 0;
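
Moving raw_spin_lock_init() ahead of byt_gpio_probe() is an init-before-first-use fix: the probe path can plausibly take vg->lock (for example via the IRQ plumbing it sets up) before the old init point was reached, and locking an uninitialized raw spinlock is undefined. The exact first taker is an inference here, but the pattern is generic, modeled below with a pthread mutex:

    #include <pthread.h>
    #include <stdio.h>

    struct byt_gpio {
        pthread_mutex_t lock; /* models vg->lock */
        int probed;
    };

    static void gpio_probe(struct byt_gpio *vg)
    {
        /* May lock/unlock internally (IRQ setup paths do), so the
         * lock must already be initialized by the time we get here. */
        pthread_mutex_lock(&vg->lock);
        vg->probed = 1;
        pthread_mutex_unlock(&vg->lock);
    }

    int main(void)
    {
        struct byt_gpio vg = { .probed = 0 };

        pthread_mutex_init(&vg.lock, NULL); /* fix: init before probe */
        gpio_probe(&vg);
        printf("probed=%d\n", vg.probed);
        pthread_mutex_destroy(&vg.lock);
        return 0;
    }
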
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 63387a40b973..01443762e570 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -19,6 +19,7 @@
19#include <linux/pinctrl/pinconf.h> 19#include <linux/pinctrl/pinconf.h>
20#include <linux/pinctrl/pinconf-generic.h> 20#include <linux/pinctrl/pinconf-generic.h>
21 21
22#include "../core.h"
22#include "pinctrl-intel.h" 23#include "pinctrl-intel.h"
23 24
24/* Offset from regs */ 25/* Offset from regs */
@@ -1056,6 +1057,26 @@ int intel_pinctrl_remove(struct platform_device *pdev)
1056EXPORT_SYMBOL_GPL(intel_pinctrl_remove); 1057EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
1057 1058
1058#ifdef CONFIG_PM_SLEEP 1059#ifdef CONFIG_PM_SLEEP
1060static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
1061{
1062 const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
1063
1064 if (!pd || !intel_pad_usable(pctrl, pin))
1065 return false;
1066
1067 /*
1068 * Only restore the pin if it is actually in use by the kernel (or
1069 * by userspace). It is possible that some pins are used by the
1070 * BIOS during resume and those are not always locked down so leave
1071 * them alone.
1072 */
1073 if (pd->mux_owner || pd->gpio_owner ||
1074 gpiochip_line_is_irq(&pctrl->chip, pin))
1075 return true;
1076
1077 return false;
1078}
1079
1059int intel_pinctrl_suspend(struct device *dev) 1080int intel_pinctrl_suspend(struct device *dev)
1060{ 1081{
1061 struct platform_device *pdev = to_platform_device(dev); 1082 struct platform_device *pdev = to_platform_device(dev);
@@ -1069,7 +1090,7 @@ int intel_pinctrl_suspend(struct device *dev)
1069 const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i]; 1090 const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
1070 u32 val; 1091 u32 val;
1071 1092
1072 if (!intel_pad_usable(pctrl, desc->number)) 1093 if (!intel_pinctrl_should_save(pctrl, desc->number))
1073 continue; 1094 continue;
1074 1095
1075 val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0)); 1096 val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
@@ -1130,7 +1151,7 @@ int intel_pinctrl_resume(struct device *dev)
1130 void __iomem *padcfg; 1151 void __iomem *padcfg;
1131 u32 val; 1152 u32 val;
1132 1153
1133 if (!intel_pad_usable(pctrl, desc->number)) 1154 if (!intel_pinctrl_should_save(pctrl, desc->number))
1134 continue; 1155 continue;
1135 1156
1136 padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0); 1157 padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 07462d79d040..1aba2c74160e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
309 * much memory to the process. 309 * much memory to the process.
310 */ 310 */
311 down_read(&current->mm->mmap_sem); 311 down_read(&current->mm->mmap_sem);
312 ret = get_user_pages(address, 1, !is_write, 0, &page, NULL); 312 ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
313 &page, NULL);
313 up_read(&current->mm->mmap_sem); 314 up_read(&current->mm->mmap_sem);
314 if (ret < 0) 315 if (ret < 0)
315 break; 316 break;
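
This hunk, and the rio_mport, st and vchiq hunks further down, all track the same tree-wide API change: get_user_pages() and friends dropped the separate `write`/`force` ints in favor of a single `gup_flags` word, with write intent expressed as FOLL_WRITE and force (where still wanted) as FOLL_FORCE. A sketch of the mapping, assuming the usual flag values:

    #include <stdio.h>

    /* Assumed flag values matching the kernel's FOLL_* constants. */
    #define FOLL_WRITE 0x01 /* caller may write through the mapped pages */
    #define FOLL_FORCE 0x10 /* override permissions (was the "force" int) */

    /* Old style: get_user_pages(addr, n, write, force, pages, vmas)
     * New style: get_user_pages(addr, n, gup_flags,   pages, vmas) */
    static unsigned int gup_flags_from_legacy(int write, int force)
    {
        unsigned int flags = 0;

        if (write)
            flags |= FOLL_WRITE;
        if (force)
            flags |= FOLL_FORCE;
        return flags;
    }

    int main(void)
    {
        /* e.g. goldfish above: write = !is_write, force = 0 */
        int is_write = 0;

        printf("gup_flags=0x%x\n", gup_flags_from_legacy(!is_write, 0));
        return 0;
    }
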
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 81b8dcca8891..b8a21d7b25d4 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -576,6 +576,7 @@ config ASUS_WMI
576config ASUS_NB_WMI 576config ASUS_NB_WMI
577 tristate "Asus Notebook WMI Driver" 577 tristate "Asus Notebook WMI Driver"
578 depends on ASUS_WMI 578 depends on ASUS_WMI
579 depends on SERIO_I8042 || SERIO_I8042 = n
579 ---help--- 580 ---help---
580 This is a driver for newer Asus notebooks. It adds extra features 581 This is a driver for newer Asus notebooks. It adds extra features
581 like wireless radio and bluetooth control, leds, hotkeys, backlight... 582 like wireless radio and bluetooth control, leds, hotkeys, backlight...
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d1a091b93192..a2323941e677 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -933,6 +933,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
933 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"), 933 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
934 }, 934 },
935 }, 935 },
936 {
937 .ident = "Lenovo YOGA 910-13IKB",
938 .matches = {
939 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
940 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
941 },
942 },
936 {} 943 {}
937}; 944};
938 945
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 436dfe871d32..9013a585507e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
892 down_read(&current->mm->mmap_sem); 892 down_read(&current->mm->mmap_sem);
893 pinned = get_user_pages( 893 pinned = get_user_pages(
894 (unsigned long)xfer->loc_addr & PAGE_MASK, 894 (unsigned long)xfer->loc_addr & PAGE_MASK,
895 nr_pages, dir == DMA_FROM_DEVICE, 0, 895 nr_pages,
896 dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
896 page_list, NULL); 897 page_list, NULL);
897 up_read(&current->mm->mmap_sem); 898 up_read(&current->mm->mmap_sem);
898 899
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 831935af7389..a7a88476e215 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1205,7 +1205,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1205 mdc, lpm); 1205 mdc, lpm);
1206 return mdc; 1206 return mdc;
1207 } 1207 }
1208 fcx_max_data = mdc * FCX_MAX_DATA_FACTOR; 1208 fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
1209 if (fcx_max_data < private->fcx_max_data) { 1209 if (fcx_max_data < private->fcx_max_data) {
1210 dev_warn(&device->cdev->dev, 1210 dev_warn(&device->cdev->dev,
1211 "The maximum data size for zHPF requests %u " 1211 "The maximum data size for zHPF requests %u "
@@ -1675,7 +1675,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
1675 " data size for zHPF requests failed\n"); 1675 " data size for zHPF requests failed\n");
1676 return 0; 1676 return 0;
1677 } else 1677 } else
1678 return mdc * FCX_MAX_DATA_FACTOR; 1678 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1679} 1679}
1680 1680
1681/* 1681/*
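
The `(u32)mdc` casts guard the multiplication: mdc is a signed int, and an int-by-int multiply overflows (undefined behavior) once mdc exceeds INT_MAX / FCX_MAX_DATA_FACTOR, while a u32 multiply is well defined and the product fits. A demonstration, assuming a factor of 65536; the exact constant lives in the dasd_eckd headers:

    #include <stdint.h>
    #include <stdio.h>

    #define FCX_MAX_DATA_FACTOR 65536 /* assumed value for the sketch */

    int main(void)
    {
        int mdc = 40000; /* plausible large "max data count" reply */

        /* int * int: 40000 * 65536 overflows a 32-bit int (UB).
         * u32 * int: the int converts to u32, the multiply is defined
         * modulo 2^32, and the result here fits comfortably. */
        uint32_t fcx_max_data = (uint32_t)mdc * FCX_MAX_DATA_FACTOR;

        printf("%u\n", fcx_max_data); /* 2621440000 */
        return 0;
    }
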
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 46be25c7461e..876c7e6e3a99 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -780,7 +780,7 @@ static int cfg_wait_idle(void)
780static int __init chp_init(void) 780static int __init chp_init(void)
781{ 781{
782 struct chp_id chpid; 782 struct chp_id chpid;
783 int ret; 783 int state, ret;
784 784
785 ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw); 785 ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
786 if (ret) 786 if (ret)
@@ -791,7 +791,9 @@ static int __init chp_init(void)
791 return 0; 791 return 0;
792 /* Register available channel-paths. */ 792 /* Register available channel-paths. */
793 chp_id_for_each(&chpid) { 793 chp_id_for_each(&chpid) {
794 if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) 794 state = chp_info_get_status(chpid);
795 if (state == CHP_STATUS_CONFIGURED ||
796 state == CHP_STATUS_STANDBY)
795 chp_new(chpid); 797 chp_new(chpid);
796 } 798 }
797 799
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 637cf8973c9e..581001989937 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -384,7 +384,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
384 /* if (len > rec_len): 384 /* if (len > rec_len):
385 * dump data up to cap_len ignoring small duplicate in rec->payload 385 * dump data up to cap_len ignoring small duplicate in rec->payload
386 */ 386 */
387 spin_lock_irqsave(&dbf->pay_lock, flags); 387 spin_lock(&dbf->pay_lock);
388 memset(payload, 0, sizeof(*payload)); 388 memset(payload, 0, sizeof(*payload));
389 memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN); 389 memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
390 payload->fsf_req_id = req_id; 390 payload->fsf_req_id = req_id;
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index db2739079cbb..790babc5ef66 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -353,7 +353,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
353#endif 353#endif
354 354
355 355
356static int probe_irq __initdata; 356static int probe_irq;
357 357
358/** 358/**
359 * probe_intr - helper for IRQ autoprobe 359 * probe_intr - helper for IRQ autoprobe
@@ -365,7 +365,7 @@ static int probe_irq __initdata;
365 * used by the IRQ probe code. 365 * used by the IRQ probe code.
366 */ 366 */
367 367
368static irqreturn_t __init probe_intr(int irq, void *dev_id) 368static irqreturn_t probe_intr(int irq, void *dev_id)
369{ 369{
370 probe_irq = irq; 370 probe_irq = irq;
371 return IRQ_HANDLED; 371 return IRQ_HANDLED;
@@ -380,7 +380,7 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
380 * and then looking to see what interrupt actually turned up. 380 * and then looking to see what interrupt actually turned up.
381 */ 381 */
382 382
383static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, 383static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
384 int possible) 384 int possible)
385{ 385{
386 struct NCR5380_hostdata *hostdata = shost_priv(instance); 386 struct NCR5380_hostdata *hostdata = shost_priv(instance);
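
Dropping __init/__initdata here is a section-mismatch fix: __init code and data live in sections the kernel frees once boot completes, so anything reachable afterwards (module load, hotplug, a late re-probe) must not carry the annotation or a call would land in freed memory. A userspace sketch with the attributes stubbed out, since the section magic only exists in the kernel:

    #include <stdio.h>

    /* In the kernel these place symbols in .init.text / .init.data,
     * which are discarded after boot; stubbed so the sketch compiles. */
    #define __init
    #define __initdata

    static int probe_irq;           /* was __initdata before the patch */

    static void probe_intr(int irq) /* was __init before the patch */
    {
        probe_irq = irq;
    }

    int main(void)
    {
        /* Any call made after initcalls finish would hit freed memory
         * if probe_intr were still __init -- hence the removal. */
        probe_intr(11);
        printf("autoprobed irq %d\n", probe_irq);
        return 0;
    }
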
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 68138a647dfc..d9239c2d49b1 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -900,8 +900,9 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
900static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 900static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
901{ 901{
902 struct sgl_handle *psgl_handle; 902 struct sgl_handle *psgl_handle;
903 unsigned long flags;
903 904
904 spin_lock_bh(&phba->io_sgl_lock); 905 spin_lock_irqsave(&phba->io_sgl_lock, flags);
905 if (phba->io_sgl_hndl_avbl) { 906 if (phba->io_sgl_hndl_avbl) {
906 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 907 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
907 "BM_%d : In alloc_io_sgl_handle," 908 "BM_%d : In alloc_io_sgl_handle,"
@@ -919,14 +920,16 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
919 phba->io_sgl_alloc_index++; 920 phba->io_sgl_alloc_index++;
920 } else 921 } else
921 psgl_handle = NULL; 922 psgl_handle = NULL;
922 spin_unlock_bh(&phba->io_sgl_lock); 923 spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
923 return psgl_handle; 924 return psgl_handle;
924} 925}
925 926
926static void 927static void
927free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 928free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
928{ 929{
929 spin_lock_bh(&phba->io_sgl_lock); 930 unsigned long flags;
931
932 spin_lock_irqsave(&phba->io_sgl_lock, flags);
930 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 933 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
931 "BM_%d : In free_,io_sgl_free_index=%d\n", 934 "BM_%d : In free_,io_sgl_free_index=%d\n",
932 phba->io_sgl_free_index); 935 phba->io_sgl_free_index);
@@ -941,7 +944,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
941 "value there=%p\n", phba->io_sgl_free_index, 944 "value there=%p\n", phba->io_sgl_free_index,
942 phba->io_sgl_hndl_base 945 phba->io_sgl_hndl_base
943 [phba->io_sgl_free_index]); 946 [phba->io_sgl_free_index]);
944 spin_unlock_bh(&phba->io_sgl_lock); 947 spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
945 return; 948 return;
946 } 949 }
947 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 950 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -950,7 +953,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
950 phba->io_sgl_free_index = 0; 953 phba->io_sgl_free_index = 0;
951 else 954 else
952 phba->io_sgl_free_index++; 955 phba->io_sgl_free_index++;
953 spin_unlock_bh(&phba->io_sgl_lock); 956 spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
954} 957}
955 958
956static inline struct wrb_handle * 959static inline struct wrb_handle *
@@ -958,15 +961,16 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
958 unsigned int wrbs_per_cxn) 961 unsigned int wrbs_per_cxn)
959{ 962{
960 struct wrb_handle *pwrb_handle; 963 struct wrb_handle *pwrb_handle;
964 unsigned long flags;
961 965
962 spin_lock_bh(&pwrb_context->wrb_lock); 966 spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
963 pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; 967 pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
964 pwrb_context->wrb_handles_available--; 968 pwrb_context->wrb_handles_available--;
965 if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) 969 if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
966 pwrb_context->alloc_index = 0; 970 pwrb_context->alloc_index = 0;
967 else 971 else
968 pwrb_context->alloc_index++; 972 pwrb_context->alloc_index++;
969 spin_unlock_bh(&pwrb_context->wrb_lock); 973 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
970 974
971 if (pwrb_handle) 975 if (pwrb_handle)
972 memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb)); 976 memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
@@ -1001,14 +1005,16 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
1001 struct wrb_handle *pwrb_handle, 1005 struct wrb_handle *pwrb_handle,
1002 unsigned int wrbs_per_cxn) 1006 unsigned int wrbs_per_cxn)
1003{ 1007{
1004 spin_lock_bh(&pwrb_context->wrb_lock); 1008 unsigned long flags;
1009
1010 spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
1005 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; 1011 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
1006 pwrb_context->wrb_handles_available++; 1012 pwrb_context->wrb_handles_available++;
1007 if (pwrb_context->free_index == (wrbs_per_cxn - 1)) 1013 if (pwrb_context->free_index == (wrbs_per_cxn - 1))
1008 pwrb_context->free_index = 0; 1014 pwrb_context->free_index = 0;
1009 else 1015 else
1010 pwrb_context->free_index++; 1016 pwrb_context->free_index++;
1011 spin_unlock_bh(&pwrb_context->wrb_lock); 1017 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
1012} 1018}
1013 1019
1014/** 1020/**
@@ -1037,8 +1043,9 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1037static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1043static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1038{ 1044{
1039 struct sgl_handle *psgl_handle; 1045 struct sgl_handle *psgl_handle;
1046 unsigned long flags;
1040 1047
1041 spin_lock_bh(&phba->mgmt_sgl_lock); 1048 spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
1042 if (phba->eh_sgl_hndl_avbl) { 1049 if (phba->eh_sgl_hndl_avbl) {
1043 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1050 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1044 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 1051 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1056,14 +1063,16 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1056 phba->eh_sgl_alloc_index++; 1063 phba->eh_sgl_alloc_index++;
1057 } else 1064 } else
1058 psgl_handle = NULL; 1065 psgl_handle = NULL;
1059 spin_unlock_bh(&phba->mgmt_sgl_lock); 1066 spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1060 return psgl_handle; 1067 return psgl_handle;
1061} 1068}
1062 1069
1063void 1070void
1064free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1071free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1065{ 1072{
1066 spin_lock_bh(&phba->mgmt_sgl_lock); 1073 unsigned long flags;
1074
1075 spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
1067 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1076 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1068 "BM_%d : In free_mgmt_sgl_handle," 1077 "BM_%d : In free_mgmt_sgl_handle,"
1069 "eh_sgl_free_index=%d\n", 1078 "eh_sgl_free_index=%d\n",
@@ -1078,7 +1087,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1078 "BM_%d : Double Free in eh SGL ," 1087 "BM_%d : Double Free in eh SGL ,"
1079 "eh_sgl_free_index=%d\n", 1088 "eh_sgl_free_index=%d\n",
1080 phba->eh_sgl_free_index); 1089 phba->eh_sgl_free_index);
1081 spin_unlock_bh(&phba->mgmt_sgl_lock); 1090 spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1082 return; 1091 return;
1083 } 1092 }
1084 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; 1093 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1088,7 +1097,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1088 phba->eh_sgl_free_index = 0; 1097 phba->eh_sgl_free_index = 0;
1089 else 1098 else
1090 phba->eh_sgl_free_index++; 1099 phba->eh_sgl_free_index++;
1091 spin_unlock_bh(&phba->mgmt_sgl_lock); 1100 spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
1092} 1101}
1093 1102
1094static void 1103static void
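
All of the be2iscsi conversions above swap spin_lock_bh() for spin_lock_irqsave(): the _bh variant only masks softirqs, which is insufficient (and invalid) once these allocators can be entered from hard-IRQ context, and the flags argument preserves the interrupt state of the outermost caller instead of blindly re-enabling. A toy model of the save/restore discipline:

    #include <stdio.h>

    static int irq_enabled = 1; /* models the CPU interrupt flag */
    static int lock_held;       /* models the spinlock */

    /* spin_lock_irqsave: remember the current IRQ state, disable IRQs,
     * then take the lock -- safe from process, softirq, or hard-IRQ
     * context alike. */
    static void lock_irqsave(unsigned long *flags)
    {
        *flags = (unsigned long)irq_enabled;
        irq_enabled = 0;
        lock_held = 1;
    }

    static void unlock_irqrestore(unsigned long flags)
    {
        lock_held = 0;
        irq_enabled = (int)flags; /* restore, don't blindly re-enable */
    }

    int main(void)
    {
        unsigned long flags;

        irq_enabled = 0; /* pretend we're nested: IRQs already off */
        lock_irqsave(&flags);
        unlock_irqrestore(flags);
        printf("irq_enabled=%d (still off, as saved)\n", irq_enabled);
        return 0;
    }
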
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index a8762a3efeef..532474109624 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2586,7 +2586,6 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2586 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2586 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2587 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 2587 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2588 u32 fd_ioasc; 2588 u32 fd_ioasc;
2589 char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
2590 2589
2591 if (ioa_cfg->sis64) 2590 if (ioa_cfg->sis64)
2592 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2591 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
@@ -2607,8 +2606,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2607 } 2606 }
2608 2607
2609 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); 2608 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2609 schedule_work(&ioa_cfg->work_q);
2610 hostrcb = ipr_get_free_hostrcb(ioa_cfg); 2610 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2611 kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
2612 2611
2613 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 2612 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2614} 2613}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c051694bfcb0..f9b6fba689ff 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -791,9 +791,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
791 791
792free_task: 792free_task:
793 /* regular RX path uses back_lock */ 793 /* regular RX path uses back_lock */
794 spin_lock_bh(&session->back_lock); 794 spin_lock(&session->back_lock);
795 __iscsi_put_task(task); 795 __iscsi_put_task(task);
796 spin_unlock_bh(&session->back_lock); 796 spin_unlock(&session->back_lock);
797 return NULL; 797 return NULL;
798} 798}
799 799
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 54d446c9f56e..b8d3b97b217a 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -36,9 +36,9 @@ struct scsi_dh_blist {
36}; 36};
37 37
38static const struct scsi_dh_blist scsi_dh_blist[] = { 38static const struct scsi_dh_blist scsi_dh_blist[] = {
39 {"DGC", "RAID", "clariion" }, 39 {"DGC", "RAID", "emc" },
40 {"DGC", "DISK", "clariion" }, 40 {"DGC", "DISK", "emc" },
41 {"DGC", "VRAID", "clariion" }, 41 {"DGC", "VRAID", "emc" },
42 42
43 {"COMPAQ", "MSA1000 VOLUME", "hp_sw" }, 43 {"COMPAQ", "MSA1000 VOLUME", "hp_sw" },
44 {"COMPAQ", "HSV110", "hp_sw" }, 44 {"COMPAQ", "HSV110", "hp_sw" },
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 212e98d940bc..6f7128f49c30 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1307,7 +1307,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
1307static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, 1307static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1308 enum scsi_scan_mode rescan) 1308 enum scsi_scan_mode rescan)
1309{ 1309{
1310 char devname[64];
1311 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 1310 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1312 unsigned int length; 1311 unsigned int length;
1313 u64 lun; 1312 u64 lun;
@@ -1349,9 +1348,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1349 } 1348 }
1350 } 1349 }
1351 1350
1352 sprintf(devname, "host %d channel %d id %d",
1353 shost->host_no, sdev->channel, sdev->id);
1354
1355 /* 1351 /*
1356 * Allocate enough to hold the header (the same size as one scsi_lun) 1352 * Allocate enough to hold the header (the same size as one scsi_lun)
1357 * plus the number of luns we are requesting. 511 was the default 1353 * plus the number of luns we are requesting. 511 was the default
@@ -1470,12 +1466,12 @@ retry:
1470 out_err: 1466 out_err:
1471 kfree(lun_data); 1467 kfree(lun_data);
1472 out: 1468 out:
1473 scsi_device_put(sdev);
1474 if (scsi_device_created(sdev)) 1469 if (scsi_device_created(sdev))
1475 /* 1470 /*
1476 * the sdev we used didn't appear in the report luns scan 1471 * the sdev we used didn't appear in the report luns scan
1477 */ 1472 */
1478 __scsi_remove_device(sdev); 1473 __scsi_remove_device(sdev);
1474 scsi_device_put(sdev);
1479 return ret; 1475 return ret;
1480} 1476}
1481 1477
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7af5226aa55b..618422ea3a41 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
4922 res = get_user_pages_unlocked( 4922 res = get_user_pages_unlocked(
4923 uaddr, 4923 uaddr,
4924 nr_pages, 4924 nr_pages,
4925 rw == READ, 4925 pages,
4926 0, /* don't force */ 4926 rw == READ ? FOLL_WRITE : 0); /* don't force */
4927 pages);
4928 4927
4929 /* Errors and no page mapped should return here */ 4928 /* Errors and no page mapped should return here */
4930 if (res < nr_pages) 4929 if (res < nr_pages)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c29040fdf9a7..1091b9f1dd07 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
423 actual_pages = get_user_pages(task, task->mm, 423 actual_pages = get_user_pages(task, task->mm,
424 (unsigned long)buf & ~(PAGE_SIZE - 1), 424 (unsigned long)buf & ~(PAGE_SIZE - 1),
425 num_pages, 425 num_pages,
426 (type == PAGELIST_READ) /*Write */ , 426 (type == PAGELIST_READ) ? FOLL_WRITE : 0,
427 0 /*Force */ ,
428 pages, 427 pages,
429 NULL /*vmas */); 428 NULL /*vmas */);
430 up_read(&task->mm->mmap_sem); 429 up_read(&task->mm->mmap_sem);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e11c0e07471b..7b6cd4d80621 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1477 current->mm, /* mm */ 1477 current->mm, /* mm */
1478 (unsigned long)virt_addr, /* start */ 1478 (unsigned long)virt_addr, /* start */
1479 num_pages, /* len */ 1479 num_pages, /* len */
1480 0, /* write */ 1480 0, /* gup_flags */
1481 0, /* force */
1482 pages, /* pages (array of page pointers) */ 1481 pages, /* pages (array of page pointers) */
1483 NULL); /* vmas */ 1482 NULL); /* vmas */
1484 up_read(&current->mm->mmap_sem); 1483 up_read(&current->mm->mmap_sem);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 39b928c2849d..b7d747e92c7a 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1804,6 +1804,10 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1804 * Otherwise, initiator is not expecting a NOPIN is response. 1804 * Otherwise, initiator is not expecting a NOPIN is response.
1805 * Just ignore for now. 1805 * Just ignore for now.
1806 */ 1806 */
1807
1808 if (cmd)
1809 iscsit_free_cmd(cmd, false);
1810
1807 return 0; 1811 return 0;
1808} 1812}
1809EXPORT_SYMBOL(iscsit_process_nop_out); 1813EXPORT_SYMBOL(iscsit_process_nop_out);
@@ -2982,7 +2986,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2982 2986
2983 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x," 2987 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
2984 " StatSN: 0x%08x, Length %u\n", (nopout_response) ? 2988 " StatSN: 0x%08x, Length %u\n", (nopout_response) ?
2985 "Solicitied" : "Unsolicitied", cmd->init_task_tag, 2989 "Solicited" : "Unsolicited", cmd->init_task_tag,
2986 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); 2990 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
2987} 2991}
2988EXPORT_SYMBOL(iscsit_build_nopin_rsp); 2992EXPORT_SYMBOL(iscsit_build_nopin_rsp);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index adf419fa4291..15f79a2ca34a 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -434,7 +434,7 @@ static int iscsi_login_zero_tsih_s2(
434 434
435 /* 435 /*
436 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for 436 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
437 * Immediate Data + Unsolicitied Data-OUT if necessary.. 437 * Immediate Data + Unsolicited Data-OUT if necessary..
438 */ 438 */
439 param = iscsi_find_param_from_key("MaxRecvDataSegmentLength", 439 param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
440 conn->param_list); 440 conn->param_list);
@@ -646,7 +646,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
646{ 646{
647 struct iscsi_session *sess = conn->sess; 647 struct iscsi_session *sess = conn->sess;
648 /* 648 /*
649 * FIXME: Unsolicitied NopIN support for ISER 649 * FIXME: Unsolicited NopIN support for ISER
650 */ 650 */
651 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) 651 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
652 return; 652 return;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6094a6beddde..7dfefd66df93 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd);
754 754
755void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 755void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
756{ 756{
757 if (scsi_status != SAM_STAT_GOOD) { 757 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
758 return;
759 }
760
761 /*
762 * Calculate new residual count based upon length of SCSI data
763 * transferred.
764 */
765 if (length < cmd->data_length) {
766 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 758 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
767 cmd->residual_count += cmd->data_length - length; 759 cmd->residual_count += cmd->data_length - length;
768 } else { 760 } else {
@@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
771 } 763 }
772 764
773 cmd->data_length = length; 765 cmd->data_length = length;
774 } else if (length > cmd->data_length) {
775 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
776 cmd->residual_count = length - cmd->data_length;
777 } else {
778 cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
779 cmd->residual_count = 0;
780 } 766 }
781 767
782 target_complete_cmd(cmd, scsi_status); 768 target_complete_cmd(cmd, scsi_status);
@@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1706 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1692 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1707 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1693 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1708 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1694 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1695 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1709 break; 1696 break;
1710 case TCM_OUT_OF_RESOURCES: 1697 case TCM_OUT_OF_RESOURCES:
1711 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1698 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2547,8 +2534,12 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2547 * fabric acknowledgement that requires two target_put_sess_cmd() 2534 * fabric acknowledgement that requires two target_put_sess_cmd()
2548 * invocations before se_cmd descriptor release. 2535 * invocations before se_cmd descriptor release.
2549 */ 2536 */
2550 if (ack_kref) 2537 if (ack_kref) {
2551 kref_get(&se_cmd->cmd_kref); 2538 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2539 return -EINVAL;
2540
2541 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2542 }
2552 2543
2553 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2544 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2554 if (se_sess->sess_tearing_down) { 2545 if (se_sess->sess_tearing_down) {
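
Swapping kref_get() for kref_get_unless_zero() prevents resurrecting a command whose refcount has already hit zero while the fabric still holds a pointer to it; the caller now fails with -EINVAL instead of re-animating a dying object. A minimal C11 model of the primitive:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int cmd_kref = 1;

    /* Take a reference only if the object is still alive (count > 0);
     * never resurrect a refcount that has reached zero. */
    static int kref_get_unless_zero(atomic_int *kref)
    {
        int old = atomic_load(kref);

        while (old != 0) {
            if (atomic_compare_exchange_weak(kref, &old, old + 1))
                return 1; /* got a reference */
        }
        return 0; /* already dying: caller must bail out (-EINVAL) */
    }

    int main(void)
    {
        printf("alive: got=%d\n", kref_get_unless_zero(&cmd_kref));
        atomic_store(&cmd_kref, 0); /* the last put has happened */
        printf("dying: got=%d\n", kref_get_unless_zero(&cmd_kref));
        return 0;
    }
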
@@ -2627,7 +2618,7 @@ EXPORT_SYMBOL(target_put_sess_cmd);
2627 */ 2618 */
2628void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2619void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2629{ 2620{
2630 struct se_cmd *se_cmd; 2621 struct se_cmd *se_cmd, *tmp_cmd;
2631 unsigned long flags; 2622 unsigned long flags;
2632 int rc; 2623 int rc;
2633 2624
@@ -2639,14 +2630,16 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2639 se_sess->sess_tearing_down = 1; 2630 se_sess->sess_tearing_down = 1;
2640 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2631 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2641 2632
2642 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { 2633 list_for_each_entry_safe(se_cmd, tmp_cmd,
2634 &se_sess->sess_wait_list, se_cmd_list) {
2643 rc = kref_get_unless_zero(&se_cmd->cmd_kref); 2635 rc = kref_get_unless_zero(&se_cmd->cmd_kref);
2644 if (rc) { 2636 if (rc) {
2645 se_cmd->cmd_wait_set = 1; 2637 se_cmd->cmd_wait_set = 1;
2646 spin_lock(&se_cmd->t_state_lock); 2638 spin_lock(&se_cmd->t_state_lock);
2647 se_cmd->transport_state |= CMD_T_FABRIC_STOP; 2639 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
2648 spin_unlock(&se_cmd->t_state_lock); 2640 spin_unlock(&se_cmd->t_state_lock);
2649 } 2641 } else
2642 list_del_init(&se_cmd->se_cmd_list);
2650 } 2643 }
2651 2644
2652 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2645 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2871,6 +2864,12 @@ static const struct sense_info sense_info_table[] = {
2871 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 2864 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2872 .add_sector_info = true, 2865 .add_sector_info = true,
2873 }, 2866 },
2867 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
2868 .key = COPY_ABORTED,
2869 .asc = 0x0d,
2870 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
2871
2872 },
2874 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 2873 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
2875 /* 2874 /*
2876 * Returning ILLEGAL REQUEST would cause immediate IO errors on 2875 * Returning ILLEGAL REQUEST would cause immediate IO errors on
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 62bf4fe5704a..47562509b489 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -96,7 +96,7 @@ struct tcmu_dev {
96 size_t dev_size; 96 size_t dev_size;
97 u32 cmdr_size; 97 u32 cmdr_size;
98 u32 cmdr_last_cleaned; 98 u32 cmdr_last_cleaned;
99 /* Offset of data ring from start of mb */ 99 /* Offset of data area from start of mb */
100 /* Must add data_off and mb_addr to get the address */ 100 /* Must add data_off and mb_addr to get the address */
101 size_t data_off; 101 size_t data_off;
102 size_t data_size; 102 size_t data_size;
@@ -349,7 +349,7 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap)
349 349
350/* 350/*
351 * We can't queue a command until we have space available on the cmd ring *and* 351 * We can't queue a command until we have space available on the cmd ring *and*
352 * space available on the data ring. 352 * space available on the data area.
353 * 353 *
354 * Called with ring lock held. 354 * Called with ring lock held.
355 */ 355 */
@@ -389,7 +389,8 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
389 return true; 389 return true;
390} 390}
391 391
392static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 392static sense_reason_t
393tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
393{ 394{
394 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 395 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
395 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 396 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
@@ -405,7 +406,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
405 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS); 406 DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
406 407
407 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 408 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
408 return -EINVAL; 409 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
409 410
410 /* 411 /*
411 * Must be a certain minimum size for response sense info, but 412 * Must be a certain minimum size for response sense info, but
@@ -432,11 +433,14 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
432 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); 433 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
433 data_length += se_cmd->t_bidi_data_sg->length; 434 data_length += se_cmd->t_bidi_data_sg->length;
434 } 435 }
435 if ((command_size > (udev->cmdr_size / 2)) 436 if ((command_size > (udev->cmdr_size / 2)) ||
436 || data_length > udev->data_size) 437 data_length > udev->data_size) {
437 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu " 438 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
438 "cmd/data ring buffers\n", command_size, data_length, 439 "cmd ring/data area\n", command_size, data_length,
439 udev->cmdr_size, udev->data_size); 440 udev->cmdr_size, udev->data_size);
441 spin_unlock_irq(&udev->cmdr_lock);
442 return TCM_INVALID_CDB_FIELD;
443 }
440 444
441 while (!is_ring_space_avail(udev, command_size, data_length)) { 445 while (!is_ring_space_avail(udev, command_size, data_length)) {
442 int ret; 446 int ret;
@@ -450,7 +454,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
450 finish_wait(&udev->wait_cmdr, &__wait); 454 finish_wait(&udev->wait_cmdr, &__wait);
451 if (!ret) { 455 if (!ret) {
452 pr_warn("tcmu: command timed out\n"); 456 pr_warn("tcmu: command timed out\n");
453 return -ETIMEDOUT; 457 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
454 } 458 }
455 459
456 spin_lock_irq(&udev->cmdr_lock); 460 spin_lock_irq(&udev->cmdr_lock);
@@ -487,9 +491,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
487 491
488 bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS); 492 bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
489 493
490 /* 494 /* Handle allocating space from the data area */
491 * Fix up iovecs, and handle if allocation in data ring wrapped.
492 */
493 iov = &entry->req.iov[0]; 495 iov = &entry->req.iov[0];
494 iov_cnt = 0; 496 iov_cnt = 0;
495 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE 497 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
@@ -526,10 +528,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
526 mod_timer(&udev->timeout, 528 mod_timer(&udev->timeout,
527 round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); 529 round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
528 530
529 return 0; 531 return TCM_NO_SENSE;
530} 532}
531 533
532static int tcmu_queue_cmd(struct se_cmd *se_cmd) 534static sense_reason_t
535tcmu_queue_cmd(struct se_cmd *se_cmd)
533{ 536{
534 struct se_device *se_dev = se_cmd->se_dev; 537 struct se_device *se_dev = se_cmd->se_dev;
535 struct tcmu_dev *udev = TCMU_DEV(se_dev); 538 struct tcmu_dev *udev = TCMU_DEV(se_dev);
@@ -538,10 +541,10 @@ static int tcmu_queue_cmd(struct se_cmd *se_cmd)
538 541
539 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 542 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
540 if (!tcmu_cmd) 543 if (!tcmu_cmd)
541 return -ENOMEM; 544 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
542 545
543 ret = tcmu_queue_cmd_ring(tcmu_cmd); 546 ret = tcmu_queue_cmd_ring(tcmu_cmd);
544 if (ret < 0) { 547 if (ret != TCM_NO_SENSE) {
545 pr_err("TCMU: Could not queue command\n"); 548 pr_err("TCMU: Could not queue command\n");
546 spin_lock_irq(&udev->commands_lock); 549 spin_lock_irq(&udev->commands_lock);
547 idr_remove(&udev->commands, tcmu_cmd->cmd_id); 550 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
@@ -561,7 +564,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
561 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 564 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
562 /* 565 /*
563 * cmd has been completed already from timeout, just reclaim 566 * cmd has been completed already from timeout, just reclaim
564 * data ring space and free cmd 567 * data area space and free cmd
565 */ 568 */
566 free_data_area(udev, cmd); 569 free_data_area(udev, cmd);
567 570
@@ -1129,20 +1132,9 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
1129} 1132}
1130 1133
1131static sense_reason_t 1134static sense_reason_t
1132tcmu_pass_op(struct se_cmd *se_cmd)
1133{
1134 int ret = tcmu_queue_cmd(se_cmd);
1135
1136 if (ret != 0)
1137 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1138 else
1139 return TCM_NO_SENSE;
1140}
1141
1142static sense_reason_t
1143tcmu_parse_cdb(struct se_cmd *cmd) 1135tcmu_parse_cdb(struct se_cmd *cmd)
1144{ 1136{
1145 return passthrough_parse_cdb(cmd, tcmu_pass_op); 1137 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
1146} 1138}
1147 1139
1148static const struct target_backend_ops tcmu_ops = { 1140static const struct target_backend_ops tcmu_ops = {
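
The tcmu conversion lets the queue path return sense_reason_t directly, so each failure keeps a distinct SCSI sense (a broken device or timeout maps to LUN communication failure, an oversized request to invalid CDB field) and the tcmu_pass_op() shim that collapsed everything into one code could be deleted. A sketch with invented enum values:

    #include <stdio.h>

    /* Invented stand-ins for the kernel's sense_reason_t values. */
    typedef enum {
        TCM_NO_SENSE,
        TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
        TCM_INVALID_CDB_FIELD,
    } sense_reason_t;

    struct cmd { int dev_broken; int too_big; int timed_out; };

    /* Returning sense_reason_t lets each failure keep its own meaning,
     * instead of an int a wrapper must flatten into a single code. */
    static sense_reason_t queue_cmd_ring(const struct cmd *c)
    {
        if (c->dev_broken || c->timed_out)
            return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        if (c->too_big)
            return TCM_INVALID_CDB_FIELD;
        return TCM_NO_SENSE;
    }

    int main(void)
    {
        struct cmd big = { .too_big = 1 };

        printf("reason=%d\n", queue_cmd_ring(&big)); /* 2: invalid CDB */
        return 0;
    }
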
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 75cd85426ae3..094a1440eacb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
104 } 104 }
105 mutex_unlock(&g_device_mutex); 105 mutex_unlock(&g_device_mutex);
106 106
107 pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); 107 pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
108 return -EINVAL; 108 return -EINVAL;
109} 109}
110 110
@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
185 185
186static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, 186static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
187 struct xcopy_op *xop, unsigned char *p, 187 struct xcopy_op *xop, unsigned char *p,
188 unsigned short tdll) 188 unsigned short tdll, sense_reason_t *sense_ret)
189{ 189{
190 struct se_device *local_dev = se_cmd->se_dev; 190 struct se_device *local_dev = se_cmd->se_dev;
191 unsigned char *desc = p; 191 unsigned char *desc = p;
@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
193 unsigned short start = 0; 193 unsigned short start = 0;
194 bool src = true; 194 bool src = true;
195 195
196 *sense_ret = TCM_INVALID_PARAMETER_LIST;
197
196 if (offset != 0) { 198 if (offset != 0) {
197 pr_err("XCOPY target descriptor list length is not" 199 pr_err("XCOPY target descriptor list length is not"
198 " multiple of %d\n", XCOPY_TARGET_DESC_LEN); 200 " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
243 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); 245 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
244 else 246 else
245 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); 247 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
246 248 /*
247 if (rc < 0) 249 * If a matching IEEE NAA 0x83 descriptor for the requested device
 250 * is not located on this node, return COPY_ABORTED with ASC/ASCQ
 251 * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
 252 * initiator to fall back to the normal copy method.
253 */
254 if (rc < 0) {
255 *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
248 goto out; 256 goto out;
257 }
249 258
250 pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n", 259 pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
251 xop->src_dev, &xop->src_tid_wwn[0]); 260 xop->src_dev, &xop->src_tid_wwn[0]);
@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
653 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], 662 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
654 remote_port, true); 663 remote_port, true);
655 if (rc < 0) { 664 if (rc < 0) {
665 ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
656 transport_generic_free_cmd(se_cmd, 0); 666 transport_generic_free_cmd(se_cmd, 0);
657 return rc; 667 return rc;
658 } 668 }
@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
664 674
665 rc = target_xcopy_issue_pt_cmd(xpt_cmd); 675 rc = target_xcopy_issue_pt_cmd(xpt_cmd);
666 if (rc < 0) { 676 if (rc < 0) {
677 ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
667 transport_generic_free_cmd(se_cmd, 0); 678 transport_generic_free_cmd(se_cmd, 0);
668 return rc; 679 return rc;
669 } 680 }
@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
714 remote_port, false); 725 remote_port, false);
715 if (rc < 0) { 726 if (rc < 0) {
716 struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd; 727 struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
728 ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
717 /* 729 /*
718 * If the failure happened before the t_mem_list hand-off in 730 * If the failure happened before the t_mem_list hand-off in
719 * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that 731 * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
729 741
730 rc = target_xcopy_issue_pt_cmd(xpt_cmd); 742 rc = target_xcopy_issue_pt_cmd(xpt_cmd);
731 if (rc < 0) { 743 if (rc < 0) {
744 ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
732 se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 745 se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
733 transport_generic_free_cmd(se_cmd, 0); 746 transport_generic_free_cmd(se_cmd, 0);
734 return rc; 747 return rc;
@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
815out: 828out:
816 xcopy_pt_undepend_remotedev(xop); 829 xcopy_pt_undepend_remotedev(xop);
817 kfree(xop); 830 kfree(xop);
818 831 /*
819 pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n"); 832 * Don't override an error scsi status if it has already been set
820 ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 833 */
834 if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
835 pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
836 " CHECK_CONDITION -> sending response\n", rc);
837 ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
838 }
821 target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); 839 target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
822} 840}
823 841
@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
875 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, 893 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
876 tdll, sdll, inline_dl); 894 tdll, sdll, inline_dl);
877 895
878 rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll); 896 rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
879 if (rc <= 0) 897 if (rc <= 0)
880 goto out; 898 goto out;
881 899
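
target_xcopy_parse_target_descriptors() now reports a specific sense code through an out-parameter: it defaults to TCM_INVALID_PARAMETER_LIST and refines that to TCM_COPY_TARGET_DEVICE_NOT_REACHABLE when the descriptor lookup fails, so target_do_xcopy() can tell the initiator to fall back to a normal copy. A minimal sketch of the out-parameter pattern (names and the -EINVAL stand-in are illustrative):

#include <stdio.h>

typedef enum {
	TCM_NO_SENSE = 0,
	TCM_INVALID_PARAMETER_LIST,
	TCM_COPY_TARGET_DEVICE_NOT_REACHABLE,
} sense_reason_t;

/* Hypothetical stand-in for locating the 0xe4 descriptor's device. */
static int locate_dev(int present)
{
	return present ? 0 : -22;	/* -EINVAL on failure */
}

/* Set a safe default sense code up front, refine it only on the one
 * failure that has a more specific meaning. */
static int parse_descriptors(int dev_present, sense_reason_t *sense_ret)
{
	int rc;

	*sense_ret = TCM_INVALID_PARAMETER_LIST;

	rc = locate_dev(dev_present);
	if (rc < 0) {
		*sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
		return rc;
	}
	return 1;	/* number of descriptors parsed */
}

int main(void)
{
	sense_reason_t sense;

	if (parse_descriptors(0, &sense) <= 0)
		printf("parse failed, sense=%d\n", sense);
	return 0;
}
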
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 216e18cc9133..ff5de9a96643 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -572,10 +572,10 @@ static void ft_send_work(struct work_struct *work)
572 if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, 572 if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
573 &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), 573 &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
574 ntohl(fcp->fc_dl), task_attr, data_dir, 574 ntohl(fcp->fc_dl), task_attr, data_dir,
575 TARGET_SCF_ACK_KREF)) 575 TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
576 goto err; 576 goto err;
577 577
578 pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); 578 pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
579 return; 579 return;
580 580
581err: 581err:
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6ffbb603d912..fd5c3de79470 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -39,6 +39,11 @@
39 39
40#include "tcm_fc.h" 40#include "tcm_fc.h"
41 41
42#define TFC_SESS_DBG(lport, fmt, args...) \
43 pr_debug("host%u: rport %6.6x: " fmt, \
44 (lport)->host->host_no, \
 45 (lport)->port_id, ##args)
46
42static void ft_sess_delete_all(struct ft_tport *); 47static void ft_sess_delete_all(struct ft_tport *);
43 48
44/* 49/*
@@ -167,24 +172,29 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
167 struct ft_tport *tport; 172 struct ft_tport *tport;
168 struct hlist_head *head; 173 struct hlist_head *head;
169 struct ft_sess *sess; 174 struct ft_sess *sess;
175 char *reason = "no session created";
170 176
171 rcu_read_lock(); 177 rcu_read_lock();
172 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]); 178 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
173 if (!tport) 179 if (!tport) {
180 reason = "not an FCP port";
174 goto out; 181 goto out;
182 }
175 183
176 head = &tport->hash[ft_sess_hash(port_id)]; 184 head = &tport->hash[ft_sess_hash(port_id)];
177 hlist_for_each_entry_rcu(sess, head, hash) { 185 hlist_for_each_entry_rcu(sess, head, hash) {
178 if (sess->port_id == port_id) { 186 if (sess->port_id == port_id) {
179 kref_get(&sess->kref); 187 kref_get(&sess->kref);
180 rcu_read_unlock(); 188 rcu_read_unlock();
181 pr_debug("port_id %x found %p\n", port_id, sess); 189 TFC_SESS_DBG(lport, "port_id %x found %p\n",
190 port_id, sess);
182 return sess; 191 return sess;
183 } 192 }
184 } 193 }
185out: 194out:
186 rcu_read_unlock(); 195 rcu_read_unlock();
187 pr_debug("port_id %x not found\n", port_id); 196 TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
197 port_id, reason);
188 return NULL; 198 return NULL;
189} 199}
190 200
@@ -195,7 +205,7 @@ static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
195 struct ft_tport *tport = sess->tport; 205 struct ft_tport *tport = sess->tport;
196 struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)]; 206 struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
197 207
198 pr_debug("port_id %x sess %p\n", sess->port_id, sess); 208 TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
199 hlist_add_head_rcu(&sess->hash, head); 209 hlist_add_head_rcu(&sess->hash, head);
200 tport->sess_count++; 210 tport->sess_count++;
201 211
@@ -223,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
223 233
224 sess = kzalloc(sizeof(*sess), GFP_KERNEL); 234 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
225 if (!sess) 235 if (!sess)
226 return NULL; 236 return ERR_PTR(-ENOMEM);
227 237
228 kref_init(&sess->kref); /* ref for table entry */ 238 kref_init(&sess->kref); /* ref for table entry */
229 sess->tport = tport; 239 sess->tport = tport;
@@ -234,8 +244,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
234 TARGET_PROT_NORMAL, &initiatorname[0], 244 TARGET_PROT_NORMAL, &initiatorname[0],
235 sess, ft_sess_alloc_cb); 245 sess, ft_sess_alloc_cb);
236 if (IS_ERR(sess->se_sess)) { 246 if (IS_ERR(sess->se_sess)) {
247 int rc = PTR_ERR(sess->se_sess);
237 kfree(sess); 248 kfree(sess);
238 return NULL; 249 sess = ERR_PTR(rc);
239 } 250 }
240 return sess; 251 return sess;
241} 252}
@@ -319,7 +330,7 @@ void ft_sess_close(struct se_session *se_sess)
319 mutex_unlock(&ft_lport_lock); 330 mutex_unlock(&ft_lport_lock);
320 return; 331 return;
321 } 332 }
322 pr_debug("port_id %x\n", port_id); 333 TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
323 ft_sess_unhash(sess); 334 ft_sess_unhash(sess);
324 mutex_unlock(&ft_lport_lock); 335 mutex_unlock(&ft_lport_lock);
325 ft_close_sess(sess); 336 ft_close_sess(sess);
@@ -379,8 +390,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
379 if (!(fcp_parm & FCP_SPPF_INIT_FCN)) 390 if (!(fcp_parm & FCP_SPPF_INIT_FCN))
380 return FC_SPP_RESP_CONF; 391 return FC_SPP_RESP_CONF;
381 sess = ft_sess_create(tport, rdata->ids.port_id, rdata); 392 sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
382 if (!sess) 393 if (IS_ERR(sess)) {
383 return FC_SPP_RESP_RES; 394 if (PTR_ERR(sess) == -EACCES) {
395 spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
396 return FC_SPP_RESP_CONF;
397 } else
398 return FC_SPP_RESP_RES;
399 }
384 if (!sess->params) 400 if (!sess->params)
385 rdata->prli_count++; 401 rdata->prli_count++;
386 sess->params = fcp_parm; 402 sess->params = fcp_parm;
@@ -423,8 +439,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
423 mutex_lock(&ft_lport_lock); 439 mutex_lock(&ft_lport_lock);
424 ret = ft_prli_locked(rdata, spp_len, rspp, spp); 440 ret = ft_prli_locked(rdata, spp_len, rspp, spp);
425 mutex_unlock(&ft_lport_lock); 441 mutex_unlock(&ft_lport_lock);
426 pr_debug("port_id %x flags %x ret %x\n", 442 TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
427 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret); 443 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
428 return ret; 444 return ret;
429} 445}
430 446
@@ -477,11 +493,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
477 struct ft_sess *sess; 493 struct ft_sess *sess;
478 u32 sid = fc_frame_sid(fp); 494 u32 sid = fc_frame_sid(fp);
479 495
480 pr_debug("sid %x\n", sid); 496 TFC_SESS_DBG(lport, "recv sid %x\n", sid);
481 497
482 sess = ft_sess_get(lport, sid); 498 sess = ft_sess_get(lport, sid);
483 if (!sess) { 499 if (!sess) {
484 pr_debug("sid %x sess lookup failed\n", sid); 500 TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
485 /* TBD XXX - if FCP_CMND, send PRLO */ 501 /* TBD XXX - if FCP_CMND, send PRLO */
486 fc_frame_free(fp); 502 fc_frame_free(fp);
487 return; 503 return;
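
ft_sess_create() stops collapsing every failure to NULL and instead encodes the errno in the returned pointer, which lets ft_prli_locked() answer -EACCES with FC_SPP_RESP_CONF (no image pair) rather than a generic resource failure. A userspace sketch of the ERR_PTR idiom; the helpers are re-implemented here only to keep the example self-contained (the kernel provides them in <linux/err.h>):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace re-implementations of the kernel's ERR_PTR helpers. */
static inline void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

struct sess { int port_id; };

/* The failure cause travels in the pointer itself. */
static struct sess *sess_create(int port_id, int allowed)
{
	struct sess *s;

	if (!allowed)
		return ERR_PTR(-EACCES);
	s = malloc(sizeof(*s));
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->port_id = port_id;
	return s;
}

int main(void)
{
	struct sess *s = sess_create(0x010200, 0);

	if (IS_ERR(s)) {
		/* -EACCES can get a different response than -ENOMEM. */
		printf("create failed: %ld\n", PTR_ERR(s));
		return 1;
	}
	free(s);
	return 0;
}
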
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 9b4815e81b0d..19bf2028e508 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -20,10 +20,13 @@
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/acpi.h>
23#include <linux/thermal.h> 24#include <linux/thermal.h>
24#include <linux/pm.h> 25#include <linux/pm.h>
25 26
26/* Intel PCH thermal Device IDs */ 27/* Intel PCH thermal Device IDs */
28#define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */
29#define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */
27#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ 30#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
28#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */ 31#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
29 32
@@ -66,9 +69,53 @@ struct pch_thermal_device {
66 unsigned long crt_temp; 69 unsigned long crt_temp;
67 int hot_trip_id; 70 int hot_trip_id;
68 unsigned long hot_temp; 71 unsigned long hot_temp;
72 int psv_trip_id;
73 unsigned long psv_temp;
69 bool bios_enabled; 74 bool bios_enabled;
70}; 75};
71 76
77#ifdef CONFIG_ACPI
78
79/*
80 * On some platforms, there is a companion ACPI device, which adds
 81 * a passive trip temperature using the _PSV method. There is no specific
 82 * passive temperature setting in the MMIO interface of this PCI device.
83 */
84static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
85 int *nr_trips)
86{
87 struct acpi_device *adev;
88
89 ptd->psv_trip_id = -1;
90
91 adev = ACPI_COMPANION(&ptd->pdev->dev);
92 if (adev) {
93 unsigned long long r;
94 acpi_status status;
95
96 status = acpi_evaluate_integer(adev->handle, "_PSV", NULL,
97 &r);
98 if (ACPI_SUCCESS(status)) {
99 unsigned long trip_temp;
100
101 trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
102 if (trip_temp) {
103 ptd->psv_temp = trip_temp;
104 ptd->psv_trip_id = *nr_trips;
105 ++(*nr_trips);
106 }
107 }
108 }
109}
110#else
111static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
112 int *nr_trips)
113{
114 ptd->psv_trip_id = -1;
115
116}
117#endif
118
72static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) 119static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
73{ 120{
74 u8 tsel; 121 u8 tsel;
@@ -119,6 +166,8 @@ read_trips:
119 ++(*nr_trips); 166 ++(*nr_trips);
120 } 167 }
121 168
169 pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
170
122 return 0; 171 return 0;
123} 172}
124 173
@@ -194,6 +243,8 @@ static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
194 *type = THERMAL_TRIP_CRITICAL; 243 *type = THERMAL_TRIP_CRITICAL;
195 else if (ptd->hot_trip_id == trip) 244 else if (ptd->hot_trip_id == trip)
196 *type = THERMAL_TRIP_HOT; 245 *type = THERMAL_TRIP_HOT;
246 else if (ptd->psv_trip_id == trip)
247 *type = THERMAL_TRIP_PASSIVE;
197 else 248 else
198 return -EINVAL; 249 return -EINVAL;
199 250
@@ -208,6 +259,8 @@ static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *tem
208 *temp = ptd->crt_temp; 259 *temp = ptd->crt_temp;
209 else if (ptd->hot_trip_id == trip) 260 else if (ptd->hot_trip_id == trip)
210 *temp = ptd->hot_temp; 261 *temp = ptd->hot_temp;
262 else if (ptd->psv_trip_id == trip)
263 *temp = ptd->psv_temp;
211 else 264 else
212 return -EINVAL; 265 return -EINVAL;
213 266
@@ -242,6 +295,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev,
242 ptd->ops = &pch_dev_ops_wpt; 295 ptd->ops = &pch_dev_ops_wpt;
243 dev_name = "pch_skylake"; 296 dev_name = "pch_skylake";
244 break; 297 break;
298 case PCH_THERMAL_DID_HSW_1:
299 case PCH_THERMAL_DID_HSW_2:
300 ptd->ops = &pch_dev_ops_wpt;
301 dev_name = "pch_haswell";
302 break;
245 default: 303 default:
246 dev_err(&pdev->dev, "unknown pch thermal device\n"); 304 dev_err(&pdev->dev, "unknown pch thermal device\n");
247 return -ENODEV; 305 return -ENODEV;
@@ -324,6 +382,8 @@ static int intel_pch_thermal_resume(struct device *device)
324static struct pci_device_id intel_pch_thermal_id[] = { 382static struct pci_device_id intel_pch_thermal_id[] = {
325 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) }, 383 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
326 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) }, 384 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
385 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) },
386 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) },
327 { 0, }, 387 { 0, },
328}; 388};
329MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); 389MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
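
The new pch_wpt_add_acpi_psv_trip() asks the companion ACPI device for a passive trip point via _PSV; ACPI temperature methods return tenths of a Kelvin, and the driver converts that to the millidegrees Celsius the thermal core expects with DECI_KELVIN_TO_MILLICELSIUS(). A sketch of just that conversion:

#include <stdio.h>

/* Same arithmetic as the kernel's DECI_KELVIN_TO_MILLICELSIUS():
 * deci-Kelvin -> milli-Celsius. */
static long deci_kelvin_to_millicelsius(unsigned long long dk)
{
	return (long)(dk * 100 - 273150);
}

int main(void)
{
	/* A _PSV return of 3531 (353.1 K) is a 79950 mC passive trip. */
	printf("%ld mC\n", deci_kelvin_to_millicelsius(3531));
	return 0;
}
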
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 0e4dc0afcfd2..7a223074df3d 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
669 .set_cur_state = powerclamp_set_cur_state, 669 .set_cur_state = powerclamp_set_cur_state,
670}; 670};
671 671
672static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
673 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
674 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
675 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
676 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
677 {}
678};
679MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
680
681static int __init powerclamp_probe(void) 672static int __init powerclamp_probe(void)
682{ 673{
683 if (!x86_match_cpu(intel_powerclamp_ids)) { 674 if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
 684 pr_err("Intel powerclamp does not run on family %d model %d\n", 675 pr_err("CPU does not support MWAIT\n");
685 boot_cpu_data.x86, boot_cpu_data.x86_model);
686 return -ENODEV; 676 return -ENODEV;
687 } 677 }
688 678
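
The powerclamp probe check collapses the x86_cpu_id match table into a single boot_cpu_has(X86_FEATURE_MWAIT) test, MWAIT being the one hard requirement; dropping the table also removes the module alias it generated. A rough userspace analogue using raw CPUID (leaf 1, ECX bit 3 is the MONITOR/MWAIT feature flag):

#include <cpuid.h>
#include <stdio.h>

/* Userspace analogue of boot_cpu_has(X86_FEATURE_MWAIT). */
static int cpu_has_mwait(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 0;
	return !!(ecx & (1u << 3));
}

int main(void)
{
	if (!cpu_has_mwait()) {
		fprintf(stderr, "CPU does not support MWAIT\n");
		return 1;
	}
	puts("MWAIT supported");
	return 0;
}
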
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 3b1ca4411073..a2564ab91e62 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
686 if (!pages) 686 if (!pages)
687 return -ENOMEM; 687 return -ENOMEM;
688 688
689 ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE, 689 ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
690 0, pages); 690 FOLL_WRITE);
691 691
692 if (ret < nr_pages) { 692 if (ret < nr_pages) {
693 nr_pages = ret; 693 nr_pages = ret;
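
This hunk, together with the fsl_hypervisor.c and fs/exec.c hunks below, is part of a tree-wide conversion that folds the old write/force boolean arguments of the get_user_pages*() family into a single gup_flags word. A toy sketch of the call-site pattern; pin_pages() is a stand-in, and the bit values mirror FOLL_WRITE/FOLL_FORCE as defined at the time:

#include <stdio.h>

#define FOLL_WRITE 0x01	/* intent to write to the pages */
#define FOLL_FORCE 0x10	/* override permissions, e.g. for ptrace */

/* Stand-in for get_user_pages(): one flags word, no bool pair. */
static int pin_pages(unsigned long start, int nr, unsigned int gup_flags)
{
	printf("pin %d pages at %#lx, flags=%#x\n", nr, start, gup_flags);
	return nr;
}

int main(void)
{
	int write = 1;
	unsigned int gup_flags = FOLL_FORCE;

	/* Call sites translate intent into flags once, up front,
	 * as in the fs/exec.c hunk below. */
	if (write)
		gup_flags |= FOLL_WRITE;
	pin_pages(0x1000, 4, gup_flags);
	return 0;
}
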
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 60bdad3a689b..150ce2abf6c8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
245 /* Get the physical addresses of the source buffer */ 245 /* Get the physical addresses of the source buffer */
246 down_read(&current->mm->mmap_sem); 246 down_read(&current->mm->mmap_sem);
247 num_pinned = get_user_pages(param.local_vaddr - lb_offset, 247 num_pinned = get_user_pages(param.local_vaddr - lb_offset,
248 num_pages, (param.source == -1) ? READ : WRITE, 248 num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
249 0, pages, NULL); 249 pages, NULL);
250 up_read(&current->mm->mmap_sem); 250 up_read(&current->mm->mmap_sem);
251 251
252 if (num_pinned != num_pages) { 252 if (num_pinned != num_pages) {
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e473e3b23720..6d1fbda0f461 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -499,6 +499,10 @@ static int wdat_wdt_resume_noirq(struct device *dev)
499 ret = wdat_wdt_enable_reboot(wdat); 499 ret = wdat_wdt_enable_reboot(wdat);
500 if (ret) 500 if (ret)
501 return ret; 501 return ret;
502
503 ret = wdat_wdt_ping(&wdat->wdd);
504 if (ret)
505 return ret;
502 } 506 }
503 507
504 return wdat_wdt_start(&wdat->wdd); 508 return wdat_wdt_start(&wdat->wdd);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index e12bd3635f83..26e5e8507f03 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -168,7 +168,9 @@ out:
168#endif /* CONFIG_HIBERNATE_CALLBACKS */ 168#endif /* CONFIG_HIBERNATE_CALLBACKS */
169 169
170struct shutdown_handler { 170struct shutdown_handler {
171 const char *command; 171#define SHUTDOWN_CMD_SIZE 11
172 const char command[SHUTDOWN_CMD_SIZE];
173 bool flag;
172 void (*cb)(void); 174 void (*cb)(void);
173}; 175};
174 176
@@ -206,22 +208,22 @@ static void do_reboot(void)
206 ctrl_alt_del(); 208 ctrl_alt_del();
207} 209}
208 210
211static struct shutdown_handler shutdown_handlers[] = {
212 { "poweroff", true, do_poweroff },
213 { "halt", false, do_poweroff },
214 { "reboot", true, do_reboot },
215#ifdef CONFIG_HIBERNATE_CALLBACKS
216 { "suspend", true, do_suspend },
217#endif
218};
219
209static void shutdown_handler(struct xenbus_watch *watch, 220static void shutdown_handler(struct xenbus_watch *watch,
210 const char **vec, unsigned int len) 221 const char **vec, unsigned int len)
211{ 222{
212 char *str; 223 char *str;
213 struct xenbus_transaction xbt; 224 struct xenbus_transaction xbt;
214 int err; 225 int err;
215 static struct shutdown_handler handlers[] = { 226 int idx;
216 { "poweroff", do_poweroff },
217 { "halt", do_poweroff },
218 { "reboot", do_reboot },
219#ifdef CONFIG_HIBERNATE_CALLBACKS
220 { "suspend", do_suspend },
221#endif
222 {NULL, NULL},
223 };
224 static struct shutdown_handler *handler;
225 227
226 if (shutting_down != SHUTDOWN_INVALID) 228 if (shutting_down != SHUTDOWN_INVALID)
227 return; 229 return;
@@ -238,13 +240,13 @@ static void shutdown_handler(struct xenbus_watch *watch,
238 return; 240 return;
239 } 241 }
240 242
241 for (handler = &handlers[0]; handler->command; handler++) { 243 for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
242 if (strcmp(str, handler->command) == 0) 244 if (strcmp(str, shutdown_handlers[idx].command) == 0)
243 break; 245 break;
244 } 246 }
245 247
246 /* Only acknowledge commands which we are prepared to handle. */ 248 /* Only acknowledge commands which we are prepared to handle. */
247 if (handler->cb) 249 if (idx < ARRAY_SIZE(shutdown_handlers))
248 xenbus_write(xbt, "control", "shutdown", ""); 250 xenbus_write(xbt, "control", "shutdown", "");
249 251
250 err = xenbus_transaction_end(xbt, 0); 252 err = xenbus_transaction_end(xbt, 0);
@@ -253,8 +255,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
253 goto again; 255 goto again;
254 } 256 }
255 257
256 if (handler->cb) { 258 if (idx < ARRAY_SIZE(shutdown_handlers)) {
257 handler->cb(); 259 shutdown_handlers[idx].cb();
258 } else { 260 } else {
259 pr_info("Ignoring shutdown request: %s\n", str); 261 pr_info("Ignoring shutdown request: %s\n", str);
260 shutting_down = SHUTDOWN_INVALID; 262 shutting_down = SHUTDOWN_INVALID;
@@ -310,6 +312,9 @@ static struct notifier_block xen_reboot_nb = {
310static int setup_shutdown_watcher(void) 312static int setup_shutdown_watcher(void)
311{ 313{
312 int err; 314 int err;
315 int idx;
316#define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-"))
317 char node[FEATURE_PATH_SIZE];
313 318
314 err = register_xenbus_watch(&shutdown_watch); 319 err = register_xenbus_watch(&shutdown_watch);
315 if (err) { 320 if (err) {
@@ -326,6 +331,14 @@ static int setup_shutdown_watcher(void)
326 } 331 }
327#endif 332#endif
328 333
334 for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
335 if (!shutdown_handlers[idx].flag)
336 continue;
337 snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
338 shutdown_handlers[idx].command);
339 xenbus_printf(XBT_NIL, "control", node, "%u", 1);
340 }
341
329 return 0; 342 return 0;
330} 343}
331 344
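
The xen/manage.c rework turns the handler list into a fixed array walked with ARRAY_SIZE() (no NULL sentinel) and adds a flag so supported commands are advertised as control/feature-<command> nodes in xenstore. A self-contained sketch of the iteration and node-name construction:

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct shutdown_handler {
	const char command[11];
	bool advertise;
};

static const struct shutdown_handler handlers[] = {
	{ "poweroff", true },
	{ "halt", false },
	{ "reboot", true },
};

int main(void)
{
	char node[sizeof("feature-") + sizeof(handlers[0].command)];
	size_t i;

	/* Mirrors setup_shutdown_watcher(): publish one feature node
	 * per flagged command. */
	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
		if (!handlers[i].advertise)
			continue;
		snprintf(node, sizeof(node), "feature-%s",
			 handlers[i].command);
		printf("xenbus write control/%s = 1\n", node);
	}
	return 0;
}
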
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c1010f018bd8..1e8be12ebb55 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -364,7 +364,7 @@ out:
364 364
365static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) 365static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
366{ 366{
367 struct watch_adapter *watch, *tmp_watch; 367 struct watch_adapter *watch;
368 char *path, *token; 368 char *path, *token;
369 int err, rc; 369 int err, rc;
370 LIST_HEAD(staging_q); 370 LIST_HEAD(staging_q);
@@ -399,7 +399,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
399 } 399 }
400 list_add(&watch->list, &u->watches); 400 list_add(&watch->list, &u->watches);
401 } else { 401 } else {
402 list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { 402 list_for_each_entry(watch, &u->watches, list) {
403 if (!strcmp(watch->token, token) && 403 if (!strcmp(watch->token, token) &&
404 !strcmp(watch->watch.node, path)) { 404 !strcmp(watch->watch.node, path)) {
405 unregister_xenbus_watch(&watch->watch); 405 unregister_xenbus_watch(&watch->watch);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 611a23119675..6d40a972ffb2 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -335,7 +335,9 @@ static int backend_state;
335static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, 335static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
336 const char **v, unsigned int l) 336 const char **v, unsigned int l)
337{ 337{
338 xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state); 338 if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i",
339 &backend_state) != 1)
340 backend_state = XenbusStateUnknown;
339 printk(KERN_DEBUG "XENBUS: backend %s %s\n", 341 printk(KERN_DEBUG "XENBUS: backend %s %s\n",
340 v[XS_WATCH_PATH], xenbus_strstate(backend_state)); 342 v[XS_WATCH_PATH], xenbus_strstate(backend_state));
341 wake_up(&backend_state_wq); 343 wake_up(&backend_state_wq);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ccc70d96958d..d4d8b7e36b2f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -698,7 +698,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
698 698
699 ret = btrfs_map_bio(root, comp_bio, mirror_num, 0); 699 ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
700 if (ret) { 700 if (ret) {
701 bio->bi_error = ret; 701 comp_bio->bi_error = ret;
702 bio_endio(comp_bio); 702 bio_endio(comp_bio);
703 } 703 }
704 704
@@ -728,7 +728,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
728 728
729 ret = btrfs_map_bio(root, comp_bio, mirror_num, 0); 729 ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
730 if (ret) { 730 if (ret) {
731 bio->bi_error = ret; 731 comp_bio->bi_error = ret;
732 bio_endio(comp_bio); 732 bio_endio(comp_bio);
733 } 733 }
734 734
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 01bc36cec26e..71261b459863 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5805,6 +5805,64 @@ static int changed_extent(struct send_ctx *sctx,
5805 int ret = 0; 5805 int ret = 0;
5806 5806
5807 if (sctx->cur_ino != sctx->cmp_key->objectid) { 5807 if (sctx->cur_ino != sctx->cmp_key->objectid) {
5808
5809 if (result == BTRFS_COMPARE_TREE_CHANGED) {
5810 struct extent_buffer *leaf_l;
5811 struct extent_buffer *leaf_r;
5812 struct btrfs_file_extent_item *ei_l;
5813 struct btrfs_file_extent_item *ei_r;
5814
5815 leaf_l = sctx->left_path->nodes[0];
5816 leaf_r = sctx->right_path->nodes[0];
5817 ei_l = btrfs_item_ptr(leaf_l,
5818 sctx->left_path->slots[0],
5819 struct btrfs_file_extent_item);
5820 ei_r = btrfs_item_ptr(leaf_r,
5821 sctx->right_path->slots[0],
5822 struct btrfs_file_extent_item);
5823
5824 /*
5825 * We may have found an extent item that has changed
5826 * only its disk_bytenr field and the corresponding
5827 * inode item was not updated. This case happens due to
5828 * very specific timings during relocation when a leaf
5829 * that contains file extent items is COWed while
 5830 * relocation is ongoing and it's in the stage where it
5831 * updates data pointers. So when this happens we can
5832 * safely ignore it since we know it's the same extent,
5833 * but just at different logical and physical locations
5834 * (when an extent is fully replaced with a new one, we
5835 * know the generation number must have changed too,
5836 * since snapshot creation implies committing the current
5837 * transaction, and the inode item must have been updated
5838 * as well).
5839 * This replacement of the disk_bytenr happens at
5840 * relocation.c:replace_file_extents() through
5841 * relocation.c:btrfs_reloc_cow_block().
5842 */
5843 if (btrfs_file_extent_generation(leaf_l, ei_l) ==
5844 btrfs_file_extent_generation(leaf_r, ei_r) &&
5845 btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
5846 btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
5847 btrfs_file_extent_compression(leaf_l, ei_l) ==
5848 btrfs_file_extent_compression(leaf_r, ei_r) &&
5849 btrfs_file_extent_encryption(leaf_l, ei_l) ==
5850 btrfs_file_extent_encryption(leaf_r, ei_r) &&
5851 btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
5852 btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
5853 btrfs_file_extent_type(leaf_l, ei_l) ==
5854 btrfs_file_extent_type(leaf_r, ei_r) &&
5855 btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
5856 btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
5857 btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
5858 btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
5859 btrfs_file_extent_offset(leaf_l, ei_l) ==
5860 btrfs_file_extent_offset(leaf_r, ei_r) &&
5861 btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
5862 btrfs_file_extent_num_bytes(leaf_r, ei_r))
5863 return 0;
5864 }
5865
5808 inconsistent_snapshot_error(sctx, result, "extent"); 5866 inconsistent_snapshot_error(sctx, result, "extent");
5809 return -EIO; 5867 return -EIO;
5810 } 5868 }
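
The new btrfs send check recognizes a file extent item whose disk_bytenr alone changed while every other field, including the generation, is identical; relocation produces exactly that for unchanged data moved to a new physical location, so the pair is safe to skip instead of raising an inconsistent-snapshot error. A compact sketch of the comparison, with a plain struct standing in for the on-leaf item:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct file_extent {
	uint64_t generation, disk_bytenr, disk_num_bytes;
	uint64_t offset, num_bytes, ram_bytes;
	uint8_t compression, encryption, type;
};

/* True when only the physical location (disk_bytenr) differs. */
static bool same_extent_relocated(const struct file_extent *l,
				  const struct file_extent *r)
{
	return l->generation == r->generation &&
	       l->ram_bytes == r->ram_bytes &&
	       l->compression == r->compression &&
	       l->encryption == r->encryption &&
	       l->type == r->type &&
	       l->disk_bytenr != r->disk_bytenr &&
	       l->disk_num_bytes == r->disk_num_bytes &&
	       l->offset == r->offset &&
	       l->num_bytes == r->num_bytes;
}

int main(void)
{
	struct file_extent a = {
		.generation = 7, .disk_bytenr = 4096,
		.disk_num_bytes = 8192, .num_bytes = 8192,
		.ram_bytes = 8192, .type = 1,
	};
	struct file_extent b = a;

	b.disk_bytenr = 65536;
	puts(same_extent_relocated(&a, &b) ? "skip" : "inconsistent");
	return 0;
}
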
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 528cae123dc9..3d33c4e41e5f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2713,14 +2713,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2713 int index, int error) 2713 int index, int error)
2714{ 2714{
2715 struct btrfs_log_ctx *ctx; 2715 struct btrfs_log_ctx *ctx;
2716 struct btrfs_log_ctx *safe;
2716 2717
2717 if (!error) { 2718 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2718 INIT_LIST_HEAD(&root->log_ctxs[index]); 2719 list_del_init(&ctx->list);
2719 return;
2720 }
2721
2722 list_for_each_entry(ctx, &root->log_ctxs[index], list)
2723 ctx->log_ret = error; 2720 ctx->log_ret = error;
2721 }
2724 2722
2725 INIT_LIST_HEAD(&root->log_ctxs[index]); 2723 INIT_LIST_HEAD(&root->log_ctxs[index]);
2726} 2724}
@@ -2961,13 +2959,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2961 mutex_unlock(&root->log_mutex); 2959 mutex_unlock(&root->log_mutex);
2962 2960
2963out_wake_log_root: 2961out_wake_log_root:
2964 /* 2962 mutex_lock(&log_root_tree->log_mutex);
2965 * We needn't get log_mutex here because we are sure all
2966 * the other tasks are blocked.
2967 */
2968 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 2963 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2969 2964
2970 mutex_lock(&log_root_tree->log_mutex);
2971 log_root_tree->log_transid_committed++; 2965 log_root_tree->log_transid_committed++;
2972 atomic_set(&log_root_tree->log_commit[index2], 0); 2966 atomic_set(&log_root_tree->log_commit[index2], 0);
2973 mutex_unlock(&log_root_tree->log_mutex); 2967 mutex_unlock(&log_root_tree->log_mutex);
@@ -2978,10 +2972,8 @@ out_wake_log_root:
2978 if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) 2972 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2979 wake_up(&log_root_tree->log_commit_wait[index2]); 2973 wake_up(&log_root_tree->log_commit_wait[index2]);
2980out: 2974out:
2981 /* See above. */
2982 btrfs_remove_all_log_ctxs(root, index1, ret);
2983
2984 mutex_lock(&root->log_mutex); 2975 mutex_lock(&root->log_mutex);
2976 btrfs_remove_all_log_ctxs(root, index1, ret);
2985 root->log_transid_committed++; 2977 root->log_transid_committed++;
2986 atomic_set(&root->log_commit[index1], 0); 2978 atomic_set(&root->log_commit[index1], 0);
2987 mutex_unlock(&root->log_mutex); 2979 mutex_unlock(&root->log_mutex);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 7bf08825cc11..18630e800208 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1272,7 +1272,8 @@ again:
1272 statret = __ceph_do_getattr(inode, page, 1272 statret = __ceph_do_getattr(inode, page,
1273 CEPH_STAT_CAP_INLINE_DATA, !!page); 1273 CEPH_STAT_CAP_INLINE_DATA, !!page);
1274 if (statret < 0) { 1274 if (statret < 0) {
1275 __free_page(page); 1275 if (page)
1276 __free_page(page);
1276 if (statret == -ENODATA) { 1277 if (statret == -ENODATA) {
1277 BUG_ON(retry_op != READ_INLINE); 1278 BUG_ON(retry_op != READ_INLINE);
1278 goto again; 1279 goto again;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index bca1b49c1c4b..ef4d04647325 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1511,7 +1511,8 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1511 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir); 1511 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1512 } 1512 }
1513 1513
1514 if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) { 1514 if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
1515 !(rinfo->hash_order && req->r_path2)) {
1515 /* note dir version at start of readdir so we can tell 1516 /* note dir version at start of readdir so we can tell
1516 * if any dentries get dropped */ 1517 * if any dentries get dropped */
1517 req->r_dir_release_cnt = atomic64_read(&ci->i_release_count); 1518 req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a29ffce98187..b382e5910eea 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -845,6 +845,8 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
845 err = ceph_fs_debugfs_init(fsc); 845 err = ceph_fs_debugfs_init(fsc);
846 if (err < 0) 846 if (err < 0)
847 goto fail; 847 goto fail;
848 } else {
849 root = dget(fsc->sb->s_root);
848 } 850 }
849 851
850 fsc->mount_state = CEPH_MOUNT_MOUNTED; 852 fsc->mount_state = CEPH_MOUNT_MOUNTED;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 40b703217977..febc28f9e2c2 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -16,7 +16,7 @@
16static int __remove_xattr(struct ceph_inode_info *ci, 16static int __remove_xattr(struct ceph_inode_info *ci,
17 struct ceph_inode_xattr *xattr); 17 struct ceph_inode_xattr *xattr);
18 18
19const struct xattr_handler ceph_other_xattr_handler; 19static const struct xattr_handler ceph_other_xattr_handler;
20 20
21/* 21/*
22 * List of handlers for synthetic system.* attributes. Other 22 * List of handlers for synthetic system.* attributes. Other
@@ -1086,7 +1086,7 @@ static int ceph_set_xattr_handler(const struct xattr_handler *handler,
1086 return __ceph_setxattr(inode, name, value, size, flags); 1086 return __ceph_setxattr(inode, name, value, size, flags);
1087} 1087}
1088 1088
1089const struct xattr_handler ceph_other_xattr_handler = { 1089static const struct xattr_handler ceph_other_xattr_handler = {
1090 .prefix = "", /* match any name => handlers called with full name */ 1090 .prefix = "", /* match any name => handlers called with full name */
1091 .get = ceph_get_xattr_handler, 1091 .get = ceph_get_xattr_handler,
1092 .set = ceph_set_xattr_handler, 1092 .set = ceph_set_xattr_handler,
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 61057b7dbddb..98f87fe8f186 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -151,7 +151,10 @@ static int do_page_crypto(struct inode *inode,
151 struct page *src_page, struct page *dest_page, 151 struct page *src_page, struct page *dest_page,
152 gfp_t gfp_flags) 152 gfp_t gfp_flags)
153{ 153{
154 u8 xts_tweak[FS_XTS_TWEAK_SIZE]; 154 struct {
155 __le64 index;
156 u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
157 } xts_tweak;
155 struct skcipher_request *req = NULL; 158 struct skcipher_request *req = NULL;
156 DECLARE_FS_COMPLETION_RESULT(ecr); 159 DECLARE_FS_COMPLETION_RESULT(ecr);
157 struct scatterlist dst, src; 160 struct scatterlist dst, src;
@@ -171,17 +174,15 @@ static int do_page_crypto(struct inode *inode,
171 req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 174 req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
172 page_crypt_complete, &ecr); 175 page_crypt_complete, &ecr);
173 176
174 BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index)); 177 BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
175 memcpy(xts_tweak, &index, sizeof(index)); 178 xts_tweak.index = cpu_to_le64(index);
176 memset(&xts_tweak[sizeof(index)], 0, 179 memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
177 FS_XTS_TWEAK_SIZE - sizeof(index));
178 180
179 sg_init_table(&dst, 1); 181 sg_init_table(&dst, 1);
180 sg_set_page(&dst, dest_page, PAGE_SIZE, 0); 182 sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
181 sg_init_table(&src, 1); 183 sg_init_table(&src, 1);
182 sg_set_page(&src, src_page, PAGE_SIZE, 0); 184 sg_set_page(&src, src_page, PAGE_SIZE, 0);
183 skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, 185 skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
184 xts_tweak);
185 if (rw == FS_DECRYPT) 186 if (rw == FS_DECRYPT)
186 res = crypto_skcipher_decrypt(req); 187 res = crypto_skcipher_decrypt(req);
187 else 188 else
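
Replacing the raw tweak buffer with a struct whose first member is __le64 fixes the IV layout to an explicit little-endian page index plus zero padding, so the bytes fed to XTS no longer depend on host endianness. A userspace sketch that builds the same 16-byte tweak with an explicit encoding:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XTS_TWEAK_SIZE 16

/* Little-endian 64-bit index followed by zero padding, matching the
 * layout the fscrypt hunk above pins down. */
static void build_tweak(uint64_t index, uint8_t tweak[XTS_TWEAK_SIZE])
{
	int i;

	for (i = 0; i < 8; i++)
		tweak[i] = (uint8_t)(index >> (8 * i)); /* cpu_to_le64 */
	memset(tweak + 8, 0, XTS_TWEAK_SIZE - 8);
}

int main(void)
{
	uint8_t tweak[XTS_TWEAK_SIZE];
	int i;

	build_tweak(0x0102030405060708ULL, tweak);
	for (i = 0; i < XTS_TWEAK_SIZE; i++)
		printf("%02x", tweak[i]);
	putchar('\n');
	return 0;
}
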
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index ed115acb5dee..6865663aac69 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -109,6 +109,8 @@ int fscrypt_process_policy(struct file *filp,
109 if (ret) 109 if (ret)
110 return ret; 110 return ret;
111 111
112 inode_lock(inode);
113
112 if (!inode_has_encryption_context(inode)) { 114 if (!inode_has_encryption_context(inode)) {
113 if (!S_ISDIR(inode->i_mode)) 115 if (!S_ISDIR(inode->i_mode))
114 ret = -EINVAL; 116 ret = -EINVAL;
@@ -127,6 +129,8 @@ int fscrypt_process_policy(struct file *filp,
127 ret = -EINVAL; 129 ret = -EINVAL;
128 } 130 }
129 131
132 inode_unlock(inode);
133
130 mnt_drop_write_file(filp); 134 mnt_drop_write_file(filp);
131 return ret; 135 return ret;
132} 136}
diff --git a/fs/exec.c b/fs/exec.c
index 6fcfb3f7b137..4e497b9ee71e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
191{ 191{
192 struct page *page; 192 struct page *page;
193 int ret; 193 int ret;
194 unsigned int gup_flags = FOLL_FORCE;
194 195
195#ifdef CONFIG_STACK_GROWSUP 196#ifdef CONFIG_STACK_GROWSUP
196 if (write) { 197 if (write) {
@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
199 return NULL; 200 return NULL;
200 } 201 }
201#endif 202#endif
203
204 if (write)
205 gup_flags |= FOLL_WRITE;
206
202 /* 207 /*
203 * We are doing an exec(). 'current' is the process 208 * We are doing an exec(). 'current' is the process
204 * doing the exec and bprm->mm is the new process's mm. 209 * doing the exec and bprm->mm is the new process's mm.
205 */ 210 */
206 ret = get_user_pages_remote(current, bprm->mm, pos, 1, write, 211 ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
207 1, &page, NULL); 212 &page, NULL);
208 if (ret <= 0) 213 if (ret <= 0)
209 return NULL; 214 return NULL;
210 215
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 79101651fe9e..42f9a0a0c4ca 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -137,7 +137,7 @@ Espan:
137bad_entry: 137bad_entry:
138 EXOFS_ERR( 138 EXOFS_ERR(
139 "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - " 139 "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
140 "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n", 140 "offset=%lu, inode=0x%llx, rec_len=%d, name_len=%d\n",
141 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, 141 dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
142 _LLU(le64_to_cpu(p->inode_no)), 142 _LLU(le64_to_cpu(p->inode_no)),
143 rec_len, p->name_len); 143 rec_len, p->name_len);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index d831e24dc885..41b8b44a391c 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -622,7 +622,7 @@ static int ext2_get_blocks(struct inode *inode,
622 u32 *bno, bool *new, bool *boundary, 622 u32 *bno, bool *new, bool *boundary,
623 int create) 623 int create)
624{ 624{
625 int err = -EIO; 625 int err;
626 int offsets[4]; 626 int offsets[4];
627 Indirect chain[4]; 627 Indirect chain[4];
628 Indirect *partial; 628 Indirect *partial;
@@ -639,7 +639,7 @@ static int ext2_get_blocks(struct inode *inode,
639 depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary); 639 depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
640 640
641 if (depth == 0) 641 if (depth == 0)
642 return (err); 642 return -EIO;
643 643
644 partial = ext2_get_branch(inode, depth, offsets, chain, &err); 644 partial = ext2_get_branch(inode, depth, offsets, chain, &err);
645 /* Simplest case - block found, no allocation needed */ 645 /* Simplest case - block found, no allocation needed */
@@ -761,7 +761,6 @@ static int ext2_get_blocks(struct inode *inode,
761 ext2_splice_branch(inode, iblock, partial, indirect_blks, count); 761 ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
762 mutex_unlock(&ei->truncate_mutex); 762 mutex_unlock(&ei->truncate_mutex);
763got_it: 763got_it:
764 *bno = le32_to_cpu(chain[depth-1].key);
765 if (count > blocks_to_boundary) 764 if (count > blocks_to_boundary)
766 *boundary = true; 765 *boundary = true;
767 err = count; 766 err = count;
@@ -772,6 +771,8 @@ cleanup:
772 brelse(partial->bh); 771 brelse(partial->bh);
773 partial--; 772 partial--;
774 } 773 }
774 if (err > 0)
775 *bno = le32_to_cpu(chain[depth-1].key);
775 return err; 776 return err;
776} 777}
777 778
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 02ddec6d8a7d..fdb19543af1e 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -128,12 +128,12 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
128 node = rb_first(&sbi->system_blks); 128 node = rb_first(&sbi->system_blks);
129 while (node) { 129 while (node) {
130 entry = rb_entry(node, struct ext4_system_zone, node); 130 entry = rb_entry(node, struct ext4_system_zone, node);
131 printk("%s%llu-%llu", first ? "" : ", ", 131 printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
132 entry->start_blk, entry->start_blk + entry->count - 1); 132 entry->start_blk, entry->start_blk + entry->count - 1);
133 first = 0; 133 first = 0;
134 node = rb_next(node); 134 node = rb_next(node);
135 } 135 }
136 printk("\n"); 136 printk(KERN_CONT "\n");
137} 137}
138 138
139int ext4_setup_system_zone(struct super_block *sb) 139int ext4_setup_system_zone(struct super_block *sb)
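
These printk fixups (like the ext4 namei.c and super.c ones below) adapt to the 4.9 printk behaviour where a fragment without an explicit KERN_CONT marker is flushed as its own record instead of being appended to the open line. A toy model of that rule, using the same two-byte SOH-based marker the kernel uses:

#include <stdio.h>
#include <string.h>

#define KERN_CONT "\001c"	/* KERN_SOH "c", as in the kernel */

/* Fragments without the marker start a new record. */
static void toy_printk(const char *msg)
{
	static int midline;

	if (strncmp(msg, KERN_CONT, 2) == 0) {
		fputs(msg + 2, stdout);
	} else {
		if (midline)
			putchar('\n');	/* flush the open line */
		fputs(msg, stdout);
	}
	midline = msg[0] && msg[strlen(msg) - 1] != '\n';
}

int main(void)
{
	toy_printk("entries:");
	toy_printk(KERN_CONT " 1-10");
	toy_printk(KERN_CONT ", 20-30");
	toy_printk(KERN_CONT "\n");
	return 0;
}
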
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 3ef1df6ae9ec..1aba469f8220 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -27,16 +27,15 @@
27#ifdef CONFIG_EXT4_DEBUG 27#ifdef CONFIG_EXT4_DEBUG
28extern ushort ext4_mballoc_debug; 28extern ushort ext4_mballoc_debug;
29 29
30#define mb_debug(n, fmt, a...) \ 30#define mb_debug(n, fmt, ...) \
31 do { \ 31do { \
32 if ((n) <= ext4_mballoc_debug) { \ 32 if ((n) <= ext4_mballoc_debug) { \
33 printk(KERN_DEBUG "(%s, %d): %s: ", \ 33 printk(KERN_DEBUG "(%s, %d): %s: " fmt, \
34 __FILE__, __LINE__, __func__); \ 34 __FILE__, __LINE__, __func__, ##__VA_ARGS__); \
35 printk(fmt, ## a); \ 35 } \
36 } \ 36} while (0)
37 } while (0)
38#else 37#else
39#define mb_debug(n, fmt, a...) no_printk(fmt, ## a) 38#define mb_debug(n, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
40#endif 39#endif
41 40
42#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */ 41#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
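
The mb_debug() rewrite folds the caller's format into a single printk so the "(file, line): func:" prefix and the message land in one record (the old two-printk form splits under the same 4.9 rules), and it switches to ##__VA_ARGS__ so a bare format with no arguments still compiles. The same shape in userspace:

#include <stdio.h>

static int debug_level = 2;

/* One printf carries both prefix and message; ##__VA_ARGS__ swallows
 * the trailing comma when no arguments are passed. */
#define mb_debug(n, fmt, ...)						\
do {									\
	if ((n) <= debug_level)						\
		printf("(%s, %d): %s: " fmt,				\
		       __FILE__, __LINE__, __func__, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	mb_debug(1, "group %d scanned\n", 42);
	mb_debug(2, "done\n");		/* no varargs: still compiles */
	mb_debug(3, "suppressed\n");	/* above debug_level */
	return 0;
}
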
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index f92f10d4f66a..104f8bfba718 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -577,12 +577,13 @@ static inline unsigned dx_node_limit(struct inode *dir)
577static void dx_show_index(char * label, struct dx_entry *entries) 577static void dx_show_index(char * label, struct dx_entry *entries)
578{ 578{
579 int i, n = dx_get_count (entries); 579 int i, n = dx_get_count (entries);
580 printk(KERN_DEBUG "%s index ", label); 580 printk(KERN_DEBUG "%s index", label);
581 for (i = 0; i < n; i++) { 581 for (i = 0; i < n; i++) {
582 printk("%x->%lu ", i ? dx_get_hash(entries + i) : 582 printk(KERN_CONT " %x->%lu",
583 0, (unsigned long)dx_get_block(entries + i)); 583 i ? dx_get_hash(entries + i) : 0,
584 (unsigned long)dx_get_block(entries + i));
584 } 585 }
585 printk("\n"); 586 printk(KERN_CONT "\n");
586} 587}
587 588
588struct stats 589struct stats
@@ -679,7 +680,7 @@ static struct stats dx_show_leaf(struct inode *dir,
679 } 680 }
680 de = ext4_next_entry(de, size); 681 de = ext4_next_entry(de, size);
681 } 682 }
682 printk("(%i)\n", names); 683 printk(KERN_CONT "(%i)\n", names);
683 return (struct stats) { names, space, 1 }; 684 return (struct stats) { names, space, 1 };
684} 685}
685 686
@@ -798,7 +799,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
798 q = entries + count - 1; 799 q = entries + count - 1;
799 while (p <= q) { 800 while (p <= q) {
800 m = p + (q - p) / 2; 801 m = p + (q - p) / 2;
801 dxtrace(printk(".")); 802 dxtrace(printk(KERN_CONT "."));
802 if (dx_get_hash(m) > hash) 803 if (dx_get_hash(m) > hash)
803 q = m - 1; 804 q = m - 1;
804 else 805 else
@@ -810,7 +811,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
810 at = entries; 811 at = entries;
811 while (n--) 812 while (n--)
812 { 813 {
813 dxtrace(printk(",")); 814 dxtrace(printk(KERN_CONT ","));
814 if (dx_get_hash(++at) > hash) 815 if (dx_get_hash(++at) > hash)
815 { 816 {
816 at--; 817 at--;
@@ -821,7 +822,8 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
821 } 822 }
822 823
823 at = p - 1; 824 at = p - 1;
824 dxtrace(printk(" %x->%u\n", at == entries ? 0 : dx_get_hash(at), 825 dxtrace(printk(KERN_CONT " %x->%u\n",
826 at == entries ? 0 : dx_get_hash(at),
825 dx_get_block(at))); 827 dx_get_block(at)));
826 frame->entries = entries; 828 frame->entries = entries;
827 frame->at = at; 829 frame->at = at;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6db81fbcbaa6..20da99da0a34 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -597,14 +597,15 @@ void __ext4_std_error(struct super_block *sb, const char *function,
597void __ext4_abort(struct super_block *sb, const char *function, 597void __ext4_abort(struct super_block *sb, const char *function,
598 unsigned int line, const char *fmt, ...) 598 unsigned int line, const char *fmt, ...)
599{ 599{
600 struct va_format vaf;
600 va_list args; 601 va_list args;
601 602
602 save_error_info(sb, function, line); 603 save_error_info(sb, function, line);
603 va_start(args, fmt); 604 va_start(args, fmt);
604 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id, 605 vaf.fmt = fmt;
605 function, line); 606 vaf.va = &args;
606 vprintk(fmt, args); 607 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
607 printk("\n"); 608 sb->s_id, function, line, &vaf);
608 va_end(args); 609 va_end(args);
609 610
610 if ((sb->s_flags & MS_RDONLY) == 0) { 611 if ((sb->s_flags & MS_RDONLY) == 0) {
@@ -2715,12 +2716,12 @@ static void print_daily_error_info(unsigned long arg)
2715 es->s_first_error_func, 2716 es->s_first_error_func,
2716 le32_to_cpu(es->s_first_error_line)); 2717 le32_to_cpu(es->s_first_error_line));
2717 if (es->s_first_error_ino) 2718 if (es->s_first_error_ino)
2718 printk(": inode %u", 2719 printk(KERN_CONT ": inode %u",
2719 le32_to_cpu(es->s_first_error_ino)); 2720 le32_to_cpu(es->s_first_error_ino));
2720 if (es->s_first_error_block) 2721 if (es->s_first_error_block)
2721 printk(": block %llu", (unsigned long long) 2722 printk(KERN_CONT ": block %llu", (unsigned long long)
2722 le64_to_cpu(es->s_first_error_block)); 2723 le64_to_cpu(es->s_first_error_block));
2723 printk("\n"); 2724 printk(KERN_CONT "\n");
2724 } 2725 }
2725 if (es->s_last_error_time) { 2726 if (es->s_last_error_time) {
2726 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d", 2727 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
@@ -2729,12 +2730,12 @@ static void print_daily_error_info(unsigned long arg)
2729 es->s_last_error_func, 2730 es->s_last_error_func,
2730 le32_to_cpu(es->s_last_error_line)); 2731 le32_to_cpu(es->s_last_error_line));
2731 if (es->s_last_error_ino) 2732 if (es->s_last_error_ino)
2732 printk(": inode %u", 2733 printk(KERN_CONT ": inode %u",
2733 le32_to_cpu(es->s_last_error_ino)); 2734 le32_to_cpu(es->s_last_error_ino));
2734 if (es->s_last_error_block) 2735 if (es->s_last_error_block)
2735 printk(": block %llu", (unsigned long long) 2736 printk(KERN_CONT ": block %llu", (unsigned long long)
2736 le64_to_cpu(es->s_last_error_block)); 2737 le64_to_cpu(es->s_last_error_block));
2737 printk("\n"); 2738 printk(KERN_CONT "\n");
2738 } 2739 }
2739 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ 2740 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
2740} 2741}
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 73bcfd41f5f2..42145be5c6b4 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = {
223EXT4_ATTR_FEATURE(lazy_itable_init); 223EXT4_ATTR_FEATURE(lazy_itable_init);
224EXT4_ATTR_FEATURE(batched_discard); 224EXT4_ATTR_FEATURE(batched_discard);
225EXT4_ATTR_FEATURE(meta_bg_resize); 225EXT4_ATTR_FEATURE(meta_bg_resize);
226#ifdef CONFIG_EXT4_FS_ENCRYPTION
226EXT4_ATTR_FEATURE(encryption); 227EXT4_ATTR_FEATURE(encryption);
228#endif
227EXT4_ATTR_FEATURE(metadata_csum_seed); 229EXT4_ATTR_FEATURE(metadata_csum_seed);
228 230
229static struct attribute *ext4_feat_attrs[] = { 231static struct attribute *ext4_feat_attrs[] = {
230 ATTR_LIST(lazy_itable_init), 232 ATTR_LIST(lazy_itable_init),
231 ATTR_LIST(batched_discard), 233 ATTR_LIST(batched_discard),
232 ATTR_LIST(meta_bg_resize), 234 ATTR_LIST(meta_bg_resize),
235#ifdef CONFIG_EXT4_FS_ENCRYPTION
233 ATTR_LIST(encryption), 236 ATTR_LIST(encryption),
237#endif
234 ATTR_LIST(metadata_csum_seed), 238 ATTR_LIST(metadata_csum_seed),
235 NULL, 239 NULL,
236}; 240};
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c15d63389957..d77be9e9f535 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -61,18 +61,12 @@
61#include "acl.h" 61#include "acl.h"
62 62
63#ifdef EXT4_XATTR_DEBUG 63#ifdef EXT4_XATTR_DEBUG
64# define ea_idebug(inode, f...) do { \ 64# define ea_idebug(inode, fmt, ...) \
65 printk(KERN_DEBUG "inode %s:%lu: ", \ 65 printk(KERN_DEBUG "inode %s:%lu: " fmt "\n", \
66 inode->i_sb->s_id, inode->i_ino); \ 66 inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
67 printk(f); \ 67# define ea_bdebug(bh, fmt, ...) \
68 printk("\n"); \ 68 printk(KERN_DEBUG "block %pg:%lu: " fmt "\n", \
69 } while (0) 69 bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
70# define ea_bdebug(bh, f...) do { \
71 printk(KERN_DEBUG "block %pg:%lu: ", \
72 bh->b_bdev, (unsigned long) bh->b_blocknr); \
73 printk(f); \
74 printk("\n"); \
75 } while (0)
76#else 70#else
77# define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__) 71# define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
78# define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__) 72# define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
@@ -241,7 +235,7 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
241 int error = -EFSCORRUPTED; 235 int error = -EFSCORRUPTED;
242 236
243 if (((void *) header >= end) || 237 if (((void *) header >= end) ||
244 (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC))) 238 (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
245 goto errout; 239 goto errout;
246 error = ext4_xattr_check_names(entry, end, entry); 240 error = ext4_xattr_check_names(entry, end, entry);
247errout: 241errout:
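
The magic check now compares the on-disk __le32 against cpu_to_le32(EXT4_XATTR_MAGIC) instead of byte-swapping the disk value with le32_to_cpu(); both directions test the same thing, but converting the constant keeps the comparison in disk byte order and lets it be folded at compile time. A self-contained sketch, with cpu_to_le32() re-implemented for userspace:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EA_MAGIC 0xEA020000u	/* EXT4_XATTR_MAGIC */

/* Minimal stand-in for the kernel's cpu_to_le32(). */
static uint32_t cpu_to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

int main(void)
{
	/* Little-endian on-disk image of the magic, as read from media. */
	const uint8_t disk[4] = { 0x00, 0x00, 0x02, 0xea };
	uint32_t h_magic;

	memcpy(&h_magic, disk, sizeof(h_magic));

	/* Compare in disk byte order: the constant is converted, the
	 * disk value stays untouched. */
	if (h_magic == cpu_to_le32(EA_MAGIC))
		puts("xattr magic ok");
	return 0;
}
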
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 93985c64d8a8..6f14ee923acd 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -852,16 +852,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
852 852
853 for (segno = start_segno; segno < end_segno; segno++) { 853 for (segno = start_segno; segno < end_segno; segno++) {
854 854
855 if (get_valid_blocks(sbi, segno, 1) == 0 ||
856 unlikely(f2fs_cp_error(sbi)))
857 goto next;
858
859 /* find segment summary of victim */ 855 /* find segment summary of victim */
860 sum_page = find_get_page(META_MAPPING(sbi), 856 sum_page = find_get_page(META_MAPPING(sbi),
861 GET_SUM_BLOCK(sbi, segno)); 857 GET_SUM_BLOCK(sbi, segno));
862 f2fs_bug_on(sbi, !PageUptodate(sum_page));
863 f2fs_put_page(sum_page, 0); 858 f2fs_put_page(sum_page, 0);
864 859
860 if (get_valid_blocks(sbi, segno, 1) == 0 ||
861 !PageUptodate(sum_page) ||
862 unlikely(f2fs_cp_error(sbi)))
863 goto next;
864
865 sum = page_address(sum_page); 865 sum = page_address(sum_page);
866 f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer))); 866 f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
867 867
diff --git a/fs/iomap.c b/fs/iomap.c
index 013d1d36fbbf..a8ee8c33ca78 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -433,8 +433,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
433 struct page *page = data; 433 struct page *page = data;
434 int ret; 434 int ret;
435 435
436 ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length, 436 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
437 NULL, iomap);
438 if (ret) 437 if (ret)
439 return ret; 438 return ret;
440 439
@@ -561,7 +560,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
561 } 560 }
562 561
563 while (len > 0) { 562 while (len > 0) {
564 ret = iomap_apply(inode, start, len, 0, ops, &ctx, 563 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
565 iomap_fiemap_actor); 564 iomap_fiemap_actor);
566 /* inode with no (attribute) mapping will give ENOENT */ 565 /* inode with no (attribute) mapping will give ENOENT */
567 if (ret == -ENOENT) 566 if (ret == -ENOENT)
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ad0c745ebad7..871c8b392099 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -687,6 +687,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
687 pri_bh = NULL; 687 pri_bh = NULL;
688 688
689root_found: 689root_found:
690 /* We don't support read-write mounts */
691 if (!(s->s_flags & MS_RDONLY)) {
692 error = -EACCES;
693 goto out_freebh;
694 }
690 695
691 if (joliet_level && (pri == NULL || !opt.rock)) { 696 if (joliet_level && (pri == NULL || !opt.rock)) {
692 /* This is the case of Joliet with the norock mount flag. 697 /* This is the case of Joliet with the norock mount flag.
@@ -1501,9 +1506,6 @@ struct inode *__isofs_iget(struct super_block *sb,
1501static struct dentry *isofs_mount(struct file_system_type *fs_type, 1506static struct dentry *isofs_mount(struct file_system_type *fs_type,
1502 int flags, const char *dev_name, void *data) 1507 int flags, const char *dev_name, void *data)
1503{ 1508{
1504 /* We don't support read-write mounts */
1505 if (!(flags & MS_RDONLY))
1506 return ERR_PTR(-EACCES);
1507 return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); 1509 return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
1508} 1510}
1509 1511
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3d8246a9faa4..e1652665bd93 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1149,6 +1149,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
1149 JBUFFER_TRACE(jh, "file as BJ_Reserved"); 1149 JBUFFER_TRACE(jh, "file as BJ_Reserved");
1150 spin_lock(&journal->j_list_lock); 1150 spin_lock(&journal->j_list_lock);
1151 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); 1151 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
1152 spin_unlock(&journal->j_list_lock);
1152 } else if (jh->b_transaction == journal->j_committing_transaction) { 1153 } else if (jh->b_transaction == journal->j_committing_transaction) {
1153 /* first access by this transaction */ 1154 /* first access by this transaction */
1154 jh->b_modified = 0; 1155 jh->b_modified = 0;
@@ -1156,8 +1157,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
1156 JBUFFER_TRACE(jh, "set next transaction"); 1157 JBUFFER_TRACE(jh, "set next transaction");
1157 spin_lock(&journal->j_list_lock); 1158 spin_lock(&journal->j_list_lock);
1158 jh->b_next_transaction = transaction; 1159 jh->b_next_transaction = transaction;
1160 spin_unlock(&journal->j_list_lock);
1159 } 1161 }
1160 spin_unlock(&journal->j_list_lock);
1161 jbd_unlock_bh_state(bh); 1162 jbd_unlock_bh_state(bh);
1162 1163
1163 /* 1164 /*
diff --git a/fs/locks.c b/fs/locks.c
index ce93b416b490..22c5b4aa4961 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1609,6 +1609,7 @@ int fcntl_getlease(struct file *filp)
1609 1609
1610 ctx = smp_load_acquire(&inode->i_flctx); 1610 ctx = smp_load_acquire(&inode->i_flctx);
1611 if (ctx && !list_empty_careful(&ctx->flc_lease)) { 1611 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1612 percpu_down_read_preempt_disable(&file_rwsem);
1612 spin_lock(&ctx->flc_lock); 1613 spin_lock(&ctx->flc_lock);
1613 time_out_leases(inode, &dispose); 1614 time_out_leases(inode, &dispose);
1614 list_for_each_entry(fl, &ctx->flc_lease, fl_list) { 1615 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1618,6 +1619,8 @@ int fcntl_getlease(struct file *filp)
1618 break; 1619 break;
1619 } 1620 }
1620 spin_unlock(&ctx->flc_lock); 1621 spin_unlock(&ctx->flc_lock);
1622 percpu_up_read_preempt_enable(&file_rwsem);
1623
1621 locks_dispose_list(&dispose); 1624 locks_dispose_list(&dispose);
1622 } 1625 }
1623 return type; 1626 return type;
@@ -2529,11 +2532,14 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2529 if (list_empty(&ctx->flc_lease)) 2532 if (list_empty(&ctx->flc_lease))
2530 return; 2533 return;
2531 2534
2535 percpu_down_read_preempt_disable(&file_rwsem);
2532 spin_lock(&ctx->flc_lock); 2536 spin_lock(&ctx->flc_lock);
2533 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) 2537 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2534 if (filp == fl->fl_file) 2538 if (filp == fl->fl_file)
2535 lease_modify(fl, F_UNLCK, &dispose); 2539 lease_modify(fl, F_UNLCK, &dispose);
2536 spin_unlock(&ctx->flc_lock); 2540 spin_unlock(&ctx->flc_lock);
2541 percpu_up_read_preempt_enable(&file_rwsem);
2542
2537 locks_dispose_list(&dispose); 2543 locks_dispose_list(&dispose);
2538} 2544}
2539 2545
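
Both fs/locks.c hunks add the same nesting: the global file_rwsem is taken for reading (with preemption disabled) before ctx->flc_lock and released after it, so the two locks are strictly ordered and never interleaved. A sketch of the nesting with userspace primitives standing in for the percpu rwsem and spinlock:

    #include <pthread.h>

    static pthread_rwlock_t file_rwsem = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t flc_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Sketch of the lock nesting added above: the outer read lock (the
     * percpu rwsem in the kernel) is always taken before the inner
     * per-inode lock and released after it. */
    void walk_leases(void (*visit)(void))
    {
        pthread_rwlock_rdlock(&file_rwsem);   /* percpu_down_read_... */
        pthread_mutex_lock(&flc_lock);        /* spin_lock(&ctx->flc_lock) */
        visit();                              /* iterate ctx->flc_lease */
        pthread_mutex_unlock(&flc_lock);      /* spin_unlock */
        pthread_rwlock_unlock(&file_rwsem);   /* percpu_up_read_... */
    }

    static void noop(void) { }
    int main(void) { walk_leases(noop); return 0; }
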
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 217847679f0e..2905479f214a 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -344,9 +344,10 @@ static void bl_write_cleanup(struct work_struct *work)
344 u64 start = hdr->args.offset & (loff_t)PAGE_MASK; 344 u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
345 u64 end = (hdr->args.offset + hdr->args.count + 345 u64 end = (hdr->args.offset + hdr->args.count +
346 PAGE_SIZE - 1) & (loff_t)PAGE_MASK; 346 PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
347 u64 lwb = hdr->args.offset + hdr->args.count;
347 348
348 ext_tree_mark_written(bl, start >> SECTOR_SHIFT, 349 ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
349 (end - start) >> SECTOR_SHIFT, end); 350 (end - start) >> SECTOR_SHIFT, lwb);
350 } 351 }
351 352
352 pnfs_ld_write_done(hdr); 353 pnfs_ld_write_done(hdr);
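
The blocklayout fix separates two values the old code conflated: the page-rounded end of the I/O, which sizes the extent-tree update, and the exact last written byte (lwb), which is how far the layout may actually advance. A worked example with 4096-byte pages, using the same masking arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t offset = 3000, count = 2000;

        uint64_t start = offset & PAGE_MASK;                         /* 0    */
        uint64_t end = (offset + count + PAGE_SIZE - 1) & PAGE_MASK; /* 8192 */
        uint64_t lwb = offset + count;                               /* 5000 */

        /* The extent tree covers whole pages [start, end), but the layout
         * must only advance the last-written-byte to 5000, not to the
         * rounded 8192, which is exactly what the hunk above changes. */
        printf("start=%llu end=%llu lwb=%llu\n",
               (unsigned long long)start, (unsigned long long)end,
               (unsigned long long)lwb);
        return 0;
    }
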
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ad917bd72b38..7897826d7c51 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1545,7 +1545,7 @@ static int update_open_stateid(struct nfs4_state *state,
1545 struct nfs_client *clp = server->nfs_client; 1545 struct nfs_client *clp = server->nfs_client;
1546 struct nfs_inode *nfsi = NFS_I(state->inode); 1546 struct nfs_inode *nfsi = NFS_I(state->inode);
1547 struct nfs_delegation *deleg_cur; 1547 struct nfs_delegation *deleg_cur;
1548 nfs4_stateid freeme = {0}; 1548 nfs4_stateid freeme = { };
1549 int ret = 0; 1549 int ret = 0;
1550 1550
1551 fmode &= (FMODE_READ|FMODE_WRITE); 1551 fmode &= (FMODE_READ|FMODE_WRITE);
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index 1e8fe844e69f..5355efba4bc8 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -73,7 +73,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
73 } 73 }
74 } 74 }
75 75
76 dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 76 orangefs_set_timeout(dentry);
77 ret = 1; 77 ret = 1;
78out_release_op: 78out_release_op:
79 op_release(new_op); 79 op_release(new_op);
@@ -94,8 +94,9 @@ out_drop:
94static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags) 94static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
95{ 95{
96 int ret; 96 int ret;
97 unsigned long time = (unsigned long) dentry->d_fsdata;
97 98
98 if (time_before(jiffies, dentry->d_time)) 99 if (time_before(jiffies, time))
99 return 1; 100 return 1;
100 101
101 if (flags & LOOKUP_RCU) 102 if (flags & LOOKUP_RCU)
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 66ea0cc37b18..02cc6139ec90 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -621,9 +621,9 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
621 * readahead cache (if any); this forces an expensive refresh of 621 * readahead cache (if any); this forces an expensive refresh of
622 * data for the next caller of mmap (or 'get_block' accesses) 622 * data for the next caller of mmap (or 'get_block' accesses)
623 */ 623 */
624 if (file->f_path.dentry->d_inode && 624 if (file_inode(file) &&
625 file->f_path.dentry->d_inode->i_mapping && 625 file_inode(file)->i_mapping &&
626 mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) { 626 mapping_nrpages(&file_inode(file)->i_data)) {
627 if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) { 627 if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) {
628 gossip_debug(GOSSIP_INODE_DEBUG, 628 gossip_debug(GOSSIP_INODE_DEBUG,
629 "calling flush_racache on %pU\n", 629 "calling flush_racache on %pU\n",
@@ -632,7 +632,7 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
632 gossip_debug(GOSSIP_INODE_DEBUG, 632 gossip_debug(GOSSIP_INODE_DEBUG,
633 "flush_racache finished\n"); 633 "flush_racache finished\n");
634 } 634 }
635 truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping, 635 truncate_inode_pages(file_inode(file)->i_mapping,
636 0); 636 0);
637 } 637 }
638 return 0; 638 return 0;
@@ -648,7 +648,7 @@ static int orangefs_fsync(struct file *file,
648{ 648{
649 int ret = -EINVAL; 649 int ret = -EINVAL;
650 struct orangefs_inode_s *orangefs_inode = 650 struct orangefs_inode_s *orangefs_inode =
651 ORANGEFS_I(file->f_path.dentry->d_inode); 651 ORANGEFS_I(file_inode(file));
652 struct orangefs_kernel_op_s *new_op = NULL; 652 struct orangefs_kernel_op_s *new_op = NULL;
653 653
654 /* required call */ 654 /* required call */
@@ -661,7 +661,7 @@ static int orangefs_fsync(struct file *file,
661 661
662 ret = service_operation(new_op, 662 ret = service_operation(new_op,
663 "orangefs_fsync", 663 "orangefs_fsync",
664 get_interruptible_flag(file->f_path.dentry->d_inode)); 664 get_interruptible_flag(file_inode(file)));
665 665
666 gossip_debug(GOSSIP_FILE_DEBUG, 666 gossip_debug(GOSSIP_FILE_DEBUG,
667 "orangefs_fsync got return value of %d\n", 667 "orangefs_fsync got return value of %d\n",
@@ -669,7 +669,7 @@ static int orangefs_fsync(struct file *file,
669 669
670 op_release(new_op); 670 op_release(new_op);
671 671
672 orangefs_flush_inode(file->f_path.dentry->d_inode); 672 orangefs_flush_inode(file_inode(file));
673 return ret; 673 return ret;
674} 674}
675 675
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index d15d3d2dba62..a290ff6ec756 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -72,7 +72,7 @@ static int orangefs_create(struct inode *dir,
72 72
73 d_instantiate(dentry, inode); 73 d_instantiate(dentry, inode);
74 unlock_new_inode(inode); 74 unlock_new_inode(inode);
75 dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 75 orangefs_set_timeout(dentry);
76 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 76 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
77 77
78 gossip_debug(GOSSIP_NAME_DEBUG, 78 gossip_debug(GOSSIP_NAME_DEBUG,
@@ -183,7 +183,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
183 goto out; 183 goto out;
184 } 184 }
185 185
186 dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 186 orangefs_set_timeout(dentry);
187 187
188 inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn); 188 inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
189 if (IS_ERR(inode)) { 189 if (IS_ERR(inode)) {
@@ -322,7 +322,7 @@ static int orangefs_symlink(struct inode *dir,
322 322
323 d_instantiate(dentry, inode); 323 d_instantiate(dentry, inode);
324 unlock_new_inode(inode); 324 unlock_new_inode(inode);
325 dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 325 orangefs_set_timeout(dentry);
326 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 326 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
327 327
328 gossip_debug(GOSSIP_NAME_DEBUG, 328 gossip_debug(GOSSIP_NAME_DEBUG,
@@ -386,7 +386,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
386 386
387 d_instantiate(dentry, inode); 387 d_instantiate(dentry, inode);
388 unlock_new_inode(inode); 388 unlock_new_inode(inode);
389 dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000; 389 orangefs_set_timeout(dentry);
390 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 390 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
391 391
392 gossip_debug(GOSSIP_NAME_DEBUG, 392 gossip_debug(GOSSIP_NAME_DEBUG,
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 0a82048f3aaf..3bf803d732c5 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -580,4 +580,11 @@ static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
580#endif 580#endif
581} 581}
582 582
583static inline void orangefs_set_timeout(struct dentry *dentry)
584{
585 unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
586
587 dentry->d_fsdata = (void *) time;
588}
589
583#endif /* __ORANGEFSKERNEL_H */ 590#endif /* __ORANGEFSKERNEL_H */
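
Taken together, the orangefs hunks stop using dentry->d_time and keep the expiry in d_fsdata instead: orangefs_set_timeout() stores jiffies plus the configured timeout, and orangefs_d_revalidate() compares against it with time_before(). A self-contained sketch of that store/compare pair, with jiffies and time_before() reimplemented locally (the signed subtraction is what keeps the comparison safe across jiffies wraparound):

    #include <stdio.h>

    /* Local stand-ins for the kernel's jiffies machinery. */
    static unsigned long jiffies;
    #define HZ 1000
    #define time_before(a, b) ((long)(a) - (long)(b) < 0)

    struct dentry { void *d_fsdata; };

    static unsigned long dcache_timeout_msecs = 50;

    /* Mirror of orangefs_set_timeout(): stash the expiry in d_fsdata. */
    static void set_timeout(struct dentry *d)
    {
        unsigned long t = jiffies + dcache_timeout_msecs * HZ / 1000;
        d->d_fsdata = (void *)t;
    }

    /* Mirror of the revalidate check: valid while jiffies < expiry. */
    static int still_valid(struct dentry *d)
    {
        return time_before(jiffies, (unsigned long)d->d_fsdata);
    }

    int main(void)
    {
        struct dentry d;
        set_timeout(&d);
        printf("fresh: %d\n", still_valid(&d));  /* 1 */
        jiffies += 51;
        printf("stale: %d\n", still_valid(&d));  /* 0 */
        return 0;
    }
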
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 89600fd5963d..81818adb8e9e 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -412,10 +412,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
412 mm = get_task_mm(task); 412 mm = get_task_mm(task);
413 if (mm) { 413 if (mm) {
414 vsize = task_vsize(mm); 414 vsize = task_vsize(mm);
415 if (permitted) { 415 /*
416 eip = KSTK_EIP(task); 416 * esp and eip are intentionally zeroed out. There is no
417 esp = KSTK_ESP(task); 417 * non-racy way to read them without freezing the task.
418 } 418 * Programs that need reliable values can use ptrace(2).
419 */
419 } 420 }
420 421
421 get_task_comm(tcomm, task); 422 get_task_comm(tcomm, task);
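
The new comment in do_task_stat() points callers that need trustworthy register values at ptrace(2). For reference, a minimal x86-64 sketch of that route (attach, wait for the stop, read the register file); PTRACE_GETREGS is architecture-specific and error handling is trimmed:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/user.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        pid_t pid;
        struct user_regs_struct regs;

        if (argc < 2)
            return 1;
        pid = (pid_t)atoi(argv[1]);

        ptrace(PTRACE_ATTACH, pid, NULL, NULL);
        waitpid(pid, NULL, 0);                     /* target is now stopped */
        ptrace(PTRACE_GETREGS, pid, NULL, &regs);  /* coherent snapshot */
        printf("rip=%llx rsp=%llx\n",
               (unsigned long long)regs.rip, (unsigned long long)regs.rsp);
        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
    }

Unlike a racy read of a running task's stack, the registers are sampled only after the target has stopped, which is the guarantee /proc/PID/stat cannot give.
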
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c2964d890c9a..ca651ac00660 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -832,6 +832,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
832 unsigned long addr = *ppos; 832 unsigned long addr = *ppos;
833 ssize_t copied; 833 ssize_t copied;
834 char *page; 834 char *page;
835 unsigned int flags;
835 836
836 if (!mm) 837 if (!mm)
837 return 0; 838 return 0;
@@ -844,6 +845,11 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
844 if (!atomic_inc_not_zero(&mm->mm_users)) 845 if (!atomic_inc_not_zero(&mm->mm_users))
845 goto free; 846 goto free;
846 847
848 /* Maybe we should limit FOLL_FORCE to actual ptrace users? */
849 flags = FOLL_FORCE;
850 if (write)
851 flags |= FOLL_WRITE;
852
847 while (count > 0) { 853 while (count > 0) {
848 int this_len = min_t(int, count, PAGE_SIZE); 854 int this_len = min_t(int, count, PAGE_SIZE);
849 855
@@ -852,7 +858,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
852 break; 858 break;
853 } 859 }
854 860
855 this_len = access_remote_vm(mm, addr, page, this_len, write); 861 this_len = access_remote_vm(mm, addr, page, this_len, flags);
856 if (!this_len) { 862 if (!this_len) {
857 if (!copied) 863 if (!copied)
858 copied = -EIO; 864 copied = -EIO;
@@ -964,8 +970,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
964 max_len = min_t(size_t, PAGE_SIZE, count); 970 max_len = min_t(size_t, PAGE_SIZE, count);
965 this_len = min(max_len, this_len); 971 this_len = min(max_len, this_len);
966 972
967 retval = access_remote_vm(mm, (env_start + src), 973 retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
968 page, this_len, 0);
969 974
970 if (retval <= 0) { 975 if (retval <= 0) {
971 ret = retval; 976 ret = retval;
@@ -1007,6 +1012,9 @@ static ssize_t auxv_read(struct file *file, char __user *buf,
1007{ 1012{
1008 struct mm_struct *mm = file->private_data; 1013 struct mm_struct *mm = file->private_data;
1009 unsigned int nwords = 0; 1014 unsigned int nwords = 0;
1015
1016 if (!mm)
1017 return 0;
1010 do { 1018 do {
1011 nwords += 2; 1019 nwords += 2;
1012 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ 1020 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
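
The mem_rw() hunk reflects an access_remote_vm() signature change: the final argument is now a gup flags word rather than a write boolean, so the caller assembles FOLL_FORCE (the historic /proc/PID/mem behaviour of overriding VMA protections) plus FOLL_WRITE for writes. A sketch of the assembly; the flag values below are restated locally for illustration and should be taken from linux/mm.h in real code:

    #include <stdio.h>

    #define FOLL_WRITE 0x01
    #define FOLL_FORCE 0x10

    /* Mirror of the flag assembly in mem_rw(): FOLL_FORCE overrides vma
     * protections, FOLL_WRITE is added only when the caller is writing. */
    static unsigned int mem_rw_flags(int write)
    {
        unsigned int flags = FOLL_FORCE;

        if (write)
            flags |= FOLL_WRITE;
        return flags;
    }

    int main(void)
    {
        printf("read:  %#x\n", mem_rw_flags(0));   /* 0x10 */
        printf("write: %#x\n", mem_rw_flags(1));   /* 0x11 */
        return 0;
    }
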
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6909582ce5e5..35b92d81692f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -266,24 +266,15 @@ static int do_maps_open(struct inode *inode, struct file *file,
266 * /proc/PID/maps that is the stack of the main task. 266 * /proc/PID/maps that is the stack of the main task.
267 */ 267 */
268static int is_stack(struct proc_maps_private *priv, 268static int is_stack(struct proc_maps_private *priv,
269 struct vm_area_struct *vma, int is_pid) 269 struct vm_area_struct *vma)
270{ 270{
271 int stack = 0; 271 /*
272 272 * We make no effort to guess what a given thread considers to be
273 if (is_pid) { 273 * its "stack". It's not even well-defined for programs written
274 stack = vma->vm_start <= vma->vm_mm->start_stack && 274 * in languages like Go.
275 vma->vm_end >= vma->vm_mm->start_stack; 275 */
276 } else { 276 return vma->vm_start <= vma->vm_mm->start_stack &&
277 struct inode *inode = priv->inode; 277 vma->vm_end >= vma->vm_mm->start_stack;
278 struct task_struct *task;
279
280 rcu_read_lock();
281 task = pid_task(proc_pid(inode), PIDTYPE_PID);
282 if (task)
283 stack = vma_is_stack_for_task(vma, task);
284 rcu_read_unlock();
285 }
286 return stack;
287} 278}
288 279
289static void 280static void
@@ -354,7 +345,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
354 goto done; 345 goto done;
355 } 346 }
356 347
357 if (is_stack(priv, vma, is_pid)) 348 if (is_stack(priv, vma))
358 name = "[stack]"; 349 name = "[stack]";
359 } 350 }
360 351
@@ -1669,7 +1660,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1669 seq_file_path(m, file, "\n\t= "); 1660 seq_file_path(m, file, "\n\t= ");
1670 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { 1661 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1671 seq_puts(m, " heap"); 1662 seq_puts(m, " heap");
1672 } else if (is_stack(proc_priv, vma, is_pid)) { 1663 } else if (is_stack(proc_priv, vma)) {
1673 seq_puts(m, " stack"); 1664 seq_puts(m, " stack");
1674 } 1665 }
1675 1666
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index faacb0c0d857..37175621e890 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -124,25 +124,17 @@ unsigned long task_statm(struct mm_struct *mm,
124} 124}
125 125
126static int is_stack(struct proc_maps_private *priv, 126static int is_stack(struct proc_maps_private *priv,
127 struct vm_area_struct *vma, int is_pid) 127 struct vm_area_struct *vma)
128{ 128{
129 struct mm_struct *mm = vma->vm_mm; 129 struct mm_struct *mm = vma->vm_mm;
130 int stack = 0; 130
131 131 /*
132 if (is_pid) { 132 * We make no effort to guess what a given thread considers to be
133 stack = vma->vm_start <= mm->start_stack && 133 * its "stack". It's not even well-defined for programs written
134 vma->vm_end >= mm->start_stack; 134 * in languages like Go.
135 } else { 135 */
136 struct inode *inode = priv->inode; 136 return vma->vm_start <= mm->start_stack &&
137 struct task_struct *task; 137 vma->vm_end >= mm->start_stack;
138
139 rcu_read_lock();
140 task = pid_task(proc_pid(inode), PIDTYPE_PID);
141 if (task)
142 stack = vma_is_stack_for_task(vma, task);
143 rcu_read_unlock();
144 }
145 return stack;
146} 138}
147 139
148/* 140/*
@@ -184,7 +176,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
184 if (file) { 176 if (file) {
185 seq_pad(m, ' '); 177 seq_pad(m, ' ');
186 seq_file_path(m, file, ""); 178 seq_file_path(m, file, "");
187 } else if (mm && is_stack(priv, vma, is_pid)) { 179 } else if (mm && is_stack(priv, vma)) {
188 seq_pad(m, ' '); 180 seq_pad(m, ' ');
189 seq_printf(m, "[stack]"); 181 seq_printf(m, "[stack]");
190 } 182 }
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index c8f60df2733e..bd4a5e8ce441 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -439,7 +439,7 @@ static unsigned int vfs_dent_type(uint8_t type)
439 */ 439 */
440static int ubifs_readdir(struct file *file, struct dir_context *ctx) 440static int ubifs_readdir(struct file *file, struct dir_context *ctx)
441{ 441{
442 int err; 442 int err = 0;
443 struct qstr nm; 443 struct qstr nm;
444 union ubifs_key key; 444 union ubifs_key key;
445 struct ubifs_dent_node *dent; 445 struct ubifs_dent_node *dent;
@@ -541,14 +541,12 @@ out:
541 kfree(file->private_data); 541 kfree(file->private_data);
542 file->private_data = NULL; 542 file->private_data = NULL;
543 543
544 if (err != -ENOENT) { 544 if (err != -ENOENT)
545 ubifs_err(c, "cannot find next direntry, error %d", err); 545 ubifs_err(c, "cannot find next direntry, error %d", err);
546 return err;
547 }
548 546
549 /* 2 is a special value indicating that there are no more direntries */ 547 /* 2 is a special value indicating that there are no more direntries */
550 ctx->pos = 2; 548 ctx->pos = 2;
551 return 0; 549 return err;
552} 550}
553 551
554/* Free saved readdir() state when the directory is closed */ 552/* Free saved readdir() state when the directory is closed */
@@ -1060,9 +1058,9 @@ static void unlock_4_inodes(struct inode *inode1, struct inode *inode2,
1060 mutex_unlock(&ubifs_inode(inode1)->ui_mutex); 1058 mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
1061} 1059}
1062 1060
1063static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, 1061static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
1064 struct inode *new_dir, struct dentry *new_dentry, 1062 struct inode *new_dir, struct dentry *new_dentry,
1065 unsigned int flags) 1063 unsigned int flags)
1066{ 1064{
1067 struct ubifs_info *c = old_dir->i_sb->s_fs_info; 1065 struct ubifs_info *c = old_dir->i_sb->s_fs_info;
1068 struct inode *old_inode = d_inode(old_dentry); 1066 struct inode *old_inode = d_inode(old_dentry);
@@ -1323,7 +1321,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
1323 return err; 1321 return err;
1324} 1322}
1325 1323
1326static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry, 1324static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
1327 struct inode *new_dir, struct dentry *new_dentry, 1325 struct inode *new_dir, struct dentry *new_dentry,
1328 unsigned int flags) 1326 unsigned int flags)
1329{ 1327{
@@ -1336,7 +1334,7 @@ static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
1336 if (flags & RENAME_EXCHANGE) 1334 if (flags & RENAME_EXCHANGE)
1337 return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry); 1335 return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
1338 1336
1339 return ubifs_rename(old_dir, old_dentry, new_dir, new_dentry, flags); 1337 return do_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
1340} 1338}
1341 1339
1342int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, 1340int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -1387,7 +1385,7 @@ const struct inode_operations ubifs_dir_inode_operations = {
1387 .mkdir = ubifs_mkdir, 1385 .mkdir = ubifs_mkdir,
1388 .rmdir = ubifs_rmdir, 1386 .rmdir = ubifs_rmdir,
1389 .mknod = ubifs_mknod, 1387 .mknod = ubifs_mknod,
1390 .rename = ubifs_rename2, 1388 .rename = ubifs_rename,
1391 .setattr = ubifs_setattr, 1389 .setattr = ubifs_setattr,
1392 .getattr = ubifs_getattr, 1390 .getattr = ubifs_getattr,
1393 .listxattr = ubifs_listxattr, 1391 .listxattr = ubifs_listxattr,
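
The rename churn in fs/ubifs/dir.c is a naming swap driven by the VFS now passing flags to ->rename directly: the old ubifs_rename() body becomes do_rename(), and the former ubifs_rename2() wrapper takes the ubifs_rename name and dispatches on the flags. A sketch of that dispatch shape; the unknown-flags guard is the usual VFS idiom and is shown here as an assumption, since the hunk does not include it:

    #include <stdio.h>
    #include <errno.h>

    #define RENAME_NOREPLACE (1 << 0)
    #define RENAME_EXCHANGE  (1 << 1)
    #define RENAME_WHITEOUT  (1 << 2)

    /* Hypothetical stand-ins for the two implementations. */
    static int do_rename_impl(unsigned int flags) { (void)flags; return 0; }
    static int do_xrename_impl(void) { return 0; }

    /* Sketch of the dispatch now done in ubifs_rename(): reject unknown
     * flags, route RENAME_EXCHANGE to the cross-rename path, and let the
     * plain implementation handle the rest. */
    static int rename_dispatch(unsigned int flags)
    {
        if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE))
            return -EINVAL;
        if (flags & RENAME_EXCHANGE)
            return do_xrename_impl();
        return do_rename_impl(flags);
    }

    int main(void)
    {
        printf("exchange: %d\n", rename_dispatch(RENAME_EXCHANGE)); /* 0   */
        printf("bogus:    %d\n", rename_dispatch(1 << 5));          /* -22 */
        return 0;
    }
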
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6c2f4d41ed73..d9f9615bfd71 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -172,6 +172,7 @@ out_cancel:
172 host_ui->xattr_cnt -= 1; 172 host_ui->xattr_cnt -= 1;
173 host_ui->xattr_size -= CALC_DENT_SIZE(nm->len); 173 host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
174 host_ui->xattr_size -= CALC_XATTR_BYTES(size); 174 host_ui->xattr_size -= CALC_XATTR_BYTES(size);
175 host_ui->xattr_names -= nm->len;
175 mutex_unlock(&host_ui->ui_mutex); 176 mutex_unlock(&host_ui->ui_mutex);
176out_free: 177out_free:
177 make_bad_inode(inode); 178 make_bad_inode(inode);
@@ -478,6 +479,7 @@ out_cancel:
478 host_ui->xattr_cnt += 1; 479 host_ui->xattr_cnt += 1;
479 host_ui->xattr_size += CALC_DENT_SIZE(nm->len); 480 host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
480 host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); 481 host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
482 host_ui->xattr_names += nm->len;
481 mutex_unlock(&host_ui->ui_mutex); 483 mutex_unlock(&host_ui->ui_mutex);
482 ubifs_release_budget(c, &req); 484 ubifs_release_budget(c, &req);
483 make_bad_inode(inode); 485 make_bad_inode(inode);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index c27344cf38e1..c6eb21940783 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3974,9 +3974,6 @@ xfs_bmap_remap_alloc(
3974 * allocating, so skip that check by pretending to be freeing. 3974 * allocating, so skip that check by pretending to be freeing.
3975 */ 3975 */
3976 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING); 3976 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3977 if (error)
3978 goto error0;
3979error0:
3980 xfs_perag_put(args.pag); 3977 xfs_perag_put(args.pag);
3981 if (error) 3978 if (error)
3982 trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_); 3979 trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
@@ -3999,6 +3996,39 @@ xfs_bmap_alloc(
3999 return xfs_bmap_btalloc(ap); 3996 return xfs_bmap_btalloc(ap);
4000} 3997}
4001 3998
3999/* Trim extent to fit a logical block range. */
4000void
4001xfs_trim_extent(
4002 struct xfs_bmbt_irec *irec,
4003 xfs_fileoff_t bno,
4004 xfs_filblks_t len)
4005{
4006 xfs_fileoff_t distance;
4007 xfs_fileoff_t end = bno + len;
4008
4009 if (irec->br_startoff + irec->br_blockcount <= bno ||
4010 irec->br_startoff >= end) {
4011 irec->br_blockcount = 0;
4012 return;
4013 }
4014
4015 if (irec->br_startoff < bno) {
4016 distance = bno - irec->br_startoff;
4017 if (isnullstartblock(irec->br_startblock))
4018 irec->br_startblock = DELAYSTARTBLOCK;
4019 if (irec->br_startblock != DELAYSTARTBLOCK &&
4020 irec->br_startblock != HOLESTARTBLOCK)
4021 irec->br_startblock += distance;
4022 irec->br_startoff += distance;
4023 irec->br_blockcount -= distance;
4024 }
4025
4026 if (end < irec->br_startoff + irec->br_blockcount) {
4027 distance = irec->br_startoff + irec->br_blockcount - end;
4028 irec->br_blockcount -= distance;
4029 }
4030}
4031
4002/* 4032/*
4003 * Trim the returned map to the required bounds 4033 * Trim the returned map to the required bounds
4004 */ 4034 */
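
The new xfs_trim_extent() clips an extent record to the range [bno, bno+len): no overlap collapses the record to zero blocks, overhang on the left advances br_startoff and br_startblock together, and overhang on the right only shortens br_blockcount. A userspace copy of the arithmetic on plain integers (the delalloc/hole start-block special cases are dropped for clarity), with a worked case:

    #include <stdio.h>
    #include <stdint.h>

    struct irec { uint64_t startoff, startblock, blockcount; };

    static void trim_extent(struct irec *r, uint64_t bno, uint64_t len)
    {
        uint64_t end = bno + len;

        if (r->startoff + r->blockcount <= bno || r->startoff >= end) {
            r->blockcount = 0;               /* no overlap at all */
            return;
        }
        if (r->startoff < bno) {             /* clip the left edge */
            uint64_t d = bno - r->startoff;
            r->startblock += d;
            r->startoff += d;
            r->blockcount -= d;
        }
        if (end < r->startoff + r->blockcount)   /* clip the right edge */
            r->blockcount -= r->startoff + r->blockcount - end;
    }

    int main(void)
    {
        /* Extent covering file blocks [10, 30) mapped at disk block 100,
         * trimmed to [15, 20): expect startoff 15, startblock 105,
         * blockcount 5. */
        struct irec r = { 10, 100, 20 };
        trim_extent(&r, 15, 5);
        printf("%llu %llu %llu\n",
               (unsigned long long)r.startoff,
               (unsigned long long)r.startblock,
               (unsigned long long)r.blockcount);
        return 0;
    }
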
@@ -4829,6 +4859,219 @@ xfs_bmap_split_indlen(
4829 return stolen; 4859 return stolen;
4830} 4860}
4831 4861
4862int
4863xfs_bmap_del_extent_delay(
4864 struct xfs_inode *ip,
4865 int whichfork,
4866 xfs_extnum_t *idx,
4867 struct xfs_bmbt_irec *got,
4868 struct xfs_bmbt_irec *del)
4869{
4870 struct xfs_mount *mp = ip->i_mount;
4871 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4872 struct xfs_bmbt_irec new;
4873 int64_t da_old, da_new, da_diff = 0;
4874 xfs_fileoff_t del_endoff, got_endoff;
4875 xfs_filblks_t got_indlen, new_indlen, stolen;
4876 int error = 0, state = 0;
4877 bool isrt;
4878
4879 XFS_STATS_INC(mp, xs_del_exlist);
4880
4881 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4882 del_endoff = del->br_startoff + del->br_blockcount;
4883 got_endoff = got->br_startoff + got->br_blockcount;
4884 da_old = startblockval(got->br_startblock);
4885 da_new = 0;
4886
4887 ASSERT(*idx >= 0);
4888 ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
4889 ASSERT(del->br_blockcount > 0);
4890 ASSERT(got->br_startoff <= del->br_startoff);
4891 ASSERT(got_endoff >= del_endoff);
4892
4893 if (isrt) {
4894 int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4895
4896 do_div(rtexts, mp->m_sb.sb_rextsize);
4897 xfs_mod_frextents(mp, rtexts);
4898 }
4899
4900 /*
4901 * Update the inode delalloc counter now and wait to update the
4902 * sb counters as we might have to borrow some blocks for the
4903 * indirect block accounting.
4904 */
4905 xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0,
4906 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4907 ip->i_delayed_blks -= del->br_blockcount;
4908
4909 if (whichfork == XFS_COW_FORK)
4910 state |= BMAP_COWFORK;
4911
4912 if (got->br_startoff == del->br_startoff)
4913 state |= BMAP_LEFT_CONTIG;
4914 if (got_endoff == del_endoff)
4915 state |= BMAP_RIGHT_CONTIG;
4916
4917 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
4918 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
4919 /*
4920 * Matches the whole extent. Delete the entry.
4921 */
4922 xfs_iext_remove(ip, *idx, 1, state);
4923 --*idx;
4924 break;
4925 case BMAP_LEFT_CONTIG:
4926 /*
4927 * Deleting the first part of the extent.
4928 */
4929 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4930 got->br_startoff = del_endoff;
4931 got->br_blockcount -= del->br_blockcount;
4932 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4933 got->br_blockcount), da_old);
4934 got->br_startblock = nullstartblock((int)da_new);
4935 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4936 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4937 break;
4938 case BMAP_RIGHT_CONTIG:
4939 /*
4940 * Deleting the last part of the extent.
4941 */
4942 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4943 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4944 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4945 got->br_blockcount), da_old);
4946 got->br_startblock = nullstartblock((int)da_new);
4947 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4948 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4949 break;
4950 case 0:
4951 /*
4952 * Deleting the middle of the extent.
4953 *
4954 * Distribute the original indlen reservation across the two new
4955 * extents. Steal blocks from the deleted extent if necessary.
4956 * Stealing blocks simply fudges the fdblocks accounting below.
4957 * Warn if either of the new indlen reservations is zero as this
4958 * can lead to delalloc problems.
4959 */
4960 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4961
4962 got->br_blockcount = del->br_startoff - got->br_startoff;
4963 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4964
4965 new.br_blockcount = got_endoff - del_endoff;
4966 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4967
4968 WARN_ON_ONCE(!got_indlen || !new_indlen);
4969 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4970 del->br_blockcount);
4971
4972 got->br_startblock = nullstartblock((int)got_indlen);
4973 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
4974 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
4975
4976 new.br_startoff = del_endoff;
4977 new.br_state = got->br_state;
4978 new.br_startblock = nullstartblock((int)new_indlen);
4979
4980 ++*idx;
4981 xfs_iext_insert(ip, *idx, 1, &new, state);
4982
4983 da_new = got_indlen + new_indlen - stolen;
4984 del->br_blockcount -= stolen;
4985 break;
4986 }
4987
4988 ASSERT(da_old >= da_new);
4989 da_diff = da_old - da_new;
4990 if (!isrt)
4991 da_diff += del->br_blockcount;
4992 if (da_diff)
4993 xfs_mod_fdblocks(mp, da_diff, false);
4994 return error;
4995}
4996
4997void
4998xfs_bmap_del_extent_cow(
4999 struct xfs_inode *ip,
5000 xfs_extnum_t *idx,
5001 struct xfs_bmbt_irec *got,
5002 struct xfs_bmbt_irec *del)
5003{
5004 struct xfs_mount *mp = ip->i_mount;
5005 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
5006 struct xfs_bmbt_irec new;
5007 xfs_fileoff_t del_endoff, got_endoff;
5008 int state = BMAP_COWFORK;
5009
5010 XFS_STATS_INC(mp, xs_del_exlist);
5011
5012 del_endoff = del->br_startoff + del->br_blockcount;
5013 got_endoff = got->br_startoff + got->br_blockcount;
5014
5015 ASSERT(*idx >= 0);
5016 ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
5017 ASSERT(del->br_blockcount > 0);
5018 ASSERT(got->br_startoff <= del->br_startoff);
5019 ASSERT(got_endoff >= del_endoff);
5020 ASSERT(!isnullstartblock(got->br_startblock));
5021
5022 if (got->br_startoff == del->br_startoff)
5023 state |= BMAP_LEFT_CONTIG;
5024 if (got_endoff == del_endoff)
5025 state |= BMAP_RIGHT_CONTIG;
5026
5027 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5028 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5029 /*
5030 * Matches the whole extent. Delete the entry.
5031 */
5032 xfs_iext_remove(ip, *idx, 1, state);
5033 --*idx;
5034 break;
5035 case BMAP_LEFT_CONTIG:
5036 /*
5037 * Deleting the first part of the extent.
5038 */
5039 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5040 got->br_startoff = del_endoff;
5041 got->br_blockcount -= del->br_blockcount;
5042 got->br_startblock = del->br_startblock + del->br_blockcount;
5043 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5044 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5045 break;
5046 case BMAP_RIGHT_CONTIG:
5047 /*
5048 * Deleting the last part of the extent.
5049 */
5050 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5051 got->br_blockcount -= del->br_blockcount;
5052 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5053 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5054 break;
5055 case 0:
5056 /*
5057 * Deleting the middle of the extent.
5058 */
5059 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5060 got->br_blockcount = del->br_startoff - got->br_startoff;
5061 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
5062 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5063
5064 new.br_startoff = del_endoff;
5065 new.br_blockcount = got_endoff - del_endoff;
5066 new.br_state = got->br_state;
5067 new.br_startblock = del->br_startblock + del->br_blockcount;
5068
5069 ++*idx;
5070 xfs_iext_insert(ip, *idx, 1, &new, state);
5071 break;
5072 }
5073}
5074
4832/* 5075/*
4833 * Called by xfs_bmapi to update file extent records and the btree 5076 * Called by xfs_bmapi to update file extent records and the btree
4834 * after removing space (or undoing a delayed allocation). 5077 * after removing space (or undoing a delayed allocation).
@@ -5171,175 +5414,6 @@ done:
5171 return error; 5414 return error;
5172} 5415}
5173 5416
5174/* Remove an extent from the CoW fork. Similar to xfs_bmap_del_extent. */
5175int
5176xfs_bunmapi_cow(
5177 struct xfs_inode *ip,
5178 struct xfs_bmbt_irec *del)
5179{
5180 xfs_filblks_t da_new;
5181 xfs_filblks_t da_old;
5182 xfs_fsblock_t del_endblock = 0;
5183 xfs_fileoff_t del_endoff;
5184 int delay;
5185 struct xfs_bmbt_rec_host *ep;
5186 int error;
5187 struct xfs_bmbt_irec got;
5188 xfs_fileoff_t got_endoff;
5189 struct xfs_ifork *ifp;
5190 struct xfs_mount *mp;
5191 xfs_filblks_t nblks;
5192 struct xfs_bmbt_irec new;
5193 /* REFERENCED */
5194 uint qfield;
5195 xfs_filblks_t temp;
5196 xfs_filblks_t temp2;
5197 int state = BMAP_COWFORK;
5198 int eof;
5199 xfs_extnum_t eidx;
5200
5201 mp = ip->i_mount;
5202 XFS_STATS_INC(mp, xs_del_exlist);
5203
5204 ep = xfs_bmap_search_extents(ip, del->br_startoff, XFS_COW_FORK, &eof,
5205 &eidx, &got, &new);
5206
5207 ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); ifp = ifp;
5208 ASSERT((eidx >= 0) && (eidx < ifp->if_bytes /
5209 (uint)sizeof(xfs_bmbt_rec_t)));
5210 ASSERT(del->br_blockcount > 0);
5211 ASSERT(got.br_startoff <= del->br_startoff);
5212 del_endoff = del->br_startoff + del->br_blockcount;
5213 got_endoff = got.br_startoff + got.br_blockcount;
5214 ASSERT(got_endoff >= del_endoff);
5215 delay = isnullstartblock(got.br_startblock);
5216 ASSERT(isnullstartblock(del->br_startblock) == delay);
5217 qfield = 0;
5218 error = 0;
5219 /*
5220 * If deleting a real allocation, must free up the disk space.
5221 */
5222 if (!delay) {
5223 nblks = del->br_blockcount;
5224 qfield = XFS_TRANS_DQ_BCOUNT;
5225 /*
5226 * Set up del_endblock and cur for later.
5227 */
5228 del_endblock = del->br_startblock + del->br_blockcount;
5229 da_old = da_new = 0;
5230 } else {
5231 da_old = startblockval(got.br_startblock);
5232 da_new = 0;
5233 nblks = 0;
5234 }
5235 qfield = qfield;
5236 nblks = nblks;
5237
5238 /*
5239 * Set flag value to use in switch statement.
5240 * Left-contig is 2, right-contig is 1.
5241 */
5242 switch (((got.br_startoff == del->br_startoff) << 1) |
5243 (got_endoff == del_endoff)) {
5244 case 3:
5245 /*
5246 * Matches the whole extent. Delete the entry.
5247 */
5248 xfs_iext_remove(ip, eidx, 1, BMAP_COWFORK);
5249 --eidx;
5250 break;
5251
5252 case 2:
5253 /*
5254 * Deleting the first part of the extent.
5255 */
5256 trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
5257 xfs_bmbt_set_startoff(ep, del_endoff);
5258 temp = got.br_blockcount - del->br_blockcount;
5259 xfs_bmbt_set_blockcount(ep, temp);
5260 if (delay) {
5261 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5262 da_old);
5263 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5264 trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
5265 da_new = temp;
5266 break;
5267 }
5268 xfs_bmbt_set_startblock(ep, del_endblock);
5269 trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
5270 break;
5271
5272 case 1:
5273 /*
5274 * Deleting the last part of the extent.
5275 */
5276 temp = got.br_blockcount - del->br_blockcount;
5277 trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
5278 xfs_bmbt_set_blockcount(ep, temp);
5279 if (delay) {
5280 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5281 da_old);
5282 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5283 trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
5284 da_new = temp;
5285 break;
5286 }
5287 trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
5288 break;
5289
5290 case 0:
5291 /*
5292 * Deleting the middle of the extent.
5293 */
5294 temp = del->br_startoff - got.br_startoff;
5295 trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
5296 xfs_bmbt_set_blockcount(ep, temp);
5297 new.br_startoff = del_endoff;
5298 temp2 = got_endoff - del_endoff;
5299 new.br_blockcount = temp2;
5300 new.br_state = got.br_state;
5301 if (!delay) {
5302 new.br_startblock = del_endblock;
5303 } else {
5304 temp = xfs_bmap_worst_indlen(ip, temp);
5305 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5306 temp2 = xfs_bmap_worst_indlen(ip, temp2);
5307 new.br_startblock = nullstartblock((int)temp2);
5308 da_new = temp + temp2;
5309 while (da_new > da_old) {
5310 if (temp) {
5311 temp--;
5312 da_new--;
5313 xfs_bmbt_set_startblock(ep,
5314 nullstartblock((int)temp));
5315 }
5316 if (da_new == da_old)
5317 break;
5318 if (temp2) {
5319 temp2--;
5320 da_new--;
5321 new.br_startblock =
5322 nullstartblock((int)temp2);
5323 }
5324 }
5325 }
5326 trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
5327 xfs_iext_insert(ip, eidx + 1, 1, &new, state);
5328 ++eidx;
5329 break;
5330 }
5331
5332 /*
5333 * Account for change in delayed indirect blocks.
5334 * Nothing to do for disk quota accounting here.
5335 */
5336 ASSERT(da_old >= da_new);
5337 if (da_old > da_new)
5338 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5339
5340 return error;
5341}
5342
5343/* 5417/*
5344 * Unmap (remove) blocks from a file. 5418 * Unmap (remove) blocks from a file.
5345 * If nexts is nonzero then the number of extents to remove is limited to 5419 * If nexts is nonzero then the number of extents to remove is limited to
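
Both helpers introduced above, xfs_bmap_del_extent_delay() and xfs_bmap_del_extent_cow(), classify the deletion with the same two flags: BMAP_LEFT_CONTIG when the deleted range starts where the extent starts, BMAP_RIGHT_CONTIG when it ends where the extent ends. The four combinations select remove-whole-entry, trim-front, trim-back, or split-in-two. A tiny sketch of the classification, with illustrative flag values:

    #include <stdio.h>
    #include <stdint.h>

    enum { LEFT_CONTIG = 1, RIGHT_CONTIG = 2 };

    /* Mirror of the state computation in the two del_extent helpers:
     * two bits pick one of four deletion shapes. */
    static int classify(uint64_t got_off, uint64_t got_len,
                        uint64_t del_off, uint64_t del_len)
    {
        int state = 0;

        if (got_off == del_off)
            state |= LEFT_CONTIG;
        if (got_off + got_len == del_off + del_len)
            state |= RIGHT_CONTIG;
        return state;
    }

    int main(void)
    {
        /* Extent [10, 30), deleting [10, 15): trims the front. */
        printf("%d\n", classify(10, 20, 10, 5));   /* 1: LEFT_CONTIG  */
        /* Deleting [15, 20): splits the extent in two. */
        printf("%d\n", classify(10, 20, 15, 5));   /* 0: middle       */
        /* Deleting [10, 30): removes the whole entry. */
        printf("%d\n", classify(10, 20, 10, 20));  /* 3: both         */
        return 0;
    }
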
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index f97db7132564..7cae6ec27fa6 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -190,6 +190,8 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
190#define XFS_BMAP_TRACE_EXLIST(ip,c,w) 190#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
191#endif 191#endif
192 192
193void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
194 xfs_filblks_t len);
193int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd); 195int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
194void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork); 196void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
195void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops, 197void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
@@ -221,7 +223,11 @@ int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
221 xfs_fileoff_t bno, xfs_filblks_t len, int flags, 223 xfs_fileoff_t bno, xfs_filblks_t len, int flags,
222 xfs_extnum_t nexts, xfs_fsblock_t *firstblock, 224 xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
223 struct xfs_defer_ops *dfops, int *done); 225 struct xfs_defer_ops *dfops, int *done);
224int xfs_bunmapi_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *del); 226int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
227 xfs_extnum_t *idx, struct xfs_bmbt_irec *got,
228 struct xfs_bmbt_irec *del);
229void xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx,
230 struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del);
225int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx, 231int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
226 xfs_extnum_t num); 232 xfs_extnum_t num);
227uint xfs_default_attroffset(struct xfs_inode *ip); 233uint xfs_default_attroffset(struct xfs_inode *ip);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5c8e6f2ce44f..0e80993c8a59 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4826,7 +4826,7 @@ xfs_btree_calc_size(
4826 return rval; 4826 return rval;
4827} 4827}
4828 4828
4829int 4829static int
4830xfs_btree_count_blocks_helper( 4830xfs_btree_count_blocks_helper(
4831 struct xfs_btree_cur *cur, 4831 struct xfs_btree_cur *cur,
4832 int level, 4832 int level,
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index 3cc3cf767474..ac9a003dd29a 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
191 if (mp->m_quotainfo) 191 if (mp->m_quotainfo)
192 ndquots = mp->m_quotainfo->qi_dqperchunk; 192 ndquots = mp->m_quotainfo->qi_dqperchunk;
193 else 193 else
194 ndquots = xfs_calc_dquots_per_chunk( 194 ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
195 XFS_BB_TO_FSB(mp, bp->b_length));
196 195
197 for (i = 0; i < ndquots; i++, d++) { 196 for (i = 0; i < ndquots; i++, d++) {
198 if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk), 197 if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index f6547fc5e016..6b7579e7b60a 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -865,7 +865,6 @@ typedef struct xfs_timestamp {
865 * padding field for v3 inodes. 865 * padding field for v3 inodes.
866 */ 866 */
867#define XFS_DINODE_MAGIC 0x494e /* 'IN' */ 867#define XFS_DINODE_MAGIC 0x494e /* 'IN' */
868#define XFS_DINODE_GOOD_VERSION(v) ((v) >= 1 && (v) <= 3)
869typedef struct xfs_dinode { 868typedef struct xfs_dinode {
870 __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */ 869 __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */
871 __be16 di_mode; /* mode and type of file */ 870 __be16 di_mode; /* mode and type of file */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 8de9a3a29589..134424fac434 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -57,6 +57,17 @@ xfs_inobp_check(
57} 57}
58#endif 58#endif
59 59
60bool
61xfs_dinode_good_version(
62 struct xfs_mount *mp,
63 __u8 version)
64{
65 if (xfs_sb_version_hascrc(&mp->m_sb))
66 return version == 3;
67
68 return version == 1 || version == 2;
69}
70
60/* 71/*
61 * If we are doing readahead on an inode buffer, we might be in log recovery 72 * If we are doing readahead on an inode buffer, we might be in log recovery
62 * reading an inode allocation buffer that hasn't yet been replayed, and hence 73 * reading an inode allocation buffer that hasn't yet been replayed, and hence
@@ -91,7 +102,7 @@ xfs_inode_buf_verify(
91 102
92 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog)); 103 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
93 di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) && 104 di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
94 XFS_DINODE_GOOD_VERSION(dip->di_version); 105 xfs_dinode_good_version(mp, dip->di_version);
95 if (unlikely(XFS_TEST_ERROR(!di_ok, mp, 106 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
96 XFS_ERRTAG_ITOBP_INOTOBP, 107 XFS_ERRTAG_ITOBP_INOTOBP,
97 XFS_RANDOM_ITOBP_INOTOBP))) { 108 XFS_RANDOM_ITOBP_INOTOBP))) {
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index 62d9d4681c8c..3cfe12a4f58a 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -74,6 +74,8 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
74void xfs_log_dinode_to_disk(struct xfs_log_dinode *from, 74void xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
75 struct xfs_dinode *to); 75 struct xfs_dinode *to);
76 76
77bool xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
78
77#if defined(DEBUG) 79#if defined(DEBUG)
78void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); 80void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
79#else 81#else
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a314fc7b56fa..6e4f7f900fea 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -249,6 +249,7 @@ xfs_file_dio_aio_read(
249 struct xfs_inode *ip = XFS_I(inode); 249 struct xfs_inode *ip = XFS_I(inode);
250 loff_t isize = i_size_read(inode); 250 loff_t isize = i_size_read(inode);
251 size_t count = iov_iter_count(to); 251 size_t count = iov_iter_count(to);
252 loff_t end = iocb->ki_pos + count - 1;
252 struct iov_iter data; 253 struct iov_iter data;
253 struct xfs_buftarg *target; 254 struct xfs_buftarg *target;
254 ssize_t ret = 0; 255 ssize_t ret = 0;
@@ -272,49 +273,21 @@ xfs_file_dio_aio_read(
272 273
273 file_accessed(iocb->ki_filp); 274 file_accessed(iocb->ki_filp);
274 275
275 /*
276 * Locking is a bit tricky here. If we take an exclusive lock for direct
277 * IO, we effectively serialise all new concurrent read IO to this file
278 * and block it behind IO that is currently in progress because IO in
279 * progress holds the IO lock shared. We only need to hold the lock
280 * exclusive to blow away the page cache, so only take lock exclusively
281 * if the page cache needs invalidation. This allows the normal direct
282 * IO case of no page cache pages to proceed concurrently without
283 * serialisation.
284 */
285 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); 276 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
286 if (mapping->nrpages) { 277 if (mapping->nrpages) {
287 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 278 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
288 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); 279 if (ret)
280 goto out_unlock;
289 281
290 /* 282 /*
291 * The generic dio code only flushes the range of the particular 283 * Invalidate whole pages. This can return an error if we fail
292 * I/O. Because we take an exclusive lock here, this whole 284 * to invalidate a page, but this should never happen on XFS.
293 * sequence is considerably more expensive for us. This has a 285 * Warn if it does fail.
294 * noticeable performance impact for any file with cached pages,
295 * even when outside of the range of the particular I/O.
296 *
297 * Hence, amortize the cost of the lock against a full file
298 * flush and reduce the chances of repeated iolock cycles going
299 * forward.
300 */ 286 */
301 if (mapping->nrpages) { 287 ret = invalidate_inode_pages2_range(mapping,
302 ret = filemap_write_and_wait(mapping); 288 iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
303 if (ret) { 289 WARN_ON_ONCE(ret);
304 xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); 290 ret = 0;
305 return ret;
306 }
307
308 /*
309 * Invalidate whole pages. This can return an error if
310 * we fail to invalidate a page, but this should never
311 * happen on XFS. Warn if it does fail.
312 */
313 ret = invalidate_inode_pages2(mapping);
314 WARN_ON_ONCE(ret);
315 ret = 0;
316 }
317 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
318 } 291 }
319 292
320 data = *to; 293 data = *to;
@@ -324,8 +297,9 @@ xfs_file_dio_aio_read(
324 iocb->ki_pos += ret; 297 iocb->ki_pos += ret;
325 iov_iter_advance(to, ret); 298 iov_iter_advance(to, ret);
326 } 299 }
327 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
328 300
301out_unlock:
302 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
329 return ret; 303 return ret;
330} 304}
331 305
@@ -570,61 +544,49 @@ xfs_file_dio_aio_write(
570 if ((iocb->ki_pos | count) & target->bt_logical_sectormask) 544 if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
571 return -EINVAL; 545 return -EINVAL;
572 546
573 /* "unaligned" here means not aligned to a filesystem block */
574 if ((iocb->ki_pos & mp->m_blockmask) ||
575 ((iocb->ki_pos + count) & mp->m_blockmask))
576 unaligned_io = 1;
577
578 /* 547 /*
579 * We don't need to take an exclusive lock unless there page cache needs 548 * Don't take the exclusive iolock here unless the I/O is unaligned to
580 * to be invalidated or unaligned IO is being executed. We don't need to 549 * the file system block size. We don't need to consider the EOF
581 * consider the EOF extension case here because 550 * extension case here because xfs_file_aio_write_checks() will relock
582 * xfs_file_aio_write_checks() will relock the inode as necessary for 551 * the inode as necessary for EOF zeroing cases and fill out the new
583 * EOF zeroing cases and fill out the new inode size as appropriate. 552 * inode size as appropriate.
584 */ 553 */
585 if (unaligned_io || mapping->nrpages) 554 if ((iocb->ki_pos & mp->m_blockmask) ||
555 ((iocb->ki_pos + count) & mp->m_blockmask)) {
556 unaligned_io = 1;
586 iolock = XFS_IOLOCK_EXCL; 557 iolock = XFS_IOLOCK_EXCL;
587 else 558 } else {
588 iolock = XFS_IOLOCK_SHARED; 559 iolock = XFS_IOLOCK_SHARED;
589 xfs_rw_ilock(ip, iolock);
590
591 /*
592 * Recheck if there are cached pages that need invalidate after we got
593 * the iolock to protect against other threads adding new pages while
594 * we were waiting for the iolock.
595 */
596 if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
597 xfs_rw_iunlock(ip, iolock);
598 iolock = XFS_IOLOCK_EXCL;
599 xfs_rw_ilock(ip, iolock);
600 } 560 }
601 561
562 xfs_rw_ilock(ip, iolock);
563
602 ret = xfs_file_aio_write_checks(iocb, from, &iolock); 564 ret = xfs_file_aio_write_checks(iocb, from, &iolock);
603 if (ret) 565 if (ret)
604 goto out; 566 goto out;
605 count = iov_iter_count(from); 567 count = iov_iter_count(from);
606 end = iocb->ki_pos + count - 1; 568 end = iocb->ki_pos + count - 1;
607 569
608 /*
609 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
610 */
611 if (mapping->nrpages) { 570 if (mapping->nrpages) {
612 ret = filemap_write_and_wait(VFS_I(ip)->i_mapping); 571 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
613 if (ret) 572 if (ret)
614 goto out; 573 goto out;
574
615 /* 575 /*
616 * Invalidate whole pages. This can return an error if we fail 576 * Invalidate whole pages. This can return an error if we fail
617 * to invalidate a page, but this should never happen on XFS. 577 * to invalidate a page, but this should never happen on XFS.
618 * Warn if it does fail. 578 * Warn if it does fail.
619 */ 579 */
620 ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping); 580 ret = invalidate_inode_pages2_range(mapping,
581 iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
621 WARN_ON_ONCE(ret); 582 WARN_ON_ONCE(ret);
622 ret = 0; 583 ret = 0;
623 } 584 }
624 585
625 /* 586 /*
626 * If we are doing unaligned IO, wait for all other IO to drain, 587 * If we are doing unaligned IO, wait for all other IO to drain,
627 * otherwise demote the lock if we had to flush cached pages 588 * otherwise demote the lock if we had to take the exclusive lock
589 * for other reasons in xfs_file_aio_write_checks.
628 */ 590 */
629 if (unaligned_io) 591 if (unaligned_io)
630 inode_dio_wait(inode); 592 inode_dio_wait(inode);
@@ -947,134 +909,6 @@ out_unlock:
947 return error; 909 return error;
948} 910}
949 911
950/*
951 * Flush all file writes out to disk.
952 */
953static int
954xfs_file_wait_for_io(
955 struct inode *inode,
956 loff_t offset,
957 size_t len)
958{
959 loff_t rounding;
960 loff_t ioffset;
961 loff_t iendoffset;
962 loff_t bs;
963 int ret;
964
965 bs = inode->i_sb->s_blocksize;
966 inode_dio_wait(inode);
967
968 rounding = max_t(xfs_off_t, bs, PAGE_SIZE);
969 ioffset = round_down(offset, rounding);
970 iendoffset = round_up(offset + len, rounding) - 1;
971 ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
972 iendoffset);
973 return ret;
974}
975
976/* Hook up to the VFS reflink function */
977STATIC int
978xfs_file_share_range(
979 struct file *file_in,
980 loff_t pos_in,
981 struct file *file_out,
982 loff_t pos_out,
983 u64 len,
984 bool is_dedupe)
985{
986 struct inode *inode_in;
987 struct inode *inode_out;
988 ssize_t ret;
989 loff_t bs;
990 loff_t isize;
991 int same_inode;
992 loff_t blen;
993 unsigned int flags = 0;
994
995 inode_in = file_inode(file_in);
996 inode_out = file_inode(file_out);
997 bs = inode_out->i_sb->s_blocksize;
998
999 /* Don't touch certain kinds of inodes */
1000 if (IS_IMMUTABLE(inode_out))
1001 return -EPERM;
1002 if (IS_SWAPFILE(inode_in) ||
1003 IS_SWAPFILE(inode_out))
1004 return -ETXTBSY;
1005
1006 /* Reflink only works within this filesystem. */
1007 if (inode_in->i_sb != inode_out->i_sb)
1008 return -EXDEV;
1009 same_inode = (inode_in->i_ino == inode_out->i_ino);
1010
1011 /* Don't reflink dirs, pipes, sockets... */
1012 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
1013 return -EISDIR;
1014 if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
1015 return -EINVAL;
1016 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
1017 return -EINVAL;
1018
1019 /* Don't share DAX file data for now. */
1020 if (IS_DAX(inode_in) || IS_DAX(inode_out))
1021 return -EINVAL;
1022
1023 /* Are we going all the way to the end? */
1024 isize = i_size_read(inode_in);
1025 if (isize == 0)
1026 return 0;
1027 if (len == 0)
1028 len = isize - pos_in;
1029
1030 /* Ensure offsets don't wrap and the input is inside i_size */
1031 if (pos_in + len < pos_in || pos_out + len < pos_out ||
1032 pos_in + len > isize)
1033 return -EINVAL;
1034
1035 /* Don't allow dedupe past EOF in the dest file */
1036 if (is_dedupe) {
1037 loff_t disize;
1038
1039 disize = i_size_read(inode_out);
1040 if (pos_out >= disize || pos_out + len > disize)
1041 return -EINVAL;
1042 }
1043
1044 /* If we're linking to EOF, continue to the block boundary. */
1045 if (pos_in + len == isize)
1046 blen = ALIGN(isize, bs) - pos_in;
1047 else
1048 blen = len;
1049
1050 /* Only reflink if we're aligned to block boundaries */
1051 if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
1052 !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
1053 return -EINVAL;
1054
1055 /* Don't allow overlapped reflink within the same file */
1056 if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen)
1057 return -EINVAL;
1058
1059 /* Wait for the completion of any pending IOs on srcfile */
1060 ret = xfs_file_wait_for_io(inode_in, pos_in, len);
1061 if (ret)
1062 goto out;
1063 ret = xfs_file_wait_for_io(inode_out, pos_out, len);
1064 if (ret)
1065 goto out;
1066
1067 if (is_dedupe)
1068 flags |= XFS_REFLINK_DEDUPE;
1069 ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),
1070 pos_out, len, flags);
1071 if (ret < 0)
1072 goto out;
1073
1074out:
1075 return ret;
1076}
1077
1078STATIC ssize_t 912STATIC ssize_t
1079xfs_file_copy_range( 913xfs_file_copy_range(
1080 struct file *file_in, 914 struct file *file_in,
@@ -1086,7 +920,7 @@ xfs_file_copy_range(
1086{ 920{
1087 int error; 921 int error;
1088 922
1089 error = xfs_file_share_range(file_in, pos_in, file_out, pos_out, 923 error = xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
1090 len, false); 924 len, false);
1091 if (error) 925 if (error)
1092 return error; 926 return error;
@@ -1101,7 +935,7 @@ xfs_file_clone_range(
1101 loff_t pos_out, 935 loff_t pos_out,
1102 u64 len) 936 u64 len)
1103{ 937{
1104 return xfs_file_share_range(file_in, pos_in, file_out, pos_out, 938 return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
1105 len, false); 939 len, false);
1106} 940}
1107 941
@@ -1124,7 +958,7 @@ xfs_file_dedupe_range(
1124 if (len > XFS_MAX_DEDUPE_LEN) 958 if (len > XFS_MAX_DEDUPE_LEN)
1125 len = XFS_MAX_DEDUPE_LEN; 959 len = XFS_MAX_DEDUPE_LEN;
1126 960
1127 error = xfs_file_share_range(src_file, loff, dst_file, dst_loff, 961 error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
1128 len, true); 962 len, true);
1129 if (error) 963 if (error)
1130 return error; 964 return error;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 14796b744e0a..f295049db681 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1656,9 +1656,9 @@ void
1656xfs_inode_set_cowblocks_tag( 1656xfs_inode_set_cowblocks_tag(
1657 xfs_inode_t *ip) 1657 xfs_inode_t *ip)
1658{ 1658{
1659 trace_xfs_inode_set_eofblocks_tag(ip); 1659 trace_xfs_inode_set_cowblocks_tag(ip);
1660 return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks, 1660 return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
1661 trace_xfs_perag_set_eofblocks, 1661 trace_xfs_perag_set_cowblocks,
1662 XFS_ICI_COWBLOCKS_TAG); 1662 XFS_ICI_COWBLOCKS_TAG);
1663} 1663}
1664 1664
@@ -1666,7 +1666,7 @@ void
1666xfs_inode_clear_cowblocks_tag( 1666xfs_inode_clear_cowblocks_tag(
1667 xfs_inode_t *ip) 1667 xfs_inode_t *ip)
1668{ 1668{
1669 trace_xfs_inode_clear_eofblocks_tag(ip); 1669 trace_xfs_inode_clear_cowblocks_tag(ip);
1670 return __xfs_inode_clear_eofblocks_tag(ip, 1670 return __xfs_inode_clear_eofblocks_tag(ip,
1671 trace_xfs_perag_clear_eofblocks, XFS_ICI_COWBLOCKS_TAG); 1671 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1672} 1672}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d907eb9f8ef3..436e109bb01e 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -566,6 +566,17 @@ xfs_file_iomap_begin_delay(
566 xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx, 566 xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
567 &got, &prev); 567 &got, &prev);
568 if (!eof && got.br_startoff <= offset_fsb) { 568 if (!eof && got.br_startoff <= offset_fsb) {
569 if (xfs_is_reflink_inode(ip)) {
570 bool shared;
571
572 end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
573 maxbytes_fsb);
574 xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
575 error = xfs_reflink_reserve_cow(ip, &got, &shared);
576 if (error)
577 goto out_unlock;
578 }
579
569 trace_xfs_iomap_found(ip, offset, count, 0, &got); 580 trace_xfs_iomap_found(ip, offset, count, 0, &got);
570 goto done; 581 goto done;
571 } 582 }
@@ -961,19 +972,13 @@ xfs_file_iomap_begin(
961 struct xfs_mount *mp = ip->i_mount; 972 struct xfs_mount *mp = ip->i_mount;
962 struct xfs_bmbt_irec imap; 973 struct xfs_bmbt_irec imap;
963 xfs_fileoff_t offset_fsb, end_fsb; 974 xfs_fileoff_t offset_fsb, end_fsb;
964 bool shared, trimmed;
965 int nimaps = 1, error = 0; 975 int nimaps = 1, error = 0;
976 bool shared = false, trimmed = false;
966 unsigned lockmode; 977 unsigned lockmode;
967 978
968 if (XFS_FORCED_SHUTDOWN(mp)) 979 if (XFS_FORCED_SHUTDOWN(mp))
969 return -EIO; 980 return -EIO;
970 981
971 if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
972 error = xfs_reflink_reserve_cow_range(ip, offset, length);
973 if (error < 0)
974 return error;
975 }
976
977 if ((flags & IOMAP_WRITE) && !IS_DAX(inode) && 982 if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
978 !xfs_get_extsz_hint(ip)) { 983 !xfs_get_extsz_hint(ip)) {
979 /* Reserve delalloc blocks for regular writeback. */ 984 /* Reserve delalloc blocks for regular writeback. */
@@ -981,7 +986,16 @@ xfs_file_iomap_begin(
981 iomap); 986 iomap);
982 } 987 }
983 988
984 lockmode = xfs_ilock_data_map_shared(ip); 989 /*
990 * COW writes will allocate delalloc space, so we need to make sure
991 * to take the lock exclusively here.
992 */
993 if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
994 lockmode = XFS_ILOCK_EXCL;
995 xfs_ilock(ip, XFS_ILOCK_EXCL);
996 } else {
997 lockmode = xfs_ilock_data_map_shared(ip);
998 }
985 999
986 ASSERT(offset <= mp->m_super->s_maxbytes); 1000 ASSERT(offset <= mp->m_super->s_maxbytes);
987 if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes) 1001 if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
@@ -991,16 +1005,24 @@ xfs_file_iomap_begin(
991 1005
992 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, 1006 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
993 &nimaps, 0); 1007 &nimaps, 0);
994 if (error) { 1008 if (error)
995 xfs_iunlock(ip, lockmode); 1009 goto out_unlock;
996 return error; 1010
1011 if (flags & IOMAP_REPORT) {
1012 /* Trim the mapping to the nearest shared extent boundary. */
1013 error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
1014 &trimmed);
1015 if (error)
1016 goto out_unlock;
997 } 1017 }
998 1018
999 /* Trim the mapping to the nearest shared extent boundary. */ 1019 if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
1000 error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed); 1020 error = xfs_reflink_reserve_cow(ip, &imap, &shared);
1001 if (error) { 1021 if (error)
1002 xfs_iunlock(ip, lockmode); 1022 goto out_unlock;
1003 return error; 1023
1024 end_fsb = imap.br_startoff + imap.br_blockcount;
1025 length = XFS_FSB_TO_B(mp, end_fsb) - offset;
1004 } 1026 }
1005 1027
1006 if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) { 1028 if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
@@ -1039,6 +1061,9 @@ xfs_file_iomap_begin(
1039 if (shared) 1061 if (shared)
1040 iomap->flags |= IOMAP_F_SHARED; 1062 iomap->flags |= IOMAP_F_SHARED;
1041 return 0; 1063 return 0;
1064out_unlock:
1065 xfs_iunlock(ip, lockmode);
1066 return error;
1042} 1067}
1043 1068
1044static int 1069static int
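
[Editor's note] The lock-mode decision added above is easy to miss inside the
hunk. Below is a minimal, compilable sketch of the logic, assuming stand-in
names (the enum, helper, and main() are illustrative, not XFS code); only the
flag values mirror the iomap header:

    #include <stdio.h>

    #define IOMAP_WRITE (1 << 0)
    #define IOMAP_ZERO  (1 << 1)

    enum lockmode { LOCK_SHARED, LOCK_EXCL };

    /* Writes and zeroing against a reflink inode may allocate COW delalloc
     * space, so they must take the inode lock exclusively; plain mapping
     * lookups can keep sharing it. */
    static enum lockmode pick_lockmode(unsigned int flags, int is_reflink)
    {
        if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && is_reflink)
            return LOCK_EXCL;
        return LOCK_SHARED;
    }

    int main(void)
    {
        printf("reflink write: %s\n",
               pick_lockmode(IOMAP_WRITE, 1) == LOCK_EXCL ? "excl" : "shared");
        printf("plain read:    %s\n",
               pick_lockmode(0, 1) == LOCK_EXCL ? "excl" : "shared");
        return 0;
    }
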
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index fc7873942bea..b341f10cf481 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1009,6 +1009,7 @@ xfs_mountfs(
1009 out_quota: 1009 out_quota:
1010 xfs_qm_unmount_quotas(mp); 1010 xfs_qm_unmount_quotas(mp);
1011 out_rtunmount: 1011 out_rtunmount:
1012 mp->m_super->s_flags &= ~MS_ACTIVE;
1012 xfs_rtunmount_inodes(mp); 1013 xfs_rtunmount_inodes(mp);
1013 out_rele_rip: 1014 out_rele_rip:
1014 IRELE(rip); 1015 IRELE(rip);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 5965e9455d91..a279b4e7f5fe 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -182,7 +182,8 @@ xfs_reflink_trim_around_shared(
182 if (!xfs_is_reflink_inode(ip) || 182 if (!xfs_is_reflink_inode(ip) ||
183 ISUNWRITTEN(irec) || 183 ISUNWRITTEN(irec) ||
184 irec->br_startblock == HOLESTARTBLOCK || 184 irec->br_startblock == HOLESTARTBLOCK ||
185 irec->br_startblock == DELAYSTARTBLOCK) { 185 irec->br_startblock == DELAYSTARTBLOCK ||
186 isnullstartblock(irec->br_startblock)) {
186 *shared = false; 187 *shared = false;
187 return 0; 188 return 0;
188 } 189 }
@@ -227,50 +228,54 @@ xfs_reflink_trim_around_shared(
227 } 228 }
228} 229}
229 230
230/* Create a CoW reservation for a range of blocks within a file. */ 231/*
231static int 232 * Trim the passed-in imap to the next shared/unshared extent boundary, and
232__xfs_reflink_reserve_cow( 233 * if imap->br_startoff points to a shared extent, reserve space for it in the
234 * COW fork. In this case *shared is set to true, else to false.
235 *
236 * Note that imap will always contain the block numbers for the existing blocks
237 * in the data fork, as the upper layers need them for read-modify-write
238 * operations.
239 */
240int
241xfs_reflink_reserve_cow(
233 struct xfs_inode *ip, 242 struct xfs_inode *ip,
234 xfs_fileoff_t *offset_fsb, 243 struct xfs_bmbt_irec *imap,
235 xfs_fileoff_t end_fsb, 244 bool *shared)
236 bool *skipped)
237{ 245{
238 struct xfs_bmbt_irec got, prev, imap; 246 struct xfs_bmbt_irec got, prev;
239 xfs_fileoff_t orig_end_fsb; 247 xfs_fileoff_t end_fsb, orig_end_fsb;
240 int nimaps, eof = 0, error = 0; 248 int eof = 0, error = 0;
241 bool shared = false, trimmed = false; 249 bool trimmed;
242 xfs_extnum_t idx; 250 xfs_extnum_t idx;
243 xfs_extlen_t align; 251 xfs_extlen_t align;
244 252
245 /* Already reserved? Skip the refcount btree access. */ 253 /*
246 xfs_bmap_search_extents(ip, *offset_fsb, XFS_COW_FORK, &eof, &idx, 254 * Search the COW fork extent list first. This serves two purposes:
255 * first it implements the speculative preallocation using cowextsize,
256 * so that we also unshare blocks adjacent to shared blocks instead
257 * of just the shared blocks themselves. Second, the lookup in the
258 * extent list is generally faster than going out to the shared extent
259 * tree.
260 */
261 xfs_bmap_search_extents(ip, imap->br_startoff, XFS_COW_FORK, &eof, &idx,
247 &got, &prev); 262 &got, &prev);
248 if (!eof && got.br_startoff <= *offset_fsb) { 263 if (!eof && got.br_startoff <= imap->br_startoff) {
249 end_fsb = orig_end_fsb = got.br_startoff + got.br_blockcount; 264 trace_xfs_reflink_cow_found(ip, imap);
250 trace_xfs_reflink_cow_found(ip, &got); 265 xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
251 goto done;
252 }
253 266
254 /* Read extent from the source file. */ 267 *shared = true;
255 nimaps = 1; 268 return 0;
256 error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb, 269 }
257 &imap, &nimaps, 0);
258 if (error)
259 goto out_unlock;
260 ASSERT(nimaps == 1);
261 270
262 /* Trim the mapping to the nearest shared extent boundary. */ 271 /* Trim the mapping to the nearest shared extent boundary. */
263 error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed); 272 error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
264 if (error) 273 if (error)
265 goto out_unlock; 274 return error;
266
267 end_fsb = orig_end_fsb = imap.br_startoff + imap.br_blockcount;
268 275
269 /* Not shared? Just report the (potentially capped) extent. */ 276 /* Not shared? Just report the (potentially capped) extent. */
270 if (!shared) { 277 if (!*shared)
271 *skipped = true; 278 return 0;
272 goto done;
273 }
274 279
275 /* 280 /*
276 * Fork all the shared blocks from our write offset until the end of 281 * Fork all the shared blocks from our write offset until the end of
@@ -278,72 +283,38 @@ __xfs_reflink_reserve_cow(
278 */ 283 */
279 error = xfs_qm_dqattach_locked(ip, 0); 284 error = xfs_qm_dqattach_locked(ip, 0);
280 if (error) 285 if (error)
281 goto out_unlock; 286 return error;
287
288 end_fsb = orig_end_fsb = imap->br_startoff + imap->br_blockcount;
282 289
283 align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip)); 290 align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip));
284 if (align) 291 if (align)
285 end_fsb = roundup_64(end_fsb, align); 292 end_fsb = roundup_64(end_fsb, align);
286 293
287retry: 294retry:
288 error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, *offset_fsb, 295 error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
289 end_fsb - *offset_fsb, &got, 296 end_fsb - imap->br_startoff, &got, &prev, &idx, eof);
290 &prev, &idx, eof);
291 switch (error) { 297 switch (error) {
292 case 0: 298 case 0:
293 break; 299 break;
294 case -ENOSPC: 300 case -ENOSPC:
295 case -EDQUOT: 301 case -EDQUOT:
296 /* retry without any preallocation */ 302 /* retry without any preallocation */
297 trace_xfs_reflink_cow_enospc(ip, &imap); 303 trace_xfs_reflink_cow_enospc(ip, imap);
298 if (end_fsb != orig_end_fsb) { 304 if (end_fsb != orig_end_fsb) {
299 end_fsb = orig_end_fsb; 305 end_fsb = orig_end_fsb;
300 goto retry; 306 goto retry;
301 } 307 }
302 /*FALLTHRU*/ 308 /*FALLTHRU*/
303 default: 309 default:
304 goto out_unlock; 310 return error;
305 } 311 }
306 312
307 if (end_fsb != orig_end_fsb) 313 if (end_fsb != orig_end_fsb)
308 xfs_inode_set_cowblocks_tag(ip); 314 xfs_inode_set_cowblocks_tag(ip);
309 315
310 trace_xfs_reflink_cow_alloc(ip, &got); 316 trace_xfs_reflink_cow_alloc(ip, &got);
311done: 317 return 0;
312 *offset_fsb = end_fsb;
313out_unlock:
314 return error;
315}
316
317/* Create a CoW reservation for part of a file. */
318int
319xfs_reflink_reserve_cow_range(
320 struct xfs_inode *ip,
321 xfs_off_t offset,
322 xfs_off_t count)
323{
324 struct xfs_mount *mp = ip->i_mount;
325 xfs_fileoff_t offset_fsb, end_fsb;
326 bool skipped = false;
327 int error;
328
329 trace_xfs_reflink_reserve_cow_range(ip, offset, count);
330
331 offset_fsb = XFS_B_TO_FSBT(mp, offset);
332 end_fsb = XFS_B_TO_FSB(mp, offset + count);
333
334 xfs_ilock(ip, XFS_ILOCK_EXCL);
335 while (offset_fsb < end_fsb) {
336 error = __xfs_reflink_reserve_cow(ip, &offset_fsb, end_fsb,
337 &skipped);
338 if (error) {
339 trace_xfs_reflink_reserve_cow_range_error(ip, error,
340 _RET_IP_);
341 break;
342 }
343 }
344 xfs_iunlock(ip, XFS_ILOCK_EXCL);
345
346 return error;
347} 318}
348 319
349/* Allocate all CoW reservations covering a range of blocks in a file. */ 320/* Allocate all CoW reservations covering a range of blocks in a file. */
@@ -358,9 +329,8 @@ __xfs_reflink_allocate_cow(
358 struct xfs_defer_ops dfops; 329 struct xfs_defer_ops dfops;
359 struct xfs_trans *tp; 330 struct xfs_trans *tp;
360 xfs_fsblock_t first_block; 331 xfs_fsblock_t first_block;
361 xfs_fileoff_t next_fsb;
362 int nimaps = 1, error; 332 int nimaps = 1, error;
363 bool skipped = false; 333 bool shared;
364 334
365 xfs_defer_init(&dfops, &first_block); 335 xfs_defer_init(&dfops, &first_block);
366 336
@@ -371,33 +341,38 @@ __xfs_reflink_allocate_cow(
371 341
372 xfs_ilock(ip, XFS_ILOCK_EXCL); 342 xfs_ilock(ip, XFS_ILOCK_EXCL);
373 343
374 next_fsb = *offset_fsb; 344 /* Read extent from the source file. */
375 error = __xfs_reflink_reserve_cow(ip, &next_fsb, end_fsb, &skipped); 345 nimaps = 1;
346 error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
347 &imap, &nimaps, 0);
348 if (error)
349 goto out_unlock;
350 ASSERT(nimaps == 1);
351
352 error = xfs_reflink_reserve_cow(ip, &imap, &shared);
376 if (error) 353 if (error)
377 goto out_trans_cancel; 354 goto out_trans_cancel;
378 355
379 if (skipped) { 356 if (!shared) {
380 *offset_fsb = next_fsb; 357 *offset_fsb = imap.br_startoff + imap.br_blockcount;
381 goto out_trans_cancel; 358 goto out_trans_cancel;
382 } 359 }
383 360
384 xfs_trans_ijoin(tp, ip, 0); 361 xfs_trans_ijoin(tp, ip, 0);
385 error = xfs_bmapi_write(tp, ip, *offset_fsb, next_fsb - *offset_fsb, 362 error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount,
386 XFS_BMAPI_COWFORK, &first_block, 363 XFS_BMAPI_COWFORK, &first_block,
387 XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 364 XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK),
388 &imap, &nimaps, &dfops); 365 &imap, &nimaps, &dfops);
389 if (error) 366 if (error)
390 goto out_trans_cancel; 367 goto out_trans_cancel;
391 368
392 /* We might not have been able to map the whole delalloc extent */
393 *offset_fsb = min(*offset_fsb + imap.br_blockcount, next_fsb);
394
395 error = xfs_defer_finish(&tp, &dfops, NULL); 369 error = xfs_defer_finish(&tp, &dfops, NULL);
396 if (error) 370 if (error)
397 goto out_trans_cancel; 371 goto out_trans_cancel;
398 372
399 error = xfs_trans_commit(tp); 373 error = xfs_trans_commit(tp);
400 374
375 *offset_fsb = imap.br_startoff + imap.br_blockcount;
401out_unlock: 376out_unlock:
402 xfs_iunlock(ip, XFS_ILOCK_EXCL); 377 xfs_iunlock(ip, XFS_ILOCK_EXCL);
403 return error; 378 return error;
@@ -536,58 +511,49 @@ xfs_reflink_cancel_cow_blocks(
536 xfs_fileoff_t offset_fsb, 511 xfs_fileoff_t offset_fsb,
537 xfs_fileoff_t end_fsb) 512 xfs_fileoff_t end_fsb)
538{ 513{
539 struct xfs_bmbt_irec irec; 514 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
540 xfs_filblks_t count_fsb; 515 struct xfs_bmbt_irec got, prev, del;
516 xfs_extnum_t idx;
541 xfs_fsblock_t firstfsb; 517 xfs_fsblock_t firstfsb;
542 struct xfs_defer_ops dfops; 518 struct xfs_defer_ops dfops;
543 int error = 0; 519 int error = 0, eof = 0;
544 int nimaps;
545 520
546 if (!xfs_is_reflink_inode(ip)) 521 if (!xfs_is_reflink_inode(ip))
547 return 0; 522 return 0;
548 523
549 /* Go find the old extent in the CoW fork. */ 524 xfs_bmap_search_extents(ip, offset_fsb, XFS_COW_FORK, &eof, &idx,
550 while (offset_fsb < end_fsb) { 525 &got, &prev);
551 nimaps = 1; 526 if (eof)
552 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb); 527 return 0;
553 error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
554 &nimaps, XFS_BMAPI_COWFORK);
555 if (error)
556 break;
557 ASSERT(nimaps == 1);
558
559 trace_xfs_reflink_cancel_cow(ip, &irec);
560 528
561 if (irec.br_startblock == DELAYSTARTBLOCK) { 529 while (got.br_startoff < end_fsb) {
562 /* Free a delayed allocation. */ 530 del = got;
563 xfs_mod_fdblocks(ip->i_mount, irec.br_blockcount, 531 xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
564 false); 532 trace_xfs_reflink_cancel_cow(ip, &del);
565 ip->i_delayed_blks -= irec.br_blockcount;
566 533
567 /* Remove the mapping from the CoW fork. */ 534 if (isnullstartblock(del.br_startblock)) {
568 error = xfs_bunmapi_cow(ip, &irec); 535 error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
536 &idx, &got, &del);
569 if (error) 537 if (error)
570 break; 538 break;
571 } else if (irec.br_startblock == HOLESTARTBLOCK) {
572 /* empty */
573 } else { 539 } else {
574 xfs_trans_ijoin(*tpp, ip, 0); 540 xfs_trans_ijoin(*tpp, ip, 0);
575 xfs_defer_init(&dfops, &firstfsb); 541 xfs_defer_init(&dfops, &firstfsb);
576 542
577 /* Free the CoW orphan record. */ 543 /* Free the CoW orphan record. */
578 error = xfs_refcount_free_cow_extent(ip->i_mount, 544 error = xfs_refcount_free_cow_extent(ip->i_mount,
579 &dfops, irec.br_startblock, 545 &dfops, del.br_startblock,
580 irec.br_blockcount); 546 del.br_blockcount);
581 if (error) 547 if (error)
582 break; 548 break;
583 549
584 xfs_bmap_add_free(ip->i_mount, &dfops, 550 xfs_bmap_add_free(ip->i_mount, &dfops,
585 irec.br_startblock, irec.br_blockcount, 551 del.br_startblock, del.br_blockcount,
586 NULL); 552 NULL);
587 553
588 /* Update quota accounting */ 554 /* Update quota accounting */
589 xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT, 555 xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
590 -(long)irec.br_blockcount); 556 -(long)del.br_blockcount);
591 557
592 /* Roll the transaction */ 558 /* Roll the transaction */
593 error = xfs_defer_finish(tpp, &dfops, ip); 559 error = xfs_defer_finish(tpp, &dfops, ip);
@@ -597,15 +563,18 @@ xfs_reflink_cancel_cow_blocks(
597 } 563 }
598 564
599 /* Remove the mapping from the CoW fork. */ 565 /* Remove the mapping from the CoW fork. */
600 error = xfs_bunmapi_cow(ip, &irec); 566 xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
601 if (error)
602 break;
603 } 567 }
604 568
605 /* Roll on... */ 569 if (++idx >= ifp->if_bytes / sizeof(struct xfs_bmbt_rec))
606 offset_fsb = irec.br_startoff + irec.br_blockcount; 570 break;
571 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
607 } 572 }
608 573
574 /* clear tag if cow fork is emptied */
575 if (!ifp->if_bytes)
576 xfs_inode_clear_cowblocks_tag(ip);
577
609 return error; 578 return error;
610} 579}
611 580
@@ -668,25 +637,26 @@ xfs_reflink_end_cow(
668 xfs_off_t offset, 637 xfs_off_t offset,
669 xfs_off_t count) 638 xfs_off_t count)
670{ 639{
671 struct xfs_bmbt_irec irec; 640 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
672 struct xfs_bmbt_irec uirec; 641 struct xfs_bmbt_irec got, prev, del;
673 struct xfs_trans *tp; 642 struct xfs_trans *tp;
674 xfs_fileoff_t offset_fsb; 643 xfs_fileoff_t offset_fsb;
675 xfs_fileoff_t end_fsb; 644 xfs_fileoff_t end_fsb;
676 xfs_filblks_t count_fsb;
677 xfs_fsblock_t firstfsb; 645 xfs_fsblock_t firstfsb;
678 struct xfs_defer_ops dfops; 646 struct xfs_defer_ops dfops;
679 int error; 647 int error, eof = 0;
680 unsigned int resblks; 648 unsigned int resblks;
681 xfs_filblks_t ilen;
682 xfs_filblks_t rlen; 649 xfs_filblks_t rlen;
683 int nimaps; 650 xfs_extnum_t idx;
684 651
685 trace_xfs_reflink_end_cow(ip, offset, count); 652 trace_xfs_reflink_end_cow(ip, offset, count);
686 653
654 /* No COW extents? That's easy! */
655 if (ifp->if_bytes == 0)
656 return 0;
657
687 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); 658 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
688 end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count); 659 end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
689 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
690 660
691 /* Start a rolling transaction to switch the mappings */ 661 /* Start a rolling transaction to switch the mappings */
692 resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK); 662 resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
@@ -698,72 +668,65 @@ xfs_reflink_end_cow(
698 xfs_ilock(ip, XFS_ILOCK_EXCL); 668 xfs_ilock(ip, XFS_ILOCK_EXCL);
699 xfs_trans_ijoin(tp, ip, 0); 669 xfs_trans_ijoin(tp, ip, 0);
700 670
701 /* Go find the old extent in the CoW fork. */ 671 xfs_bmap_search_extents(ip, end_fsb - 1, XFS_COW_FORK, &eof, &idx,
702 while (offset_fsb < end_fsb) { 672 &got, &prev);
703 /* Read extent from the source file */
704 nimaps = 1;
705 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
706 error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
707 &nimaps, XFS_BMAPI_COWFORK);
708 if (error)
709 goto out_cancel;
710 ASSERT(nimaps == 1);
711 673
712 ASSERT(irec.br_startblock != DELAYSTARTBLOCK); 674 /* If there is a hole at end_fsb - 1 go to the previous extent */
713 trace_xfs_reflink_cow_remap(ip, &irec); 675 if (eof || got.br_startoff > end_fsb) {
676 ASSERT(idx > 0);
677 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, --idx), &got);
678 }
714 679
715 /* 680 /* Walk backwards until we're out of the I/O range... */
716 * We can have a hole in the CoW fork if part of a directio 681 while (got.br_startoff + got.br_blockcount > offset_fsb) {
717 * write is CoW but part of it isn't. 682 del = got;
718 */ 683 xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
719 rlen = ilen = irec.br_blockcount; 684
720 if (irec.br_startblock == HOLESTARTBLOCK) 685 /* Extent delete may have bumped idx forward */
686 if (!del.br_blockcount) {
687 idx--;
721 goto next_extent; 688 goto next_extent;
689 }
690
691 ASSERT(!isnullstartblock(got.br_startblock));
722 692
723 /* Unmap the old blocks in the data fork. */ 693 /* Unmap the old blocks in the data fork. */
724 while (rlen) { 694 xfs_defer_init(&dfops, &firstfsb);
725 xfs_defer_init(&dfops, &firstfsb); 695 rlen = del.br_blockcount;
726 error = __xfs_bunmapi(tp, ip, irec.br_startoff, 696 error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
727 &rlen, 0, 1, &firstfsb, &dfops); 697 &firstfsb, &dfops);
728 if (error) 698 if (error)
729 goto out_defer; 699 goto out_defer;
730
731 /*
732 * Trim the extent to whatever got unmapped.
733 * Remember, bunmapi works backwards.
734 */
735 uirec.br_startblock = irec.br_startblock + rlen;
736 uirec.br_startoff = irec.br_startoff + rlen;
737 uirec.br_blockcount = irec.br_blockcount - rlen;
738 irec.br_blockcount = rlen;
739 trace_xfs_reflink_cow_remap_piece(ip, &uirec);
740 700
741 /* Free the CoW orphan record. */ 701 /* Trim the extent to whatever got unmapped. */
742 error = xfs_refcount_free_cow_extent(tp->t_mountp, 702 if (rlen) {
743 &dfops, uirec.br_startblock, 703 xfs_trim_extent(&del, del.br_startoff + rlen,
744 uirec.br_blockcount); 704 del.br_blockcount - rlen);
745 if (error) 705 }
746 goto out_defer; 706 trace_xfs_reflink_cow_remap(ip, &del);
747 707
748 /* Map the new blocks into the data fork. */ 708 /* Free the CoW orphan record. */
749 error = xfs_bmap_map_extent(tp->t_mountp, &dfops, 709 error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops,
750 ip, &uirec); 710 del.br_startblock, del.br_blockcount);
751 if (error) 711 if (error)
752 goto out_defer; 712 goto out_defer;
753 713
754 /* Remove the mapping from the CoW fork. */ 714 /* Map the new blocks into the data fork. */
755 error = xfs_bunmapi_cow(ip, &uirec); 715 error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del);
756 if (error) 716 if (error)
757 goto out_defer; 717 goto out_defer;
758 718
759 error = xfs_defer_finish(&tp, &dfops, ip); 719 /* Remove the mapping from the CoW fork. */
760 if (error) 720 xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
761 goto out_defer; 721
762 } 722 error = xfs_defer_finish(&tp, &dfops, ip);
723 if (error)
724 goto out_defer;
763 725
764next_extent: 726next_extent:
765 /* Roll on... */ 727 if (idx < 0)
766 offset_fsb = irec.br_startoff + ilen; 728 break;
729 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
767 } 730 }
768 731
769 error = xfs_trans_commit(tp); 732 error = xfs_trans_commit(tp);
@@ -774,7 +737,6 @@ next_extent:
774 737
775out_defer: 738out_defer:
776 xfs_defer_cancel(&dfops); 739 xfs_defer_cancel(&dfops);
777out_cancel:
778 xfs_trans_cancel(tp); 740 xfs_trans_cancel(tp);
779 xfs_iunlock(ip, XFS_ILOCK_EXCL); 741 xfs_iunlock(ip, XFS_ILOCK_EXCL);
780out: 742out:
@@ -1312,19 +1274,26 @@ out_error:
1312 */ 1274 */
1313int 1275int
1314xfs_reflink_remap_range( 1276xfs_reflink_remap_range(
1315 struct xfs_inode *src, 1277 struct file *file_in,
1316 xfs_off_t srcoff, 1278 loff_t pos_in,
1317 struct xfs_inode *dest, 1279 struct file *file_out,
1318 xfs_off_t destoff, 1280 loff_t pos_out,
1319 xfs_off_t len, 1281 u64 len,
1320 unsigned int flags) 1282 bool is_dedupe)
1321{ 1283{
1284 struct inode *inode_in = file_inode(file_in);
1285 struct xfs_inode *src = XFS_I(inode_in);
1286 struct inode *inode_out = file_inode(file_out);
1287 struct xfs_inode *dest = XFS_I(inode_out);
1322 struct xfs_mount *mp = src->i_mount; 1288 struct xfs_mount *mp = src->i_mount;
1289 loff_t bs = inode_out->i_sb->s_blocksize;
1290 bool same_inode = (inode_in == inode_out);
1323 xfs_fileoff_t sfsbno, dfsbno; 1291 xfs_fileoff_t sfsbno, dfsbno;
1324 xfs_filblks_t fsblen; 1292 xfs_filblks_t fsblen;
1325 int error;
1326 xfs_extlen_t cowextsize; 1293 xfs_extlen_t cowextsize;
1327 bool is_same; 1294 loff_t isize;
1295 ssize_t ret;
1296 loff_t blen;
1328 1297
1329 if (!xfs_sb_version_hasreflink(&mp->m_sb)) 1298 if (!xfs_sb_version_hasreflink(&mp->m_sb))
1330 return -EOPNOTSUPP; 1299 return -EOPNOTSUPP;
@@ -1332,17 +1301,8 @@ xfs_reflink_remap_range(
1332 if (XFS_FORCED_SHUTDOWN(mp)) 1301 if (XFS_FORCED_SHUTDOWN(mp))
1333 return -EIO; 1302 return -EIO;
1334 1303
1335 /* Don't reflink realtime inodes */
1336 if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
1337 return -EINVAL;
1338
1339 if (flags & ~XFS_REFLINK_ALL)
1340 return -EINVAL;
1341
1342 trace_xfs_reflink_remap_range(src, srcoff, len, dest, destoff);
1343
1344 /* Lock both files against IO */ 1304 /* Lock both files against IO */
1345 if (src->i_ino == dest->i_ino) { 1305 if (same_inode) {
1346 xfs_ilock(src, XFS_IOLOCK_EXCL); 1306 xfs_ilock(src, XFS_IOLOCK_EXCL);
1347 xfs_ilock(src, XFS_MMAPLOCK_EXCL); 1307 xfs_ilock(src, XFS_MMAPLOCK_EXCL);
1348 } else { 1308 } else {
@@ -1350,39 +1310,126 @@ xfs_reflink_remap_range(
1350 xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL); 1310 xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
1351 } 1311 }
1352 1312
1313 /* Don't touch certain kinds of inodes */
1314 ret = -EPERM;
1315 if (IS_IMMUTABLE(inode_out))
1316 goto out_unlock;
1317
1318 ret = -ETXTBSY;
1319 if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
1320 goto out_unlock;
1321
1322
1323 /* Don't reflink dirs, pipes, sockets... */
1324 ret = -EISDIR;
1325 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
1326 goto out_unlock;
1327 ret = -EINVAL;
1328 if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
1329 goto out_unlock;
1330 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
1331 goto out_unlock;
1332
1333 /* Don't reflink realtime inodes */
1334 if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
1335 goto out_unlock;
1336
1337 /* Don't share DAX file data for now. */
1338 if (IS_DAX(inode_in) || IS_DAX(inode_out))
1339 goto out_unlock;
1340
1341 /* Are we going all the way to the end? */
1342 isize = i_size_read(inode_in);
1343 if (isize == 0) {
1344 ret = 0;
1345 goto out_unlock;
1346 }
1347
1348 if (len == 0)
1349 len = isize - pos_in;
1350
1351 /* Ensure offsets don't wrap and the input is inside i_size */
1352 if (pos_in + len < pos_in || pos_out + len < pos_out ||
1353 pos_in + len > isize)
1354 goto out_unlock;
1355
1356 /* Don't allow dedupe past EOF in the dest file */
1357 if (is_dedupe) {
1358 loff_t disize;
1359
1360 disize = i_size_read(inode_out);
1361 if (pos_out >= disize || pos_out + len > disize)
1362 goto out_unlock;
1363 }
1364
1365 /* If we're linking to EOF, continue to the block boundary. */
1366 if (pos_in + len == isize)
1367 blen = ALIGN(isize, bs) - pos_in;
1368 else
1369 blen = len;
1370
1371 /* Only reflink if we're aligned to block boundaries */
1372 if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
1373 !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
1374 goto out_unlock;
1375
1376 /* Don't allow overlapped reflink within the same file */
1377 if (same_inode) {
1378 if (pos_out + blen > pos_in && pos_out < pos_in + blen)
1379 goto out_unlock;
1380 }
1381
1382 /* Wait for the completion of any pending IOs on both files */
1383 inode_dio_wait(inode_in);
1384 if (!same_inode)
1385 inode_dio_wait(inode_out);
1386
1387 ret = filemap_write_and_wait_range(inode_in->i_mapping,
1388 pos_in, pos_in + len - 1);
1389 if (ret)
1390 goto out_unlock;
1391
1392 ret = filemap_write_and_wait_range(inode_out->i_mapping,
1393 pos_out, pos_out + len - 1);
1394 if (ret)
1395 goto out_unlock;
1396
1397 trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
1398
1353 /* 1399 /*
1354 * Check that the extents are the same. 1400 * Check that the extents are the same.
1355 */ 1401 */
1356 if (flags & XFS_REFLINK_DEDUPE) { 1402 if (is_dedupe) {
1357 is_same = false; 1403 bool is_same = false;
1358 error = xfs_compare_extents(VFS_I(src), srcoff, VFS_I(dest), 1404
1359 destoff, len, &is_same); 1405 ret = xfs_compare_extents(inode_in, pos_in, inode_out, pos_out,
1360 if (error) 1406 len, &is_same);
1361 goto out_error; 1407 if (ret)
1408 goto out_unlock;
1362 if (!is_same) { 1409 if (!is_same) {
1363 error = -EBADE; 1410 ret = -EBADE;
1364 goto out_error; 1411 goto out_unlock;
1365 } 1412 }
1366 } 1413 }
1367 1414
1368 error = xfs_reflink_set_inode_flag(src, dest); 1415 ret = xfs_reflink_set_inode_flag(src, dest);
1369 if (error) 1416 if (ret)
1370 goto out_error; 1417 goto out_unlock;
1371 1418
1372 /* 1419 /*
1373 * Invalidate the page cache so that we can clear any CoW mappings 1420 * Invalidate the page cache so that we can clear any CoW mappings
1374 * in the destination file. 1421 * in the destination file.
1375 */ 1422 */
1376 truncate_inode_pages_range(&VFS_I(dest)->i_data, destoff, 1423 truncate_inode_pages_range(&inode_out->i_data, pos_out,
1377 PAGE_ALIGN(destoff + len) - 1); 1424 PAGE_ALIGN(pos_out + len) - 1);
1378 1425
1379 dfsbno = XFS_B_TO_FSBT(mp, destoff); 1426 dfsbno = XFS_B_TO_FSBT(mp, pos_out);
1380 sfsbno = XFS_B_TO_FSBT(mp, srcoff); 1427 sfsbno = XFS_B_TO_FSBT(mp, pos_in);
1381 fsblen = XFS_B_TO_FSB(mp, len); 1428 fsblen = XFS_B_TO_FSB(mp, len);
1382 error = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen, 1429 ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
1383 destoff + len); 1430 pos_out + len);
1384 if (error) 1431 if (ret)
1385 goto out_error; 1432 goto out_unlock;
1386 1433
1387 /* 1434 /*
1388 * Carry the cowextsize hint from src to dest if we're sharing the 1435 * Carry the cowextsize hint from src to dest if we're sharing the
@@ -1390,26 +1437,24 @@ xfs_reflink_remap_range(
1390 * has a cowextsize hint, and the destination file does not. 1437 * has a cowextsize hint, and the destination file does not.
1391 */ 1438 */
1392 cowextsize = 0; 1439 cowextsize = 0;
1393 if (srcoff == 0 && len == i_size_read(VFS_I(src)) && 1440 if (pos_in == 0 && len == i_size_read(inode_in) &&
1394 (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) && 1441 (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
1395 destoff == 0 && len >= i_size_read(VFS_I(dest)) && 1442 pos_out == 0 && len >= i_size_read(inode_out) &&
1396 !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)) 1443 !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
1397 cowextsize = src->i_d.di_cowextsize; 1444 cowextsize = src->i_d.di_cowextsize;
1398 1445
1399 error = xfs_reflink_update_dest(dest, destoff + len, cowextsize); 1446 ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize);
1400 if (error)
1401 goto out_error;
1402 1447
1403out_error: 1448out_unlock:
1404 xfs_iunlock(src, XFS_MMAPLOCK_EXCL); 1449 xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
1405 xfs_iunlock(src, XFS_IOLOCK_EXCL); 1450 xfs_iunlock(src, XFS_IOLOCK_EXCL);
1406 if (src->i_ino != dest->i_ino) { 1451 if (src->i_ino != dest->i_ino) {
1407 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); 1452 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
1408 xfs_iunlock(dest, XFS_IOLOCK_EXCL); 1453 xfs_iunlock(dest, XFS_IOLOCK_EXCL);
1409 } 1454 }
1410 if (error) 1455 if (ret)
1411 trace_xfs_reflink_remap_range_error(dest, error, _RET_IP_); 1456 trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
1412 return error; 1457 return ret;
1413} 1458}
1414 1459
1415/* 1460/*
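
[Editor's note] The subtle part of the new checks in xfs_reflink_remap_range()
is the EOF special case: a range that ends exactly at i_size may round its
length up to the block boundary before the alignment test. A self-contained
sketch of that test, with ALIGN/IS_ALIGNED reimplemented for a plain C build
(power-of-two block sizes assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN(x, a)      (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
    #define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

    /* When the source range runs to EOF, extend the tested length to the
     * containing block so a whole-file reflink of an unaligned-size file
     * still passes; otherwise every edge must sit on a block boundary. */
    static int remap_aligned(uint64_t pos_in, uint64_t pos_out, uint64_t len,
                             uint64_t isize, uint64_t bs)
    {
        uint64_t blen = (pos_in + len == isize) ? ALIGN(isize, bs) - pos_in
                                                : len;

        return IS_ALIGNED(pos_in, bs) && IS_ALIGNED(pos_in + blen, bs) &&
               IS_ALIGNED(pos_out, bs) && IS_ALIGNED(pos_out + blen, bs);
    }

    int main(void)
    {
        /* 10000-byte file, 4096-byte blocks: whole-file reflink passes... */
        printf("full file: %d\n", remap_aligned(0, 0, 10000, 10000, 4096));
        /* ...but the same 10000-byte range inside a larger file does not. */
        printf("interior:  %d\n", remap_aligned(0, 0, 10000, 20000, 4096));
        return 0;
    }
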
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 5dc3c8ac12aa..fad11607c9ad 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -26,8 +26,8 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
26extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip, 26extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
27 struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed); 27 struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
28 28
29extern int xfs_reflink_reserve_cow_range(struct xfs_inode *ip, 29extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
30 xfs_off_t offset, xfs_off_t count); 30 struct xfs_bmbt_irec *imap, bool *shared);
31extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip, 31extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
32 xfs_off_t offset, xfs_off_t count); 32 xfs_off_t offset, xfs_off_t count);
33extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset, 33extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
@@ -43,11 +43,8 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
43extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset, 43extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
44 xfs_off_t count); 44 xfs_off_t count);
45extern int xfs_reflink_recover_cow(struct xfs_mount *mp); 45extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
46#define XFS_REFLINK_DEDUPE 1 /* only reflink if contents match */ 46extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
47#define XFS_REFLINK_ALL (XFS_REFLINK_DEDUPE) 47 struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
48extern int xfs_reflink_remap_range(struct xfs_inode *src, xfs_off_t srcoff,
49 struct xfs_inode *dest, xfs_off_t destoff, xfs_off_t len,
50 unsigned int flags);
51extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip, 48extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
52 struct xfs_trans **tpp); 49 struct xfs_trans **tpp);
53extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset, 50extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 5f8d55d29a11..276d3023d60f 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -512,13 +512,13 @@ static struct attribute *xfs_error_attrs[] = {
512}; 512};
513 513
514 514
515struct kobj_type xfs_error_cfg_ktype = { 515static struct kobj_type xfs_error_cfg_ktype = {
516 .release = xfs_sysfs_release, 516 .release = xfs_sysfs_release,
517 .sysfs_ops = &xfs_sysfs_ops, 517 .sysfs_ops = &xfs_sysfs_ops,
518 .default_attrs = xfs_error_attrs, 518 .default_attrs = xfs_error_attrs,
519}; 519};
520 520
521struct kobj_type xfs_error_ktype = { 521static struct kobj_type xfs_error_ktype = {
522 .release = xfs_sysfs_release, 522 .release = xfs_sysfs_release,
523 .sysfs_ops = &xfs_sysfs_ops, 523 .sysfs_ops = &xfs_sysfs_ops,
524}; 524};
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ad188d3a83f3..0907752be62d 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3346,7 +3346,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
3346DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found); 3346DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
3347DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc); 3347DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
3348 3348
3349DEFINE_RW_EVENT(xfs_reflink_reserve_cow_range); 3349DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
3350DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range); 3350DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
3351 3351
3352DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write); 3352DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
@@ -3356,9 +3356,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
3356DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range); 3356DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
3357DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow); 3357DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
3358DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap); 3358DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap);
3359DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_piece);
3360 3359
3361DEFINE_INODE_ERROR_EVENT(xfs_reflink_reserve_cow_range_error);
3362DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error); 3360DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error);
3363DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error); 3361DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
3364DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error); 3362DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 17a940a14477..8caa79c61703 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -21,7 +21,7 @@ extern void pcc_mbox_free_channel(struct mbox_chan *chan);
21static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, 21static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
22 int subspace_id) 22 int subspace_id)
23{ 23{
24 return NULL; 24 return ERR_PTR(-ENODEV);
25} 25}
26static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { } 26static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
27#endif 27#endif
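
[Editor's note] The stub above now returns ERR_PTR(-ENODEV) instead of NULL so
that callers using the customary IS_ERR() test actually see the failure. For
readers unfamiliar with the convention, a userspace miniature (the helpers are
simplified renditions of include/linux/err.h, not copies):

    #include <stdio.h>

    #define MAX_ERRNO 4095   /* error pointers live in the top 4095 addresses */
    #define ENODEV 19

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *chan = ERR_PTR(-ENODEV);

        /* A NULL return would sail past IS_ERR() and get dereferenced
         * later; an ERR_PTR is caught here and decoded. */
        if (IS_ERR(chan))
            printf("request failed: errno %ld\n", -PTR_ERR(chan));
        return 0;
    }
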
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 43199a049da5..63554e9f6e0c 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -70,7 +70,7 @@ KSYM(__kcrctab_\name):
70#include <generated/autoksyms.h> 70#include <generated/autoksyms.h>
71 71
72#define __EXPORT_SYMBOL(sym, val, sec) \ 72#define __EXPORT_SYMBOL(sym, val, sec) \
73 __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym)) 73 __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
74#define __cond_export_sym(sym, val, sec, conf) \ 74#define __cond_export_sym(sym, val, sec, conf) \
75 ___cond_export_sym(sym, val, sec, conf) 75 ___cond_export_sym(sym, val, sec, conf)
76#define ___cond_export_sym(sym, val, sec, enabled) \ 76#define ___cond_export_sym(sym, val, sec, enabled) \
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ddbeda6dbdc8..689a8b9b9c8f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -326,6 +326,7 @@ struct pci_dev;
326int acpi_pci_irq_enable (struct pci_dev *dev); 326int acpi_pci_irq_enable (struct pci_dev *dev);
327void acpi_penalize_isa_irq(int irq, int active); 327void acpi_penalize_isa_irq(int irq, int active);
328bool acpi_isa_irq_available(int irq); 328bool acpi_isa_irq_available(int irq);
329void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
329void acpi_pci_irq_disable (struct pci_dev *dev); 330void acpi_pci_irq_disable (struct pci_dev *dev);
330 331
331extern int ec_read(u8 addr, u8 *val); 332extern int ec_read(u8 addr, u8 *val);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index af596381fa0f..a428aec36ace 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
785 * routines, one at of_clk_init(), and one at platform device probe 785 * routines, one at of_clk_init(), and one at platform device probe
786 */ 786 */
787#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \ 787#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
788 static void name##_of_clk_init_driver(struct device_node *np) \ 788 static void __init name##_of_clk_init_driver(struct device_node *np) \
789 { \ 789 { \
790 of_node_clear_flag(np, OF_POPULATED); \ 790 of_node_clear_flag(np, OF_POPULATED); \
791 fn(np); \ 791 fn(np); \
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5fa55fc56e18..32dc0cbd51ca 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -677,10 +677,10 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
677 if (best == table - 1) 677 if (best == table - 1)
678 return pos - table; 678 return pos - table;
679 679
680 return best - pos; 680 return best - table;
681 } 681 }
682 682
683 return best - pos; 683 return best - table;
684} 684}
685 685
686/* Works only on sorted freq-tables */ 686/* Works only on sorted freq-tables */
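
[Editor's note] The cpufreq fix above is pure pointer arithmetic: the function
must return an index into the frequency table, i.e. best - table, whereas
best - pos measures from the loop cursor and is wrong whenever the cursor is
not the table start. In miniature:

    #include <stdio.h>

    int main(void)
    {
        unsigned int table[] = { 500, 700, 1100, 1800 };
        unsigned int *pos  = &table[3];  /* iteration cursor at loop exit */
        unsigned int *best = &table[1];  /* best matching entry found */

        printf("best - table = %td  (valid table index)\n", best - table);
        printf("best - pos   = %td  (cursor-relative, bogus)\n", best - pos);
        return 0;
    }
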
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9b207a8c5af3..afe641c02dca 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -81,6 +81,7 @@ enum cpuhp_state {
81 CPUHP_AP_ARM_ARCH_TIMER_STARTING, 81 CPUHP_AP_ARM_ARCH_TIMER_STARTING,
82 CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, 82 CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
83 CPUHP_AP_DUMMY_TIMER_STARTING, 83 CPUHP_AP_DUMMY_TIMER_STARTING,
84 CPUHP_AP_JCORE_TIMER_STARTING,
84 CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, 85 CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
85 CPUHP_AP_ARM_TWD_STARTING, 86 CPUHP_AP_ARM_TWD_STARTING,
86 CPUHP_AP_METAG_TIMER_STARTING, 87 CPUHP_AP_METAG_TIMER_STARTING,
diff --git a/include/linux/io.h b/include/linux/io.h
index e2c8419278c1..82ef36eac8a1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -141,4 +141,26 @@ enum {
141void *memremap(resource_size_t offset, size_t size, unsigned long flags); 141void *memremap(resource_size_t offset, size_t size, unsigned long flags);
142void memunmap(void *addr); 142void memunmap(void *addr);
143 143
144/*
145 * On x86 PAT systems we have memory type tracking that records
146 * the allowed mappings on memory ranges. This tracking works for
147 * all the in-kernel mapping APIs (ioremap*), but where the user
148 * wishes to map a range from a physical device into user memory
149 * the tracking won't be updated. This API is to be used by
150 * drivers which remap physical device pages into userspace,
151 * and wants to make sure they are mapped WC and not UC.
152 */
153#ifndef arch_io_reserve_memtype_wc
154static inline int arch_io_reserve_memtype_wc(resource_size_t base,
155 resource_size_t size)
156{
157 return 0;
158}
159
160static inline void arch_io_free_memtype_wc(resource_size_t base,
161 resource_size_t size)
162{
163}
164#endif
165
144#endif /* _LINUX_IO_H */ 166#endif /* _LINUX_IO_H */
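
[Editor's note] The hunk above uses the common #ifndef-override idiom: the
generic header ships no-op stubs, and an architecture that really tracks
memory types (x86 PAT) supplies its own functions plus the guard macro before
this header is reached. The same pattern in a compilable miniature (the names
are reused purely for illustration):

    #include <stdio.h>

    /* An architecture would define its own version and
     * "#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc"
     * ahead of this point; everyone else gets the free no-op. */
    #ifndef arch_io_reserve_memtype_wc
    static inline int arch_io_reserve_memtype_wc(unsigned long base,
                                                 unsigned long size)
    {
        return 0;
    }

    static inline void arch_io_free_memtype_wc(unsigned long base,
                                               unsigned long size)
    {
    }
    #endif

    int main(void)
    {
        if (arch_io_reserve_memtype_wc(0xd0000000UL, 1UL << 20) == 0)
            printf("WC range reserved (default stub)\n");
        arch_io_free_memtype_wc(0xd0000000UL, 1UL << 20);
        return 0;
    }
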
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e63e288dee83..7892f55a1866 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,11 +19,15 @@ struct vm_fault;
19#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ 19#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
20 20
21/* 21/*
22 * Flags for iomap mappings: 22 * Flags for all iomap mappings:
23 */ 23 */
24#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */ 24#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
25#define IOMAP_F_SHARED 0x02 /* block shared with another file */ 25
26#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */ 26/*
27 * Flags that only need to be reported for IOMAP_REPORT requests:
28 */
29#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
30#define IOMAP_F_SHARED 0x20 /* block shared with another file */
27 31
28/* 32/*
29 * Magic value for blkno: 33 * Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
42/* 46/*
43 * Flags for iomap_begin / iomap_end. No flag implies a read. 47 * Flags for iomap_begin / iomap_end. No flag implies a read.
44 */ 48 */
45#define IOMAP_WRITE (1 << 0) 49#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
46#define IOMAP_ZERO (1 << 1) 50#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
51#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
47 52
48struct iomap_ops { 53struct iomap_ops {
49 /* 54 /*
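
[Editor's note] The renumbering above splits the iomap flag space: the low
bits are valid for any mapping, while 0x10 and up are only meaningful for
IOMAP_REPORT requests. A small sketch of that invariant; the 0xf0 mask and
the check helper are assumptions drawn from the comments, not fs/iomap.c
code:

    #include <assert.h>
    #include <stdio.h>

    #define IOMAP_F_NEW    0x01  /* valid for any mapping */
    #define IOMAP_F_MERGED 0x10  /* IOMAP_REPORT only */
    #define IOMAP_F_SHARED 0x20  /* IOMAP_REPORT only */

    #define IOMAP_WRITE  (1 << 0)
    #define IOMAP_ZERO   (1 << 1)
    #define IOMAP_REPORT (1 << 2)

    #define IOMAP_F_REPORT_ONLY 0xf0  /* hypothetical mask for the sketch */

    /* A mapping produced for a non-REPORT request must not carry any of
     * the report-only flags. */
    static void check_flags(unsigned int req, unsigned int map_flags)
    {
        if (!(req & IOMAP_REPORT))
            assert(!(map_flags & IOMAP_F_REPORT_ONLY));
    }

    int main(void)
    {
        check_flags(IOMAP_REPORT, IOMAP_F_SHARED);  /* fine */
        check_flags(IOMAP_WRITE, IOMAP_F_NEW);      /* fine */
        printf("flag invariants hold\n");
        return 0;
    }
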
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8361c8d3edd1..b7e34313cdfe 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -290,7 +290,7 @@
290#define GITS_BASER_TYPE_SHIFT (56) 290#define GITS_BASER_TYPE_SHIFT (56)
291#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) 291#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
292#define GITS_BASER_ENTRY_SIZE_SHIFT (48) 292#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
293#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) 293#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
294#define GITS_BASER_SHAREABILITY_SHIFT (10) 294#define GITS_BASER_SHAREABILITY_SHIFT (10)
295#define GITS_BASER_InnerShareable \ 295#define GITS_BASER_InnerShareable \
296 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) 296 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
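
[Editor's note] The GITS_BASER fix above narrows Entry_Size to its actual five
bits, [52:48]; the old 0xff mask also swallowed bits [55:53] (the adjacent
cacheability field in GITS_BASER, per the GICv3 spec), inflating the computed
entry size whenever those bits were set. Demonstrated with a synthetic
register value:

    #include <stdio.h>
    #include <stdint.h>

    #define ENTRY_SIZE_SHIFT 48
    #define ENTRY_SIZE_OLD(r) ((((r) >> ENTRY_SIZE_SHIFT) & 0xff) + 1)
    #define ENTRY_SIZE_NEW(r) ((((r) >> ENTRY_SIZE_SHIFT) & 0x1f) + 1)

    int main(void)
    {
        /* Entry_Size encoded as 7 (i.e. 8 bytes), plus a set bit 53 that
         * belongs to the neighbouring field above Entry_Size. */
        uint64_t r = (7ULL << 48) | (1ULL << 53);

        printf("old mask: %llu bytes\n", (unsigned long long)ENTRY_SIZE_OLD(r));
        printf("new mask: %llu bytes\n", (unsigned long long)ENTRY_SIZE_NEW(r));
        return 0;
    }
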
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d600303306eb..820c0ad54a01 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
44void kasan_unpoison_shadow(const void *address, size_t size); 44void kasan_unpoison_shadow(const void *address, size_t size);
45 45
46void kasan_unpoison_task_stack(struct task_struct *task); 46void kasan_unpoison_task_stack(struct task_struct *task);
47void kasan_unpoison_stack_above_sp_to(const void *watermark);
47 48
48void kasan_alloc_pages(struct page *page, unsigned int order); 49void kasan_alloc_pages(struct page *page, unsigned int order);
49void kasan_free_pages(struct page *page, unsigned int order); 50void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
85static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 86static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
86 87
87static inline void kasan_unpoison_task_stack(struct task_struct *task) {} 88static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
89static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
88 90
89static inline void kasan_enable_current(void) {} 91static inline void kasan_enable_current(void) {}
90static inline void kasan_disable_current(void) {} 92static inline void kasan_disable_current(void) {}
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 15ec117ec537..8f2e059e4d45 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -31,7 +31,6 @@
31 * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when 31 * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
32 * the last step cherry picks the 2nd arg, we get a zero. 32 * the last step cherry picks the 2nd arg, we get a zero.
33 */ 33 */
34#define config_enabled(cfg) ___is_defined(cfg)
35#define __is_defined(x) ___is_defined(x) 34#define __is_defined(x) ___is_defined(x)
36#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val) 35#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
37#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0) 36#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
41 * otherwise. For boolean options, this is equivalent to 40 * otherwise. For boolean options, this is equivalent to
42 * IS_ENABLED(CONFIG_FOO). 41 * IS_ENABLED(CONFIG_FOO).
43 */ 42 */
44#define IS_BUILTIN(option) config_enabled(option) 43#define IS_BUILTIN(option) __is_defined(option)
45 44
46/* 45/*
47 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 46 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
48 * otherwise. 47 * otherwise.
49 */ 48 */
50#define IS_MODULE(option) config_enabled(option##_MODULE) 49#define IS_MODULE(option) __is_defined(option##_MODULE)
51 50
52/* 51/*
53 * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled 52 * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
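
[Editor's note] The config_enabled() removal above leaves __is_defined() as
the single spelling of the placeholder trick. The whole mechanism compiles
and runs in userspace; the placeholder and second-arg helpers are copied in
so the example is self-contained:

    #include <stdio.h>

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val

    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

    #define CONFIG_FOO 1  /* stands in for a built-in Kconfig symbol */

    int main(void)
    {
        /* CONFIG_FOO expands to 1, the paste selects __ARG_PLACEHOLDER_1,
         * which expands to "0," and the second argument of (0, 1, 0) is
         * picked: result 1.  CONFIG_BAR is undefined, so the paste yields
         * junk followed by "1, 0" and the second argument is 0. */
        printf("IS_BUILTIN(CONFIG_FOO): %d\n", __is_defined(CONFIG_FOO));
        printf("IS_BUILTIN(CONFIG_BAR): %d\n", __is_defined(CONFIG_BAR));
        return 0;
    }
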
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6a51e9..a92c8d73aeaf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1266,29 +1266,25 @@ static inline int fixup_user_fault(struct task_struct *tsk,
1266} 1266}
1267#endif 1267#endif
1268 1268
1269extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); 1269extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1270 unsigned int gup_flags);
1270extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, 1271extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1271 void *buf, int len, int write); 1272 void *buf, int len, unsigned int gup_flags);
1272 1273
1273long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1274 unsigned long start, unsigned long nr_pages,
1275 unsigned int foll_flags, struct page **pages,
1276 struct vm_area_struct **vmas, int *nonblocking);
1277long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, 1274long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1278 unsigned long start, unsigned long nr_pages, 1275 unsigned long start, unsigned long nr_pages,
1279 int write, int force, struct page **pages, 1276 unsigned int gup_flags, struct page **pages,
1280 struct vm_area_struct **vmas); 1277 struct vm_area_struct **vmas);
1281long get_user_pages(unsigned long start, unsigned long nr_pages, 1278long get_user_pages(unsigned long start, unsigned long nr_pages,
1282 int write, int force, struct page **pages, 1279 unsigned int gup_flags, struct page **pages,
1283 struct vm_area_struct **vmas); 1280 struct vm_area_struct **vmas);
1284long get_user_pages_locked(unsigned long start, unsigned long nr_pages, 1281long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1285 int write, int force, struct page **pages, int *locked); 1282 unsigned int gup_flags, struct page **pages, int *locked);
1286long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 1283long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1287 unsigned long start, unsigned long nr_pages, 1284 unsigned long start, unsigned long nr_pages,
1288 int write, int force, struct page **pages, 1285 struct page **pages, unsigned int gup_flags);
1289 unsigned int gup_flags);
1290long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 1286long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1291 int write, int force, struct page **pages); 1287 struct page **pages, unsigned int gup_flags);
1292int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1288int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1293 struct page **pages); 1289 struct page **pages);
1294 1290
@@ -1306,7 +1302,7 @@ struct frame_vector {
1306struct frame_vector *frame_vector_create(unsigned int nr_frames); 1302struct frame_vector *frame_vector_create(unsigned int nr_frames);
1307void frame_vector_destroy(struct frame_vector *vec); 1303void frame_vector_destroy(struct frame_vector *vec);
1308int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, 1304int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1309 bool write, bool force, struct frame_vector *vec); 1305 unsigned int gup_flags, struct frame_vector *vec);
1310void put_vaddr_frames(struct frame_vector *vec); 1306void put_vaddr_frames(struct frame_vector *vec);
1311int frame_vector_to_pages(struct frame_vector *vec); 1307int frame_vector_to_pages(struct frame_vector *vec);
1312void frame_vector_to_pfns(struct frame_vector *vec); 1308void frame_vector_to_pfns(struct frame_vector *vec);
@@ -1391,7 +1387,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
1391 !vma_growsup(vma->vm_next, addr); 1387 !vma_growsup(vma->vm_next, addr);
1392} 1388}
1393 1389
1394int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t); 1390int vma_is_stack_for_current(struct vm_area_struct *vma);
1395 1391
1396extern unsigned long move_page_tables(struct vm_area_struct *vma, 1392extern unsigned long move_page_tables(struct vm_area_struct *vma,
1397 unsigned long old_addr, struct vm_area_struct *new_vma, 1393 unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2232,6 +2228,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
2232#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ 2228#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
2233#define FOLL_MLOCK 0x1000 /* lock present pages */ 2229#define FOLL_MLOCK 0x1000 /* lock present pages */
2234#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2230#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
2231#define FOLL_COW 0x4000 /* internal GUP flag */
2235 2232
2236typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2233typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2237 void *data); 2234 void *data);
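
[Editor's note] The mm.h hunk above folds the paired "int write, int force"
arguments of the get_user_pages*() family into a single gup_flags bitmask, so
callers pass FOLL_WRITE/FOLL_FORCE directly. The conversion every call site
performed is mechanical, as this compilable sketch shows (flag values taken
from the same era of mm.h; the helper itself is illustrative):

    #include <stdio.h>

    #define FOLL_WRITE 0x01  /* check pte is writable */
    #define FOLL_FORCE 0x10  /* read/write without permission */

    /* get_user_pages(start, n, 1, 0, ...) becomes
     * get_user_pages(start, n, FOLL_WRITE, ...), and so on. */
    static unsigned int gup_flags_from_bools(int write, int force)
    {
        unsigned int gup_flags = 0;

        if (write)
            gup_flags |= FOLL_WRITE;
        if (force)
            gup_flags |= FOLL_FORCE;
        return gup_flags;
    }

    int main(void)
    {
        printf("write=1 force=0 -> gup_flags 0x%02x\n",
               gup_flags_from_bools(1, 0));
        printf("write=1 force=1 -> gup_flags 0x%02x\n",
               gup_flags_from_bools(1, 1));
        return 0;
    }
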
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7f2ae99e5daf..0f088f3a2fed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -440,33 +440,7 @@ struct zone {
440 seqlock_t span_seqlock; 440 seqlock_t span_seqlock;
441#endif 441#endif
442 442
443 /* 443 int initialized;
444 * wait_table -- the array holding the hash table
445 * wait_table_hash_nr_entries -- the size of the hash table array
446 * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
447 *
448 * The purpose of all these is to keep track of the people
449 * waiting for a page to become available and make them
450 * runnable again when possible. The trouble is that this
451 * consumes a lot of space, especially when so few things
452 * wait on pages at a given time. So instead of using
453 * per-page waitqueues, we use a waitqueue hash table.
454 *
455 * The bucket discipline is to sleep on the same queue when
456 * colliding and wake all in that wait queue when removing.
457 * When something wakes, it must check to be sure its page is
458 * truly available, a la thundering herd. The cost of a
459 * collision is great, but given the expected load of the
460 * table, they should be so rare as to be outweighed by the
461 * benefits from the saved space.
462 *
463 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
464 * primary users of these fields, and in mm/page_alloc.c
465 * free_area_init_core() performs the initialization of them.
466 */
467 wait_queue_head_t *wait_table;
468 unsigned long wait_table_hash_nr_entries;
469 unsigned long wait_table_bits;
470 444
471 /* Write-intensive fields used from the page allocator */ 445 /* Write-intensive fields used from the page allocator */
472 ZONE_PADDING(_pad1_) 446 ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
546 520
547static inline bool zone_is_initialized(struct zone *zone) 521static inline bool zone_is_initialized(struct zone *zone)
548{ 522{
549 return !!zone->wait_table; 523 return zone->initialized;
550} 524}
551 525
552static inline bool zone_is_empty(struct zone *zone) 526static inline bool zone_is_empty(struct zone *zone)
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 7676557ce357..fc3c24206593 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -16,7 +16,6 @@
16#define _LINUX_NVME_H 16#define _LINUX_NVME_H
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/uuid.h>
20 19
21/* NQN names in commands fields specified one size */ 20/* NQN names in commands fields specified one size */
22#define NVMF_NQN_FIELD_LEN 256 21#define NVMF_NQN_FIELD_LEN 256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
182 char fr[8]; 181 char fr[8];
183 __u8 rab; 182 __u8 rab;
184 __u8 ieee[3]; 183 __u8 ieee[3];
185 __u8 mic; 184 __u8 cmic;
186 __u8 mdts; 185 __u8 mdts;
187 __le16 cntlid; 186 __le16 cntlid;
188 __le32 ver; 187 __le32 ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
202 __u8 apsta; 201 __u8 apsta;
203 __le16 wctemp; 202 __le16 wctemp;
204 __le16 cctemp; 203 __le16 cctemp;
205 __u8 rsvd270[50]; 204 __le16 mtfa;
205 __le32 hmpre;
206 __le32 hmmin;
207 __u8 tnvmcap[16];
208 __u8 unvmcap[16];
209 __le32 rpmbs;
210 __u8 rsvd316[4];
206 __le16 kas; 211 __le16 kas;
207 __u8 rsvd322[190]; 212 __u8 rsvd322[190];
208 __u8 sqes; 213 __u8 sqes;
@@ -267,7 +272,7 @@ struct nvme_id_ns {
267 __le16 nabo; 272 __le16 nabo;
268 __le16 nabspf; 273 __le16 nabspf;
269 __u16 rsvd46; 274 __u16 rsvd46;
270 __le64 nvmcap[2]; 275 __u8 nvmcap[16];
271 __u8 rsvd64[40]; 276 __u8 rsvd64[40];
272 __u8 nguid[16]; 277 __u8 nguid[16];
273 __u8 eui64[8]; 278 __u8 eui64[8];
@@ -277,6 +282,16 @@ struct nvme_id_ns {
277}; 282};
278 283
279enum { 284enum {
285 NVME_ID_CNS_NS = 0x00,
286 NVME_ID_CNS_CTRL = 0x01,
287 NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
288 NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
289 NVME_ID_CNS_NS_PRESENT = 0x11,
290 NVME_ID_CNS_CTRL_NS_LIST = 0x12,
291 NVME_ID_CNS_CTRL_LIST = 0x13,
292};
293
294enum {
280 NVME_NS_FEAT_THIN = 1 << 0, 295 NVME_NS_FEAT_THIN = 1 << 0,
281 NVME_NS_FLBAS_LBA_MASK = 0xf, 296 NVME_NS_FLBAS_LBA_MASK = 0xf,
282 NVME_NS_FLBAS_META_EXT = 0x10, 297 NVME_NS_FLBAS_META_EXT = 0x10,
@@ -556,8 +571,10 @@ enum nvme_admin_opcode {
556 nvme_admin_set_features = 0x09, 571 nvme_admin_set_features = 0x09,
557 nvme_admin_get_features = 0x0a, 572 nvme_admin_get_features = 0x0a,
558 nvme_admin_async_event = 0x0c, 573 nvme_admin_async_event = 0x0c,
574 nvme_admin_ns_mgmt = 0x0d,
559 nvme_admin_activate_fw = 0x10, 575 nvme_admin_activate_fw = 0x10,
560 nvme_admin_download_fw = 0x11, 576 nvme_admin_download_fw = 0x11,
577 nvme_admin_ns_attach = 0x15,
561 nvme_admin_keep_alive = 0x18, 578 nvme_admin_keep_alive = 0x18,
562 nvme_admin_format_nvm = 0x80, 579 nvme_admin_format_nvm = 0x80,
563 nvme_admin_security_send = 0x81, 580 nvme_admin_security_send = 0x81,
@@ -583,6 +600,7 @@ enum {
583 NVME_FEAT_WRITE_ATOMIC = 0x0a, 600 NVME_FEAT_WRITE_ATOMIC = 0x0a,
584 NVME_FEAT_ASYNC_EVENT = 0x0b, 601 NVME_FEAT_ASYNC_EVENT = 0x0b,
585 NVME_FEAT_AUTO_PST = 0x0c, 602 NVME_FEAT_AUTO_PST = 0x0c,
603 NVME_FEAT_HOST_MEM_BUF = 0x0d,
586 NVME_FEAT_KATO = 0x0f, 604 NVME_FEAT_KATO = 0x0f,
587 NVME_FEAT_SW_PROGRESS = 0x80, 605 NVME_FEAT_SW_PROGRESS = 0x80,
588 NVME_FEAT_HOST_ID = 0x81, 606 NVME_FEAT_HOST_ID = 0x81,
@@ -745,7 +763,7 @@ struct nvmf_common_command {
745struct nvmf_disc_rsp_page_entry { 763struct nvmf_disc_rsp_page_entry {
746 __u8 trtype; 764 __u8 trtype;
747 __u8 adrfam; 765 __u8 adrfam;
748 __u8 nqntype; 766 __u8 subtype;
749 __u8 treq; 767 __u8 treq;
750 __le16 portid; 768 __le16 portid;
751 __le16 cntlid; 769 __le16 cntlid;
@@ -794,7 +812,7 @@ struct nvmf_connect_command {
794}; 812};
795 813
796struct nvmf_connect_data { 814struct nvmf_connect_data {
797 uuid_be hostid; 815 __u8 hostid[16];
798 __le16 cntlid; 816 __le16 cntlid;
799 char resv4[238]; 817 char resv4[238];
800 char subsysnqn[NVMF_NQN_FIELD_LEN]; 818 char subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -905,12 +923,23 @@ enum {
905 NVME_SC_INVALID_VECTOR = 0x108, 923 NVME_SC_INVALID_VECTOR = 0x108,
906 NVME_SC_INVALID_LOG_PAGE = 0x109, 924 NVME_SC_INVALID_LOG_PAGE = 0x109,
907 NVME_SC_INVALID_FORMAT = 0x10a, 925 NVME_SC_INVALID_FORMAT = 0x10a,
908 NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, 926 NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
909 NVME_SC_INVALID_QUEUE = 0x10c, 927 NVME_SC_INVALID_QUEUE = 0x10c,
910 NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, 928 NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
911 NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, 929 NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
912 NVME_SC_FEATURE_NOT_PER_NS = 0x10f, 930 NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
913 NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, 931 NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
932 NVME_SC_FW_NEEDS_RESET = 0x111,
933 NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
934 NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
935 NVME_SC_OVERLAPPING_RANGE = 0x114,
936 NVME_SC_NS_INSUFFICENT_CAP = 0x115,
937 NVME_SC_NS_ID_UNAVAILABLE = 0x116,
938 NVME_SC_NS_ALREADY_ATTACHED = 0x118,
939 NVME_SC_NS_IS_PRIVATE = 0x119,
940 NVME_SC_NS_NOT_ATTACHED = 0x11a,
941 NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
942 NVME_SC_CTRL_LIST_INVALID = 0x11c,
914 943
915 /* 944 /*
916 * I/O Command Set Specific - NVM commands: 945 * I/O Command Set Specific - NVM commands:
@@ -941,6 +970,7 @@ enum {
941 NVME_SC_REFTAG_CHECK = 0x284, 970 NVME_SC_REFTAG_CHECK = 0x284,
942 NVME_SC_COMPARE_FAILED = 0x285, 971 NVME_SC_COMPARE_FAILED = 0x285,
943 NVME_SC_ACCESS_DENIED = 0x286, 972 NVME_SC_ACCESS_DENIED = 0x286,
973 NVME_SC_UNWRITTEN_BLOCK = 0x287,
944 974
945 NVME_SC_DNR = 0x4000, 975 NVME_SC_DNR = 0x4000,
946}; 976};
@@ -960,6 +990,7 @@ struct nvme_completion {
960 __le16 status; /* did the command fail, and if so, why? */ 990 __le16 status; /* did the command fail, and if so, why? */
961}; 991};
962 992
963#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8)) 993#define NVME_VS(major, minor, tertiary) \
994 (((major) << 16) | ((minor) << 8) | (tertiary))
964 995
965#endif /* _LINUX_NVME_H */ 996#endif /* _LINUX_NVME_H */
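
The reworked NVME_VS() above makes room for the tertiary version number that NVMe 1.2.1 introduced. A minimal userspace sketch, assuming nothing beyond the macro shown in the hunk, of how the three fields pack into the controller's Version register:

#include <stdio.h>

#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

int main(void)
{
	/* NVMe 1.2.1 encodes as 0x00010201; pre-1.2.1 callers pass 0 */
	printf("NVMe 1.2.1 -> 0x%08x\n", NVME_VS(1, 2, 1));
	printf("NVMe 1.0   -> 0x%08x\n", NVME_VS(1, 0, 0));
	return 0;
}
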
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 060d0ede88df..4741ecdb9817 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
1257extern void perf_event_enable(struct perf_event *event); 1257extern void perf_event_enable(struct perf_event *event);
1258extern void perf_event_disable(struct perf_event *event); 1258extern void perf_event_disable(struct perf_event *event);
1259extern void perf_event_disable_local(struct perf_event *event); 1259extern void perf_event_disable_local(struct perf_event *event);
1260extern void perf_event_disable_inatomic(struct perf_event *event);
1260extern void perf_event_task_tick(void); 1261extern void perf_event_task_tick(void);
1261#else /* !CONFIG_PERF_EVENTS: */ 1262#else /* !CONFIG_PERF_EVENTS: */
1262static inline void * 1263static inline void *
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0d7abb8b7315..91a740f6b884 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -902,8 +902,5 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
902 unsigned long prot, int pkey); 902 unsigned long prot, int pkey);
903asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val); 903asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
904asmlinkage long sys_pkey_free(int pkey); 904asmlinkage long sys_pkey_free(int pkey);
905//asmlinkage long sys_pkey_get(int pkey, unsigned long flags);
906//asmlinkage long sys_pkey_set(int pkey, unsigned long access_rights,
907// unsigned long flags);
908 905
909#endif 906#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 45f004e9cc59..2873baf5372a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -14,17 +14,6 @@ struct timespec;
14struct compat_timespec; 14struct compat_timespec;
15 15
16#ifdef CONFIG_THREAD_INFO_IN_TASK 16#ifdef CONFIG_THREAD_INFO_IN_TASK
17struct thread_info {
18 unsigned long flags; /* low level flags */
19};
20
21#define INIT_THREAD_INFO(tsk) \
22{ \
23 .flags = 0, \
24}
25#endif
26
27#ifdef CONFIG_THREAD_INFO_IN_TASK
28#define current_thread_info() ((struct thread_info *)current) 17#define current_thread_info() ((struct thread_info *)current)
29#endif 18#endif
30 19
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index fb8e3b6febdf..c2119008990a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -177,6 +177,7 @@ enum tcm_sense_reason_table {
177 TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15), 177 TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15),
178 TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), 178 TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
179 TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), 179 TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
180 TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
180#undef R 181#undef R
181}; 182};
182 183
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index dbfee7e86ba6..9b1462e38b82 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -730,10 +730,6 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
730__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) 730__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
731#define __NR_pkey_free 290 731#define __NR_pkey_free 290
732__SYSCALL(__NR_pkey_free, sys_pkey_free) 732__SYSCALL(__NR_pkey_free, sys_pkey_free)
733#define __NR_pkey_get 291
734//__SYSCALL(__NR_pkey_get, sys_pkey_get)
735#define __NR_pkey_set 292
736//__SYSCALL(__NR_pkey_set, sys_pkey_set)
737 733
738#undef __NR_syscalls 734#undef __NR_syscalls
739#define __NR_syscalls 291 735#define __NR_syscalls 291
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6965d0909554..cd2be1c8e9fb 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -75,6 +75,7 @@ header-y += bpf_perf_event.h
75header-y += bpf.h 75header-y += bpf.h
76header-y += bpqether.h 76header-y += bpqether.h
77header-y += bsg.h 77header-y += bsg.h
78header-y += bt-bmc.h
78header-y += btrfs.h 79header-y += btrfs.h
79header-y += can.h 80header-y += can.h
80header-y += capability.h 81header-y += capability.h
diff --git a/include/uapi/linux/bt-bmc.h b/include/uapi/linux/bt-bmc.h
new file mode 100644
index 000000000000..d9ec766a63d0
--- /dev/null
+++ b/include/uapi/linux/bt-bmc.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (c) 2015-2016, IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef _UAPI_LINUX_BT_BMC_H
11#define _UAPI_LINUX_BT_BMC_H
12
13#include <linux/ioctl.h>
14
15#define __BT_BMC_IOCTL_MAGIC 0xb1
16#define BT_BMC_IOCTL_SMS_ATN _IO(__BT_BMC_IOCTL_MAGIC, 0x00)
17
18#endif /* _UAPI_LINUX_BT_BMC_H */
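
The new header above exposes one ioctl for the ASPEED BT interface: raising the SMS attention bit toward the host. A hypothetical BMC-side usage sketch; the device node path is an assumption, not something the header defines:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bt-bmc.h>

int main(void)
{
	/* node name assumed for illustration */
	int fd = open("/dev/ipmi-bt-host", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BT_BMC_IOCTL_SMS_ATN) < 0)
		perror("BT_BMC_IOCTL_SMS_ATN");
	close(fd);
	return 0;
}
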
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index a521999de4f1..bf74eaa5c39f 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -53,7 +53,7 @@ static struct msg_msg *alloc_msg(size_t len)
53 size_t alen; 53 size_t alen;
54 54
55 alen = min(len, DATALEN_MSG); 55 alen = min(len, DATALEN_MSG);
56 msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL); 56 msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL_ACCOUNT);
57 if (msg == NULL) 57 if (msg == NULL)
58 return NULL; 58 return NULL;
59 59
@@ -65,7 +65,7 @@ static struct msg_msg *alloc_msg(size_t len)
65 while (len > 0) { 65 while (len > 0) {
66 struct msg_msgseg *seg; 66 struct msg_msgseg *seg;
67 alen = min(len, DATALEN_SEG); 67 alen = min(len, DATALEN_SEG);
68 seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL); 68 seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
69 if (seg == NULL) 69 if (seg == NULL)
70 goto out_err; 70 goto out_err;
71 *pseg = seg; 71 *pseg = seg;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5df20d6d1520..29de1a9352c0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -228,7 +228,7 @@ static struct {
228 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), 228 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
229 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), 229 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
230#ifdef CONFIG_DEBUG_LOCK_ALLOC 230#ifdef CONFIG_DEBUG_LOCK_ALLOC
231 .dep_map = {.name = "cpu_hotplug.lock" }, 231 .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
232#endif 232#endif
233}; 233};
234 234
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c6e47e97b33f..0e292132efac 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1960,6 +1960,12 @@ void perf_event_disable(struct perf_event *event)
1960} 1960}
1961EXPORT_SYMBOL_GPL(perf_event_disable); 1961EXPORT_SYMBOL_GPL(perf_event_disable);
1962 1962
1963void perf_event_disable_inatomic(struct perf_event *event)
1964{
1965 event->pending_disable = 1;
1966 irq_work_queue(&event->pending);
1967}
1968
1963static void perf_set_shadow_time(struct perf_event *event, 1969static void perf_set_shadow_time(struct perf_event *event,
1964 struct perf_event_context *ctx, 1970 struct perf_event_context *ctx,
1965 u64 tstamp) 1971 u64 tstamp)
@@ -7075,8 +7081,8 @@ static int __perf_event_overflow(struct perf_event *event,
7075 if (events && atomic_dec_and_test(&event->event_limit)) { 7081 if (events && atomic_dec_and_test(&event->event_limit)) {
7076 ret = 1; 7082 ret = 1;
7077 event->pending_kill = POLL_HUP; 7083 event->pending_kill = POLL_HUP;
7078 event->pending_disable = 1; 7084
7079 irq_work_queue(&event->pending); 7085 perf_event_disable_inatomic(event);
7080 } 7086 }
7081 7087
7082 READ_ONCE(event->overflow_handler)(event, data, regs); 7088 READ_ONCE(event->overflow_handler)(event, data, regs);
@@ -8855,7 +8861,10 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
8855 8861
8856void perf_pmu_unregister(struct pmu *pmu) 8862void perf_pmu_unregister(struct pmu *pmu)
8857{ 8863{
8864 int remove_device;
8865
8858 mutex_lock(&pmus_lock); 8866 mutex_lock(&pmus_lock);
8867 remove_device = pmu_bus_running;
8859 list_del_rcu(&pmu->entry); 8868 list_del_rcu(&pmu->entry);
8860 mutex_unlock(&pmus_lock); 8869 mutex_unlock(&pmus_lock);
8861 8870
@@ -8869,10 +8878,12 @@ void perf_pmu_unregister(struct pmu *pmu)
8869 free_percpu(pmu->pmu_disable_count); 8878 free_percpu(pmu->pmu_disable_count);
8870 if (pmu->type >= PERF_TYPE_MAX) 8879 if (pmu->type >= PERF_TYPE_MAX)
8871 idr_remove(&pmu_idr, pmu->type); 8880 idr_remove(&pmu_idr, pmu->type);
8872 if (pmu->nr_addr_filters) 8881 if (remove_device) {
8873 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); 8882 if (pmu->nr_addr_filters)
8874 device_del(pmu->dev); 8883 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
8875 put_device(pmu->dev); 8884 device_del(pmu->dev);
8885 put_device(pmu->dev);
8886 }
8876 free_pmu_context(pmu); 8887 free_pmu_context(pmu);
8877} 8888}
8878EXPORT_SYMBOL_GPL(perf_pmu_unregister); 8889EXPORT_SYMBOL_GPL(perf_pmu_unregister);
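
The core.c hunk above factors the mark-and-queue step out of __perf_event_overflow() into perf_event_disable_inatomic(), because a full perf_event_disable() cannot run from the NMI/hard-irq context in which overflows fire. A userspace model of that deferred-disable pattern, with illustrative names standing in for the irq_work machinery:

#include <stdio.h>

struct event {
	int pending_disable;
	void (*pending_work)(struct event *);
};

static void do_disable(struct event *e)
{
	/* the expensive part, run later from a safe context */
	if (e->pending_disable) {
		e->pending_disable = 0;
		printf("event disabled (deferred)\n");
	}
}

static void event_disable_inatomic(struct event *e)
{
	e->pending_disable = 1;		/* cheap flag set, NMI-safe */
	e->pending_work = do_disable;	/* stands in for irq_work_queue() */
}

int main(void)
{
	struct event e = { 0, NULL };

	event_disable_inatomic(&e);	/* called from "atomic" context */
	if (e.pending_work)
		e.pending_work(&e);	/* later, outside the handler */
	return 0;
}
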
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d4129bb05e5d..f9ec9add2164 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
300 300
301retry: 301retry:
302 /* Read the page with vaddr into memory */ 302 /* Read the page with vaddr into memory */
303 ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); 303 ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
304 &vma);
304 if (ret <= 0) 305 if (ret <= 0)
305 return ret; 306 return ret;
306 307
@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1710 * but we treat this as a 'remote' access since it is 1711 * but we treat this as a 'remote' access since it is
1711 * essentially a kernel access to the memory. 1712 * essentially a kernel access to the memory.
1712 */ 1713 */
1713 result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL); 1714 result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
1715 NULL);
1714 if (result < 0) 1716 if (result < 0)
1715 return result; 1717 return result;
1716 1718
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0c5f1a5db654..9c4d30483264 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -721,6 +721,7 @@ int irq_set_parent(int irq, int parent_irq)
721 irq_put_desc_unlock(desc, flags); 721 irq_put_desc_unlock(desc, flags);
722 return 0; 722 return 0;
723} 723}
724EXPORT_SYMBOL_GPL(irq_set_parent);
724#endif 725#endif
725 726
726/* 727/*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 8d44b3fea9d0..30e6d05aa5a9 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -53,8 +53,15 @@ void notrace __sanitizer_cov_trace_pc(void)
53 /* 53 /*
 54 * We are interested in code coverage as a function of syscall inputs, 54 * We are interested in code coverage as a function of syscall inputs,
55 * so we ignore code executed in interrupts. 55 * so we ignore code executed in interrupts.
56 * The checks for whether we are in an interrupt are open-coded, because
57 * 1. We can't use in_interrupt() here, since it also returns true
 58 * when we are inside a local_bh_disable() section.
59 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
60 * since that leads to slower generated code (three separate tests,
61 * one for each of the flags).
56 */ 62 */
57 if (!t || in_interrupt()) 63 if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
64 | NMI_MASK)))
58 return; 65 return;
59 mode = READ_ONCE(t->kcov_mode); 66 mode = READ_ONCE(t->kcov_mode);
60 if (mode == KCOV_MODE_TRACE) { 67 if (mode == KCOV_MODE_TRACE) {
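
The kcov hunk above open-codes the in-interrupt test for the two reasons its comment gives. A userspace model of point 1 — the constants mirror the usual preempt_count layout and should be treated as assumptions:

#include <stdio.h>

#define SOFTIRQ_OFFSET		0x00000100UL	/* serving a softirq */
#define SOFTIRQ_DISABLE_OFFSET	0x00000200UL	/* inside local_bh_disable() */
#define SOFTIRQ_MASK		0x0000ff00UL
#define HARDIRQ_MASK		0x000f0000UL
#define NMI_MASK		0x00100000UL

static int in_interrupt(unsigned long pc)
{
	return (pc & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK)) != 0;
}

static int kcov_skips(unsigned long pc)
{
	/* the open-coded test: serving-softirq bit only, not the whole mask */
	return (pc & (HARDIRQ_MASK | SOFTIRQ_OFFSET | NMI_MASK)) != 0;
}

int main(void)
{
	unsigned long pc = SOFTIRQ_DISABLE_OFFSET;	/* in local_bh_disable() */

	/* prints 1 and 0: kcov keeps tracing inside bh-disabled sections */
	printf("in_interrupt=%d kcov_skips=%d\n",
	       in_interrupt(pc), kcov_skips(pc));
	return 0;
}
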
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1e7f5da648d9..6ccb08f57fcb 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -498,9 +498,9 @@ static int enter_state(suspend_state_t state)
498 498
499#ifndef CONFIG_SUSPEND_SKIP_SYNC 499#ifndef CONFIG_SUSPEND_SKIP_SYNC
500 trace_suspend_resume(TPS("sync_filesystems"), 0, true); 500 trace_suspend_resume(TPS("sync_filesystems"), 0, true);
501 printk(KERN_INFO "PM: Syncing filesystems ... "); 501 pr_info("PM: Syncing filesystems ... ");
502 sys_sync(); 502 sys_sync();
503 printk("done.\n"); 503 pr_cont("done.\n");
504 trace_suspend_resume(TPS("sync_filesystems"), 0, false); 504 trace_suspend_resume(TPS("sync_filesystems"), 0, false);
505#endif 505#endif
506 506
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index d5e397315473..de08fc90baaf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1769,6 +1769,10 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
1769 cont_flush(); 1769 cont_flush();
1770 } 1770 }
1771 1771
1772 /* Skip empty continuation lines that couldn't be added - they just flush */
1773 if (!text_len && (lflags & LOG_CONT))
1774 return 0;
1775
1772 /* If it doesn't end in a newline, try to buffer the current line */ 1776 /* If it doesn't end in a newline, try to buffer the current line */
1773 if (!(lflags & LOG_NEWLINE)) { 1777 if (!(lflags & LOG_NEWLINE)) {
1774 if (cont_add(facility, level, lflags, text, text_len)) 1778 if (cont_add(facility, level, lflags, text, text_len))
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2a99027312a6..e6474f7272ec 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
537 int this_len, retval; 537 int this_len, retval;
538 538
539 this_len = (len > sizeof(buf)) ? sizeof(buf) : len; 539 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
540 retval = access_process_vm(tsk, src, buf, this_len, 0); 540 retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
541 if (!retval) { 541 if (!retval) {
542 if (copied) 542 if (copied)
543 break; 543 break;
@@ -564,7 +564,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
564 this_len = (len > sizeof(buf)) ? sizeof(buf) : len; 564 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
565 if (copy_from_user(buf, src, this_len)) 565 if (copy_from_user(buf, src, this_len))
566 return -EFAULT; 566 return -EFAULT;
567 retval = access_process_vm(tsk, dst, buf, this_len, 1); 567 retval = access_process_vm(tsk, dst, buf, this_len,
568 FOLL_FORCE | FOLL_WRITE);
568 if (!retval) { 569 if (!retval) {
569 if (copied) 570 if (copied)
570 break; 571 break;
@@ -1127,7 +1128,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1127 unsigned long tmp; 1128 unsigned long tmp;
1128 int copied; 1129 int copied;
1129 1130
1130 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); 1131 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
1131 if (copied != sizeof(tmp)) 1132 if (copied != sizeof(tmp))
1132 return -EIO; 1133 return -EIO;
1133 return put_user(tmp, (unsigned long __user *)data); 1134 return put_user(tmp, (unsigned long __user *)data);
@@ -1138,7 +1139,8 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1138{ 1139{
1139 int copied; 1140 int copied;
1140 1141
1141 copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); 1142 copied = access_process_vm(tsk, addr, &data, sizeof(data),
1143 FOLL_FORCE | FOLL_WRITE);
1142 return (copied == sizeof(data)) ? 0 : -EIO; 1144 return (copied == sizeof(data)) ? 0 : -EIO;
1143} 1145}
1144 1146
@@ -1155,7 +1157,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1155 switch (request) { 1157 switch (request) {
1156 case PTRACE_PEEKTEXT: 1158 case PTRACE_PEEKTEXT:
1157 case PTRACE_PEEKDATA: 1159 case PTRACE_PEEKDATA:
1158 ret = access_process_vm(child, addr, &word, sizeof(word), 0); 1160 ret = access_process_vm(child, addr, &word, sizeof(word),
1161 FOLL_FORCE);
1159 if (ret != sizeof(word)) 1162 if (ret != sizeof(word))
1160 ret = -EIO; 1163 ret = -EIO;
1161 else 1164 else
@@ -1164,7 +1167,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
1164 1167
1165 case PTRACE_POKETEXT: 1168 case PTRACE_POKETEXT:
1166 case PTRACE_POKEDATA: 1169 case PTRACE_POKEDATA:
1167 ret = access_process_vm(child, addr, &data, sizeof(data), 1); 1170 ret = access_process_vm(child, addr, &data, sizeof(data),
1171 FOLL_FORCE | FOLL_WRITE);
1168 ret = (ret != sizeof(data) ? -EIO : 0); 1172 ret = (ret != sizeof(data) ? -EIO : 0);
1169 break; 1173 break;
1170 1174
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 94732d1ab00a..42d4027f9e26 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7515,11 +7515,27 @@ static struct kmem_cache *task_group_cache __read_mostly;
7515DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 7515DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
7516DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 7516DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
7517 7517
7518#define WAIT_TABLE_BITS 8
7519#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
7520static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
7521
7522wait_queue_head_t *bit_waitqueue(void *word, int bit)
7523{
7524 const int shift = BITS_PER_LONG == 32 ? 5 : 6;
7525 unsigned long val = (unsigned long)word << shift | bit;
7526
7527 return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
7528}
7529EXPORT_SYMBOL(bit_waitqueue);
7530
7518void __init sched_init(void) 7531void __init sched_init(void)
7519{ 7532{
7520 int i, j; 7533 int i, j;
7521 unsigned long alloc_size = 0, ptr; 7534 unsigned long alloc_size = 0, ptr;
7522 7535
7536 for (i = 0; i < WAIT_TABLE_SIZE; i++)
7537 init_waitqueue_head(bit_wait_table + i);
7538
7523#ifdef CONFIG_FAIR_GROUP_SCHED 7539#ifdef CONFIG_FAIR_GROUP_SCHED
7524 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7540 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7525#endif 7541#endif
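
The sched/core.c hunk above gives bit waitqueues a private 256-entry hash table, replacing the per-zone tables torn out elsewhere in this series (mm/filemap.c, mm/memory_hotplug.c). A standalone sketch of the index computation; hash_long() is modeled with the kernel's 64-bit golden-ratio multiplier, which is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define WAIT_TABLE_BITS 8
#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)

static unsigned int hash_long64(unsigned long long val, unsigned int bits)
{
	/* golden-ratio hash as in <linux/hash.h>, 64-bit build assumed */
	return (unsigned int)((val * 0x61C8864680B583EBULL) >> (64 - bits));
}

static unsigned int bit_waitqueue_index(void *word, int bit)
{
	const int shift = sizeof(long) == 4 ? 5 : 6;	/* log2(BITS_PER_LONG) */
	unsigned long long val =
		((unsigned long long)(uintptr_t)word << shift) | (unsigned)bit;

	return hash_long64(val, WAIT_TABLE_BITS);
}

int main(void)
{
	static unsigned long flags;

	printf("bucket %u of %d for (&flags, bit 0)\n",
	       bit_waitqueue_index(&flags, 0), WAIT_TABLE_SIZE);
	return 0;
}
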
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d4ad72f8f3c..c242944f5cbd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se)
690 * will definitely be updated (after enqueue). 690 * will definitely be updated (after enqueue).
691 */ 691 */
692 sa->period_contrib = 1023; 692 sa->period_contrib = 1023;
693 sa->load_avg = scale_load_down(se->load.weight); 693 /*
 694 * Tasks are initialized with full load to be seen as heavy tasks until
695 * they get a chance to stabilize to their real load level.
 696 * Group entities are initialized with zero load to reflect the fact that
697 * nothing has been attached to the task group yet.
698 */
699 if (entity_is_task(se))
700 sa->load_avg = scale_load_down(se->load.weight);
694 sa->load_sum = sa->load_avg * LOAD_AVG_MAX; 701 sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
695 /* 702 /*
696 * At this point, util_avg won't be used in select_task_rq_fair anyway 703 * At this point, util_avg won't be used in select_task_rq_fair anyway
@@ -5471,13 +5478,18 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
5471 */ 5478 */
5472static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) 5479static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5473{ 5480{
5474 struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); 5481 struct sched_domain *this_sd;
5475 u64 avg_idle = this_rq()->avg_idle; 5482 u64 avg_cost, avg_idle = this_rq()->avg_idle;
5476 u64 avg_cost = this_sd->avg_scan_cost;
5477 u64 time, cost; 5483 u64 time, cost;
5478 s64 delta; 5484 s64 delta;
5479 int cpu, wrap; 5485 int cpu, wrap;
5480 5486
5487 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
5488 if (!this_sd)
5489 return -1;
5490
5491 avg_cost = this_sd->avg_scan_cost;
5492
5481 /* 5493 /*
5482 * Due to large variance we need a large fuzz factor; hackbench in 5494 * Due to large variance we need a large fuzz factor; hackbench in
5483 * particular is sensitive here. 5495 * particular is sensitive here.
@@ -8827,7 +8839,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8827{ 8839{
8828 struct sched_entity *se; 8840 struct sched_entity *se;
8829 struct cfs_rq *cfs_rq; 8841 struct cfs_rq *cfs_rq;
8830 struct rq *rq;
8831 int i; 8842 int i;
8832 8843
8833 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); 8844 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8842,8 +8853,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8842 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 8853 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8843 8854
8844 for_each_possible_cpu(i) { 8855 for_each_possible_cpu(i) {
8845 rq = cpu_rq(i);
8846
8847 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 8856 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8848 GFP_KERNEL, cpu_to_node(i)); 8857 GFP_KERNEL, cpu_to_node(i));
8849 if (!cfs_rq) 8858 if (!cfs_rq)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4f7053579fe3..9453efe9b25a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -480,16 +480,6 @@ void wake_up_bit(void *word, int bit)
480} 480}
481EXPORT_SYMBOL(wake_up_bit); 481EXPORT_SYMBOL(wake_up_bit);
482 482
483wait_queue_head_t *bit_waitqueue(void *word, int bit)
484{
485 const int shift = BITS_PER_LONG == 32 ? 5 : 6;
486 const struct zone *zone = page_zone(virt_to_page(word));
487 unsigned long val = (unsigned long)word << shift | bit;
488
489 return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
490}
491EXPORT_SYMBOL(bit_waitqueue);
492
493/* 483/*
494 * Manipulate the atomic_t address to produce a better bit waitqueue table hash 484 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
495 * index (we're keying off bit -1, but that would produce a horrible hash 485 * index (we're keying off bit -1, but that would produce a horrible hash
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1bf81ef91375..744fa611cae0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
58DEFINE_PER_CPU(struct task_struct *, ksoftirqd); 58DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59 59
60const char * const softirq_to_name[NR_SOFTIRQS] = { 60const char * const softirq_to_name[NR_SOFTIRQS] = {
61 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", 61 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
62 "TASKLET", "SCHED", "HRTIMER", "RCU" 62 "TASKLET", "SCHED", "HRTIMER", "RCU"
63}; 63};
64 64
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c3aad685bbc0..12dd190634ab 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -542,7 +542,6 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
542static int alarm_timer_create(struct k_itimer *new_timer) 542static int alarm_timer_create(struct k_itimer *new_timer)
543{ 543{
544 enum alarmtimer_type type; 544 enum alarmtimer_type type;
545 struct alarm_base *base;
546 545
547 if (!alarmtimer_get_rtcdev()) 546 if (!alarmtimer_get_rtcdev())
548 return -ENOTSUPP; 547 return -ENOTSUPP;
@@ -551,7 +550,6 @@ static int alarm_timer_create(struct k_itimer *new_timer)
551 return -EPERM; 550 return -EPERM;
552 551
553 type = clock2alarm(new_timer->it_clock); 552 type = clock2alarm(new_timer->it_clock);
554 base = &alarm_bases[type];
555 alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer); 553 alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
556 return 0; 554 return 0;
557} 555}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 2d47980a1bc4..c611c47de884 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
878 878
879#ifdef CONFIG_NO_HZ_COMMON 879#ifdef CONFIG_NO_HZ_COMMON
880static inline struct timer_base * 880static inline struct timer_base *
881__get_target_base(struct timer_base *base, unsigned tflags) 881get_target_base(struct timer_base *base, unsigned tflags)
882{ 882{
883#ifdef CONFIG_SMP 883#ifdef CONFIG_SMP
884 if ((tflags & TIMER_PINNED) || !base->migration_enabled) 884 if ((tflags & TIMER_PINNED) || !base->migration_enabled)
@@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
891 891
892static inline void forward_timer_base(struct timer_base *base) 892static inline void forward_timer_base(struct timer_base *base)
893{ 893{
894 unsigned long jnow = READ_ONCE(jiffies);
895
894 /* 896 /*
895 * We only forward the base when it's idle and we have a delta between 897 * We only forward the base when it's idle and we have a delta between
896 * base clock and jiffies. 898 * base clock and jiffies.
897 */ 899 */
898 if (!base->is_idle || (long) (jiffies - base->clk) < 2) 900 if (!base->is_idle || (long) (jnow - base->clk) < 2)
899 return; 901 return;
900 902
901 /* 903 /*
902 * If the next expiry value is > jiffies, then we fast forward to 904 * If the next expiry value is > jiffies, then we fast forward to
903 * jiffies otherwise we forward to the next expiry value. 905 * jiffies otherwise we forward to the next expiry value.
904 */ 906 */
905 if (time_after(base->next_expiry, jiffies)) 907 if (time_after(base->next_expiry, jnow))
906 base->clk = jiffies; 908 base->clk = jnow;
907 else 909 else
908 base->clk = base->next_expiry; 910 base->clk = base->next_expiry;
909} 911}
910#else 912#else
911static inline struct timer_base * 913static inline struct timer_base *
912__get_target_base(struct timer_base *base, unsigned tflags) 914get_target_base(struct timer_base *base, unsigned tflags)
913{ 915{
914 return get_timer_this_cpu_base(tflags); 916 return get_timer_this_cpu_base(tflags);
915} 917}
@@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
917static inline void forward_timer_base(struct timer_base *base) { } 919static inline void forward_timer_base(struct timer_base *base) { }
918#endif 920#endif
919 921
920static inline struct timer_base *
921get_target_base(struct timer_base *base, unsigned tflags)
922{
923 struct timer_base *target = __get_target_base(base, tflags);
924
925 forward_timer_base(target);
926 return target;
927}
928 922
929/* 923/*
930 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means 924 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
@@ -943,7 +937,14 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
943{ 937{
944 for (;;) { 938 for (;;) {
945 struct timer_base *base; 939 struct timer_base *base;
946 u32 tf = timer->flags; 940 u32 tf;
941
942 /*
943 * We need to use READ_ONCE() here, otherwise the compiler
944 * might re-read @tf between the check for TIMER_MIGRATING
945 * and spin_lock().
946 */
947 tf = READ_ONCE(timer->flags);
947 948
948 if (!(tf & TIMER_MIGRATING)) { 949 if (!(tf & TIMER_MIGRATING)) {
949 base = get_timer_base(tf); 950 base = get_timer_base(tf);
@@ -964,6 +965,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
964 unsigned long clk = 0, flags; 965 unsigned long clk = 0, flags;
965 int ret = 0; 966 int ret = 0;
966 967
968 BUG_ON(!timer->function);
969
967 /* 970 /*
968 * This is a common optimization triggered by the networking code - if 971 * This is a common optimization triggered by the networking code - if
969 * the timer is re-modified to have the same timeout or ends up in the 972 * the timer is re-modified to have the same timeout or ends up in the
@@ -972,13 +975,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
972 if (timer_pending(timer)) { 975 if (timer_pending(timer)) {
973 if (timer->expires == expires) 976 if (timer->expires == expires)
974 return 1; 977 return 1;
978
975 /* 979 /*
976 * Take the current timer_jiffies of base, but without holding 980 * We lock the timer base and calculate the bucket index right
977 * the lock! 981 * here. If the timer ends up in the same bucket, then we
982 * just update the expiry time and avoid the whole
983 * dequeue/enqueue dance.
978 */ 984 */
979 base = get_timer_base(timer->flags); 985 base = lock_timer_base(timer, &flags);
980 clk = base->clk;
981 986
987 clk = base->clk;
982 idx = calc_wheel_index(expires, clk); 988 idx = calc_wheel_index(expires, clk);
983 989
984 /* 990 /*
@@ -988,14 +994,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
988 */ 994 */
989 if (idx == timer_get_idx(timer)) { 995 if (idx == timer_get_idx(timer)) {
990 timer->expires = expires; 996 timer->expires = expires;
991 return 1; 997 ret = 1;
998 goto out_unlock;
992 } 999 }
1000 } else {
1001 base = lock_timer_base(timer, &flags);
993 } 1002 }
994 1003
995 timer_stats_timer_set_start_info(timer); 1004 timer_stats_timer_set_start_info(timer);
996 BUG_ON(!timer->function);
997
998 base = lock_timer_base(timer, &flags);
999 1005
1000 ret = detach_if_pending(timer, base, false); 1006 ret = detach_if_pending(timer, base, false);
1001 if (!ret && pending_only) 1007 if (!ret && pending_only)
@@ -1025,12 +1031,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1025 } 1031 }
1026 } 1032 }
1027 1033
1034 /* Try to forward a stale timer base clock */
1035 forward_timer_base(base);
1036
1028 timer->expires = expires; 1037 timer->expires = expires;
1029 /* 1038 /*
1030 * If 'idx' was calculated above and the base time did not advance 1039 * If 'idx' was calculated above and the base time did not advance
1031 * between calculating 'idx' and taking the lock, only enqueue_timer() 1040 * between calculating 'idx' and possibly switching the base, only
1032 * and trigger_dyntick_cpu() is required. Otherwise we need to 1041 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
1033 * (re)calculate the wheel index via internal_add_timer(). 1042 * we need to (re)calculate the wheel index via
1043 * internal_add_timer().
1034 */ 1044 */
1035 if (idx != UINT_MAX && clk == base->clk) { 1045 if (idx != UINT_MAX && clk == base->clk) {
1036 enqueue_timer(base, timer, idx); 1046 enqueue_timer(base, timer, idx);
@@ -1510,12 +1520,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1510 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); 1520 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
1511 base->next_expiry = nextevt; 1521 base->next_expiry = nextevt;
1512 /* 1522 /*
1513 * We have a fresh next event. Check whether we can forward the base: 1523 * We have a fresh next event. Check whether we can forward the
1524 * base. We can only do that when @basej is past base->clk,
1525 * otherwise we might rewind base->clk.
1514 */ 1526 */
1515 if (time_after(nextevt, jiffies)) 1527 if (time_after(basej, base->clk)) {
1516 base->clk = jiffies; 1528 if (time_after(nextevt, basej))
1517 else if (time_after(nextevt, base->clk)) 1529 base->clk = basej;
1518 base->clk = nextevt; 1530 else if (time_after(nextevt, base->clk))
1531 base->clk = nextevt;
1532 }
1519 1533
1520 if (time_before_eq(nextevt, basej)) { 1534 if (time_before_eq(nextevt, basej)) {
1521 expires = basem; 1535 expires = basem;
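
The lock_timer_base() change above adds READ_ONCE() so the compiler cannot re-load timer->flags between the TIMER_MIGRATING test and the spin_lock(). The same snapshot idiom in portable C11, with illustrative names and flag values:

#include <stdatomic.h>
#include <stdio.h>

#define TIMER_MIGRATING 0x1u	/* illustrative value */

static _Atomic unsigned int timer_flags;

int main(void)
{
	/* one explicit load, like READ_ONCE(timer->flags) ... */
	unsigned int tf = atomic_load_explicit(&timer_flags,
					       memory_order_relaxed);

	/* ... so the test and any later use see the same snapshot */
	if (!(tf & TIMER_MIGRATING))
		printf("flags snapshot 0x%x: safe to pick a base\n", tf);
	return 0;
}
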
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 33bc56cf60d7..b01e547d4d04 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -198,6 +198,7 @@ config FRAME_WARN
198 int "Warn for stack frames larger than (needs gcc 4.4)" 198 int "Warn for stack frames larger than (needs gcc 4.4)"
199 range 0 8192 199 range 0 8192
200 default 0 if KASAN 200 default 0 if KASAN
201 default 2048 if GCC_PLUGIN_LATENT_ENTROPY
201 default 1024 if !64BIT 202 default 1024 if !64BIT
202 default 2048 if 64BIT 203 default 2048 if 64BIT
203 help 204 help
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 0a1139644d32..144fe6b1a03e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
292 struct gen_pool_chunk *chunk; 292 struct gen_pool_chunk *chunk;
293 unsigned long addr = 0; 293 unsigned long addr = 0;
294 int order = pool->min_alloc_order; 294 int order = pool->min_alloc_order;
295 int nbits, start_bit = 0, end_bit, remain; 295 int nbits, start_bit, end_bit, remain;
296 296
297#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG 297#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
298 BUG_ON(in_nmi()); 298 BUG_ON(in_nmi());
@@ -307,6 +307,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
307 if (size > atomic_read(&chunk->avail)) 307 if (size > atomic_read(&chunk->avail))
308 continue; 308 continue;
309 309
310 start_bit = 0;
310 end_bit = chunk_size(chunk) >> order; 311 end_bit = chunk_size(chunk) >> order;
311retry: 312retry:
312 start_bit = algo(chunk->bits, end_bit, start_bit, 313 start_bit = algo(chunk->bits, end_bit, start_bit,
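
The genalloc fix above moves the start_bit reset into the chunk loop, so every chunk's bitmap is scanned from offset 0 instead of from wherever the previous chunk's search stopped. A toy demonstration of why the per-chunk reset matters:

#include <stdio.h>

/* first clear bit at or after 'start' in an 8-bit map; returns 8 if none */
static int find_free(unsigned char map, int start)
{
	for (int i = start; i < 8; i++)
		if (!(map & (1u << i)))
			return i;
	return 8;
}

int main(void)
{
	/* chunk 0 is full; chunk 1 has bits 1..7 free */
	unsigned char chunks[2] = { 0xff, 0x01 };
	int start_bit;

	for (int c = 0; c < 2; c++) {
		start_bit = 0;	/* the fix: without this, chunk 0's failed
				 * scan leaves start_bit at 8 and chunk 1's
				 * free bits are never seen */
		printf("chunk %d: first free bit %d\n",
		       c, find_free(chunks[c], start_bit));
	}
	return 0;
}
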
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 60f77f1d470a..4d830e299989 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -50,7 +50,7 @@
50 STACK_ALLOC_ALIGN) 50 STACK_ALLOC_ALIGN)
51#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \ 51#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
52 STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS) 52 STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
53#define STACK_ALLOC_SLABS_CAP 1024 53#define STACK_ALLOC_SLABS_CAP 8192
54#define STACK_ALLOC_MAX_SLABS \ 54#define STACK_ALLOC_MAX_SLABS \
55 (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ 55 (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
56 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP) 56 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
diff --git a/mm/Kconfig b/mm/Kconfig
index be0ee11fa0d9..86e3e0e74d20 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -187,7 +187,7 @@ config MEMORY_HOTPLUG
187 bool "Allow for memory hot-add" 187 bool "Allow for memory hot-add"
188 depends on SPARSEMEM || X86_64_ACPI_NUMA 188 depends on SPARSEMEM || X86_64_ACPI_NUMA
189 depends on ARCH_ENABLE_MEMORY_HOTPLUG 189 depends on ARCH_ENABLE_MEMORY_HOTPLUG
190 depends on !KASAN 190 depends on COMPILE_TEST || !KASAN
191 191
192config MEMORY_HOTPLUG_SPARSE 192config MEMORY_HOTPLUG_SPARSE
193 def_bool y 193 def_bool y
diff --git a/mm/filemap.c b/mm/filemap.c
index 849f459ad078..c7fe2f16503f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -790,9 +790,7 @@ EXPORT_SYMBOL(__page_cache_alloc);
790 */ 790 */
791wait_queue_head_t *page_waitqueue(struct page *page) 791wait_queue_head_t *page_waitqueue(struct page *page)
792{ 792{
793 const struct zone *zone = page_zone(page); 793 return bit_waitqueue(page, 0);
794
795 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
796} 794}
797EXPORT_SYMBOL(page_waitqueue); 795EXPORT_SYMBOL(page_waitqueue);
798 796
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 381bb07ed14f..db77dcb38afd 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -11,10 +11,7 @@
11 * get_vaddr_frames() - map virtual addresses to pfns 11 * get_vaddr_frames() - map virtual addresses to pfns
12 * @start: starting user address 12 * @start: starting user address
13 * @nr_frames: number of pages / pfns from start to map 13 * @nr_frames: number of pages / pfns from start to map
14 * @write: whether pages will be written to by the caller 14 * @gup_flags: flags modifying lookup behaviour
15 * @force: whether to force write access even if user mapping is
16 * readonly. See description of the same argument of
17 get_user_pages().
18 * @vec: structure which receives pages / pfns of the addresses mapped. 15 * @vec: structure which receives pages / pfns of the addresses mapped.
19 * It should have space for at least nr_frames entries. 16 * It should have space for at least nr_frames entries.
20 * 17 *
@@ -34,7 +31,7 @@
34 * This function takes care of grabbing mmap_sem as necessary. 31 * This function takes care of grabbing mmap_sem as necessary.
35 */ 32 */
36int get_vaddr_frames(unsigned long start, unsigned int nr_frames, 33int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
37 bool write, bool force, struct frame_vector *vec) 34 unsigned int gup_flags, struct frame_vector *vec)
38{ 35{
39 struct mm_struct *mm = current->mm; 36 struct mm_struct *mm = current->mm;
40 struct vm_area_struct *vma; 37 struct vm_area_struct *vma;
@@ -59,7 +56,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
59 vec->got_ref = true; 56 vec->got_ref = true;
60 vec->is_pfns = false; 57 vec->is_pfns = false;
61 ret = get_user_pages_locked(start, nr_frames, 58 ret = get_user_pages_locked(start, nr_frames,
62 write, force, (struct page **)(vec->ptrs), &locked); 59 gup_flags, (struct page **)(vec->ptrs), &locked);
63 goto out; 60 goto out;
64 } 61 }
65 62
diff --git a/mm/gup.c b/mm/gup.c
index 96b2b2fd0fbd..ec4f82704b6f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
60 return -EEXIST; 60 return -EEXIST;
61} 61}
62 62
63/*
64 * FOLL_FORCE can write to even unwritable pte's, but only
65 * after we've gone through a COW cycle and they are dirty.
66 */
67static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
68{
69 return pte_write(pte) ||
70 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
71}
72
63static struct page *follow_page_pte(struct vm_area_struct *vma, 73static struct page *follow_page_pte(struct vm_area_struct *vma,
64 unsigned long address, pmd_t *pmd, unsigned int flags) 74 unsigned long address, pmd_t *pmd, unsigned int flags)
65{ 75{
@@ -95,7 +105,7 @@ retry:
95 } 105 }
96 if ((flags & FOLL_NUMA) && pte_protnone(pte)) 106 if ((flags & FOLL_NUMA) && pte_protnone(pte))
97 goto no_page; 107 goto no_page;
98 if ((flags & FOLL_WRITE) && !pte_write(pte)) { 108 if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
99 pte_unmap_unlock(ptep, ptl); 109 pte_unmap_unlock(ptep, ptl);
100 return NULL; 110 return NULL;
101 } 111 }
@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
412 * reCOWed by userspace write). 422 * reCOWed by userspace write).
413 */ 423 */
414 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) 424 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
415 *flags &= ~FOLL_WRITE; 425 *flags |= FOLL_COW;
416 return 0; 426 return 0;
417} 427}
418 428
@@ -516,7 +526,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
516 * instead of __get_user_pages. __get_user_pages should be used only if 526 * instead of __get_user_pages. __get_user_pages should be used only if
517 * you need some special @gup_flags. 527 * you need some special @gup_flags.
518 */ 528 */
519long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 529static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
520 unsigned long start, unsigned long nr_pages, 530 unsigned long start, unsigned long nr_pages,
521 unsigned int gup_flags, struct page **pages, 531 unsigned int gup_flags, struct page **pages,
522 struct vm_area_struct **vmas, int *nonblocking) 532 struct vm_area_struct **vmas, int *nonblocking)
@@ -621,7 +631,6 @@ next_page:
621 } while (nr_pages); 631 } while (nr_pages);
622 return i; 632 return i;
623} 633}
624EXPORT_SYMBOL(__get_user_pages);
625 634
626bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) 635bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
627{ 636{
@@ -729,7 +738,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
729 struct mm_struct *mm, 738 struct mm_struct *mm,
730 unsigned long start, 739 unsigned long start,
731 unsigned long nr_pages, 740 unsigned long nr_pages,
732 int write, int force,
733 struct page **pages, 741 struct page **pages,
734 struct vm_area_struct **vmas, 742 struct vm_area_struct **vmas,
735 int *locked, bool notify_drop, 743 int *locked, bool notify_drop,
@@ -747,10 +755,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
747 755
748 if (pages) 756 if (pages)
749 flags |= FOLL_GET; 757 flags |= FOLL_GET;
750 if (write)
751 flags |= FOLL_WRITE;
752 if (force)
753 flags |= FOLL_FORCE;
754 758
755 pages_done = 0; 759 pages_done = 0;
756 lock_dropped = false; 760 lock_dropped = false;
@@ -843,12 +847,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
843 * up_read(&mm->mmap_sem); 847 * up_read(&mm->mmap_sem);
844 */ 848 */
845long get_user_pages_locked(unsigned long start, unsigned long nr_pages, 849long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
846 int write, int force, struct page **pages, 850 unsigned int gup_flags, struct page **pages,
847 int *locked) 851 int *locked)
848{ 852{
849 return __get_user_pages_locked(current, current->mm, start, nr_pages, 853 return __get_user_pages_locked(current, current->mm, start, nr_pages,
850 write, force, pages, NULL, locked, true, 854 pages, NULL, locked, true,
851 FOLL_TOUCH); 855 gup_flags | FOLL_TOUCH);
852} 856}
853EXPORT_SYMBOL(get_user_pages_locked); 857EXPORT_SYMBOL(get_user_pages_locked);
854 858
@@ -864,14 +868,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
864 */ 868 */
865__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 869__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
866 unsigned long start, unsigned long nr_pages, 870 unsigned long start, unsigned long nr_pages,
867 int write, int force, struct page **pages, 871 struct page **pages, unsigned int gup_flags)
868 unsigned int gup_flags)
869{ 872{
870 long ret; 873 long ret;
871 int locked = 1; 874 int locked = 1;
875
872 down_read(&mm->mmap_sem); 876 down_read(&mm->mmap_sem);
873 ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, 877 ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
874 pages, NULL, &locked, false, gup_flags); 878 &locked, false, gup_flags);
875 if (locked) 879 if (locked)
876 up_read(&mm->mmap_sem); 880 up_read(&mm->mmap_sem);
877 return ret; 881 return ret;
@@ -896,10 +900,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
896 * "force" parameter). 900 * "force" parameter).
897 */ 901 */
898long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, 902long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
899 int write, int force, struct page **pages) 903 struct page **pages, unsigned int gup_flags)
900{ 904{
901 return __get_user_pages_unlocked(current, current->mm, start, nr_pages, 905 return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
902 write, force, pages, FOLL_TOUCH); 906 pages, gup_flags | FOLL_TOUCH);
903} 907}
904EXPORT_SYMBOL(get_user_pages_unlocked); 908EXPORT_SYMBOL(get_user_pages_unlocked);
905 909
@@ -910,9 +914,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
910 * @mm: mm_struct of target mm 914 * @mm: mm_struct of target mm
911 * @start: starting user address 915 * @start: starting user address
912 * @nr_pages: number of pages from start to pin 916 * @nr_pages: number of pages from start to pin
913 * @write: whether pages will be written to by the caller 917 * @gup_flags: flags modifying lookup behaviour
914 * @force: whether to force access even when user mapping is currently
915 * protected (but never forces write access to shared mapping).
916 * @pages: array that receives pointers to the pages pinned. 918 * @pages: array that receives pointers to the pages pinned.
917 * Should be at least nr_pages long. Or NULL, if caller 919 * Should be at least nr_pages long. Or NULL, if caller
918 * only intends to ensure the pages are faulted in. 920 * only intends to ensure the pages are faulted in.
@@ -941,9 +943,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
941 * or similar operation cannot guarantee anything stronger anyway because 943 * or similar operation cannot guarantee anything stronger anyway because
942 * locks can't be held over the syscall boundary. 944 * locks can't be held over the syscall boundary.
943 * 945 *
944 * If write=0, the page must not be written to. If the page is written to, 946 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
945 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called 947 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
946 * after the page is finished with, and before put_page is called. 948 * be called after the page is finished with, and before put_page is called.
947 * 949 *
948 * get_user_pages is typically used for fewer-copy IO operations, to get a 950 * get_user_pages is typically used for fewer-copy IO operations, to get a
949 * handle on the memory by some means other than accesses via the user virtual 951 * handle on the memory by some means other than accesses via the user virtual
@@ -960,12 +962,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
960 */ 962 */
961long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, 963long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
962 unsigned long start, unsigned long nr_pages, 964 unsigned long start, unsigned long nr_pages,
963 int write, int force, struct page **pages, 965 unsigned int gup_flags, struct page **pages,
964 struct vm_area_struct **vmas) 966 struct vm_area_struct **vmas)
965{ 967{
966 return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, 968 return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
967 pages, vmas, NULL, false, 969 NULL, false,
968 FOLL_TOUCH | FOLL_REMOTE); 970 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
969} 971}
970EXPORT_SYMBOL(get_user_pages_remote); 972EXPORT_SYMBOL(get_user_pages_remote);
971 973
@@ -976,12 +978,12 @@ EXPORT_SYMBOL(get_user_pages_remote);
976 * obviously don't pass FOLL_REMOTE in here. 978 * obviously don't pass FOLL_REMOTE in here.
977 */ 979 */
978long get_user_pages(unsigned long start, unsigned long nr_pages, 980long get_user_pages(unsigned long start, unsigned long nr_pages,
979 int write, int force, struct page **pages, 981 unsigned int gup_flags, struct page **pages,
980 struct vm_area_struct **vmas) 982 struct vm_area_struct **vmas)
981{ 983{
982 return __get_user_pages_locked(current, current->mm, start, nr_pages, 984 return __get_user_pages_locked(current, current->mm, start, nr_pages,
983 write, force, pages, vmas, NULL, false, 985 pages, vmas, NULL, false,
984 FOLL_TOUCH); 986 gup_flags | FOLL_TOUCH);
985} 987}
986EXPORT_SYMBOL(get_user_pages); 988EXPORT_SYMBOL(get_user_pages);
987 989
@@ -1505,7 +1507,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1505 start += nr << PAGE_SHIFT; 1507 start += nr << PAGE_SHIFT;
1506 pages += nr; 1508 pages += nr;
1507 1509
1508 ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); 1510 ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
1511 write ? FOLL_WRITE : 0);
1509 1512
1510 /* Have to be a bit careful with return values */ 1513 /* Have to be a bit careful with return values */
1511 if (nr > 0) { 1514 if (nr > 0) {
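
The gup.c rework above collapses the write/force int pair into a single gup_flags argument across the whole API. The caller-side mapping is exactly the flag conversion deleted from __get_user_pages_locked(); a sketch, with FOLL_* values mirroring the kernel's definitions (an assumption here):

#include <stdio.h>

#define FOLL_WRITE 0x01	/* page may be written to */
#define FOLL_FORCE 0x10	/* access even when mapping is protected */

static unsigned int gup_flags_from_bools(int write, int force)
{
	unsigned int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;
	return flags;
}

int main(void)
{
	/* old: get_user_pages(start, 1, 1, 1, &page, NULL)
	 * new: get_user_pages(start, 1, FOLL_WRITE | FOLL_FORCE, &page, NULL) */
	printf("write=1 force=1 -> gup_flags 0x%x\n",
	       gup_flags_from_bools(1, 1));
	return 0;
}
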
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 88af13c00d3c..70c009741aab 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -34,6 +34,7 @@
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/types.h> 35#include <linux/types.h>
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/bug.h>
37 38
38#include "kasan.h" 39#include "kasan.h"
39#include "../slab.h" 40#include "../slab.h"
@@ -62,7 +63,7 @@ void kasan_unpoison_shadow(const void *address, size_t size)
62 } 63 }
63} 64}
64 65
65static void __kasan_unpoison_stack(struct task_struct *task, void *sp) 66static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
66{ 67{
67 void *base = task_stack_page(task); 68 void *base = task_stack_page(task);
68 size_t size = sp - base; 69 size_t size = sp - base;
@@ -77,9 +78,24 @@ void kasan_unpoison_task_stack(struct task_struct *task)
77} 78}
78 79
79/* Unpoison the stack for the current task beyond a watermark sp value. */ 80/* Unpoison the stack for the current task beyond a watermark sp value. */
80asmlinkage void kasan_unpoison_remaining_stack(void *sp) 81asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
81{ 82{
82 __kasan_unpoison_stack(current, sp); 83 __kasan_unpoison_stack(current, watermark);
84}
85
86/*
87 * Clear all poison for the region between the current SP and a provided
 88 * watermark value, as is sometimes required prior to hand-crafted asm
 89 * returns in the middle of a function.
90 */
91void kasan_unpoison_stack_above_sp_to(const void *watermark)
92{
93 const void *sp = __builtin_frame_address(0);
94 size_t size = watermark - sp;
95
96 if (WARN_ON(sp > watermark))
97 return;
98 kasan_unpoison_shadow(sp, size);
83} 99}
84 100
85/* 101/*
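
kasan_unpoison_stack_above_sp_to() above clears shadow poison between the current frame and a caller-supplied watermark, warning when the watermark sits below the frame. The pointer-direction check modeled in plain C (GCC/Clang builtin, downward-growing stack assumed):

#include <stdint.h>
#include <stdio.h>

static void unpoison_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);

	/* like the WARN_ON(): the watermark must lie above this frame */
	if ((uintptr_t)sp > (uintptr_t)watermark) {
		fprintf(stderr, "bogus watermark, nothing to clear\n");
		return;
	}
	printf("would unpoison %zu bytes of stack\n",
	       (size_t)((uintptr_t)watermark - (uintptr_t)sp));
}

int main(void)
{
	int marker;	/* lives in main's frame, above the callee's */

	unpoison_above_sp_to(&marker);
	return 0;
}
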
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a5e453cf05c4..e5355a5b423f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1453,8 +1453,11 @@ static void kmemleak_scan(void)
1453 1453
1454 read_lock(&tasklist_lock); 1454 read_lock(&tasklist_lock);
1455 do_each_thread(g, p) { 1455 do_each_thread(g, p) {
1456 scan_block(task_stack_page(p), task_stack_page(p) + 1456 void *stack = try_get_task_stack(p);
1457 THREAD_SIZE, NULL); 1457 if (stack) {
1458 scan_block(stack, stack + THREAD_SIZE, NULL);
1459 put_task_stack(p);
1460 }
1458 } while_each_thread(g, p); 1461 } while_each_thread(g, p);
1459 read_unlock(&tasklist_lock); 1462 read_unlock(&tasklist_lock);
1460 } 1463 }
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 1d05cb9d363d..234676e31edd 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
554 err = memcg_init_list_lru(lru, memcg_aware); 554 err = memcg_init_list_lru(lru, memcg_aware);
555 if (err) { 555 if (err) {
556 kfree(lru->node); 556 kfree(lru->node);
557 /* Do this so a list_lru_destroy() doesn't crash: */
558 lru->node = NULL;
557 goto out; 559 goto out;
558 } 560 }
559 561
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ae052b5e3315..0f870ba43942 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1917,6 +1917,15 @@ retry:
1917 current->flags & PF_EXITING)) 1917 current->flags & PF_EXITING))
1918 goto force; 1918 goto force;
1919 1919
1920 /*
1921 * Prevent unbounded recursion when reclaim operations need to
1922 * allocate memory. This might exceed the limits temporarily,
1923 * but we prefer facilitating memory reclaim and getting back
1924 * under the limit over triggering OOM kills in these cases.
1925 */
1926 if (unlikely(current->flags & PF_MEMALLOC))
1927 goto force;
1928
1920 if (unlikely(task_in_memcg_oom(current))) 1929 if (unlikely(task_in_memcg_oom(current)))
1921 goto nomem; 1930 goto nomem;
1922 1931
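
The memcontrol.c hunk above lets a task that is already performing reclaim (PF_MEMALLOC set) charge past the limit rather than recursing into reclaim or raising an OOM kill. A toy model of the guard; the flag value matches the kernel's PF_MEMALLOC, but the rest of the names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define PF_MEMALLOC 0x00000800	/* allocating as part of reclaim */

struct task { unsigned int flags; };

static bool try_charge(const struct task *tsk, bool over_limit)
{
	if (over_limit && (tsk->flags & PF_MEMALLOC))
		return true;	/* force the charge: we ARE the reclaimer */
	return !over_limit;
}

int main(void)
{
	struct task reclaimer = { .flags = PF_MEMALLOC };
	struct task normal = { .flags = 0 };

	printf("reclaimer over limit: %s\n",
	       try_charge(&reclaimer, true) ? "forced" : "blocked");
	printf("normal task over limit: %s\n",
	       try_charge(&normal, true) ? "forced" : "blocked");
	return 0;
}
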
diff --git a/mm/memory.c b/mm/memory.c
index fc1987dfd8cc..e18c57bdc75c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3869,10 +3869,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
3869 * given task for page fault accounting. 3869 * given task for page fault accounting.
3870 */ 3870 */
3871static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, 3871static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3872 unsigned long addr, void *buf, int len, int write) 3872 unsigned long addr, void *buf, int len, unsigned int gup_flags)
3873{ 3873{
3874 struct vm_area_struct *vma; 3874 struct vm_area_struct *vma;
3875 void *old_buf = buf; 3875 void *old_buf = buf;
3876 int write = gup_flags & FOLL_WRITE;
3876 3877
3877 down_read(&mm->mmap_sem); 3878 down_read(&mm->mmap_sem);
3878 /* ignore errors, just check how much was successfully transferred */ 3879 /* ignore errors, just check how much was successfully transferred */
@@ -3882,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3882 struct page *page = NULL; 3883 struct page *page = NULL;
3883 3884
3884 ret = get_user_pages_remote(tsk, mm, addr, 1, 3885 ret = get_user_pages_remote(tsk, mm, addr, 1,
3885 write, 1, &page, &vma); 3886 gup_flags, &page, &vma);
3886 if (ret <= 0) { 3887 if (ret <= 0) {
3887#ifndef CONFIG_HAVE_IOREMAP_PROT 3888#ifndef CONFIG_HAVE_IOREMAP_PROT
3888 break; 3889 break;
@@ -3934,14 +3935,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3934 * @addr: start address to access 3935 * @addr: start address to access
3935 * @buf: source or destination buffer 3936 * @buf: source or destination buffer
3936 * @len: number of bytes to transfer 3937 * @len: number of bytes to transfer
3937 * @write: whether the access is a write 3938 * @gup_flags: flags modifying lookup behaviour
3938 * 3939 *
3939 * The caller must hold a reference on @mm. 3940 * The caller must hold a reference on @mm.
3940 */ 3941 */
3941int access_remote_vm(struct mm_struct *mm, unsigned long addr, 3942int access_remote_vm(struct mm_struct *mm, unsigned long addr,
3942 void *buf, int len, int write) 3943 void *buf, int len, unsigned int gup_flags)
3943{ 3944{
3944 return __access_remote_vm(NULL, mm, addr, buf, len, write); 3945 return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
3945} 3946}
3946 3947
3947/* 3948/*
@@ -3950,7 +3951,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
3950 * Do not walk the page table directly, use get_user_pages 3951 * Do not walk the page table directly, use get_user_pages
3951 */ 3952 */
3952int access_process_vm(struct task_struct *tsk, unsigned long addr, 3953int access_process_vm(struct task_struct *tsk, unsigned long addr,
3953 void *buf, int len, int write) 3954 void *buf, int len, unsigned int gup_flags)
3954{ 3955{
3955 struct mm_struct *mm; 3956 struct mm_struct *mm;
3956 int ret; 3957 int ret;
@@ -3959,7 +3960,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
3959 if (!mm) 3960 if (!mm)
3960 return 0; 3961 return 0;
3961 3962
3962 ret = __access_remote_vm(tsk, mm, addr, buf, len, write); 3963 ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
3964
3963 mmput(mm); 3965 mmput(mm);
3964 3966
3965 return ret; 3967 return ret;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 962927309b6e..cad4b9125695 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -268,7 +268,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 	unsigned long i, pfn, end_pfn, nr_pages;
 	int node = pgdat->node_id;
 	struct page *page;
-	struct zone *zone;
 
 	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
 	page = virt_to_page(pgdat);
@@ -276,19 +275,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 	for (i = 0; i < nr_pages; i++, page++)
 		get_page_bootmem(node, page, NODE_INFO);
 
-	zone = &pgdat->node_zones[0];
-	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
-		if (zone_is_initialized(zone)) {
-			nr_pages = zone->wait_table_hash_nr_entries
-				* sizeof(wait_queue_head_t);
-			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
-			page = virt_to_page(zone->wait_table);
-
-			for (i = 0; i < nr_pages; i++, page++)
-				get_page_bootmem(node, page, NODE_INFO);
-		}
-	}
-
 	pfn = pgdat->node_start_pfn;
 	end_pfn = pgdat_end_pfn(pgdat);
 
@@ -2131,7 +2117,6 @@ void try_offline_node(int nid)
 	unsigned long start_pfn = pgdat->node_start_pfn;
 	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
 	unsigned long pfn;
-	int i;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
 		unsigned long section_nr = pfn_to_section_nr(pfn);
@@ -2158,20 +2143,6 @@ void try_offline_node(int nid)
 	 */
 	node_set_offline(nid);
 	unregister_one_node(nid);
-
-	/* free waittable in each zone */
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		struct zone *zone = pgdat->node_zones + i;
-
-		/*
-		 * wait_table may be allocated from boot memory,
-		 * here only free if it's allocated by vmalloc.
-		 */
-		if (is_vmalloc_addr(zone->wait_table)) {
-			vfree(zone->wait_table);
-			zone->wait_table = NULL;
-		}
-	}
 }
 EXPORT_SYMBOL(try_offline_node);
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ad1c96ac313c..0b859af06b87 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr)
 	struct page *p;
 	int err;
 
-	err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
 	if (err >= 0) {
 		err = page_to_nid(p);
 		put_page(p);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index bcdbe62f3e6d..11936526b08b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -25,7 +25,6 @@
 #include <linux/perf_event.h>
 #include <linux/pkeys.h>
 #include <linux/ksm.h>
-#include <linux/pkeys.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
diff --git a/mm/nommu.c b/mm/nommu.c
index 95daf81a4855..8b8faaf2a9e9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -109,7 +109,7 @@ unsigned int kobjsize(const void *objp)
 	return PAGE_SIZE << compound_order(page);
 }
 
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		      unsigned long start, unsigned long nr_pages,
 		      unsigned int foll_flags, struct page **pages,
 		      struct vm_area_struct **vmas, int *nonblocking)
@@ -160,33 +160,25 @@ finish_or_fault:
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages,
+		    unsigned int gup_flags, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
-	int flags = 0;
-
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
-
-	return __get_user_pages(current, current->mm, start, nr_pages, flags,
-				pages, vmas, NULL);
+	return __get_user_pages(current, current->mm, start, nr_pages,
+				gup_flags, pages, vmas, NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			   int write, int force, struct page **pages,
+			   unsigned int gup_flags, struct page **pages,
 			   int *locked)
 {
-	return get_user_pages(start, nr_pages, write, force, pages, NULL);
+	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags)
+			       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	down_read(&mm->mmap_sem);
@@ -198,10 +190,10 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     int write, int force, struct page **pages)
+			     struct page **pages, unsigned int gup_flags)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 write, force, pages, 0);
+					 pages, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -1817,9 +1809,10 @@ void filemap_map_pages(struct fault_env *fe,
 EXPORT_SYMBOL(filemap_map_pages);
 
 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long addr, void *buf, int len, int write)
+		unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
 	struct vm_area_struct *vma;
+	int write = gup_flags & FOLL_WRITE;
 
 	down_read(&mm->mmap_sem);
 
@@ -1854,21 +1847,22 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  * @addr:	start address to access
  * @buf:	source or destination buffer
  * @len:	number of bytes to transfer
- * @write:	whether the access is a write
+ * @gup_flags:	flags modifying lookup behaviour
  *
  * The caller must hold a reference on @mm.
  */
 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write)
+		void *buf, int len, unsigned int gup_flags)
 {
-	return __access_remote_vm(NULL, mm, addr, buf, len, write);
+	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
 }
 
 /*
  * Access another process' address space.
  * - source/target buffer must be kernel space
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+		unsigned int gup_flags)
 {
 	struct mm_struct *mm;
 
@@ -1879,7 +1873,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 	if (!mm)
 		return 0;
 
-	len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
 
 	mmput(mm);
 	return len;
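With the nommu variants converted as well, every get_user_pages() flavour now takes the flag word directly instead of separate write/force ints, and __get_user_pages() becomes static since nothing outside this file calls it any more. A hedged sketch of pinning one page for writing under the new signature (error handling trimmed, addr assumed to be a valid user address):

	struct page *page;
	long pinned;

	pinned = get_user_pages(addr & PAGE_MASK, 1,
				FOLL_WRITE | FOLL_FORCE, &page, NULL);
	if (pinned == 1) {
		/* ... access the page ... */
		put_page(page);		/* drop the reference gup took */
	}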
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2b3bf6767d54..8fd42aa7c4bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4224,7 +4224,7 @@ static void show_migration_types(unsigned char type)
 	}
 
 	*p = '\0';
-	printk("(%s) ", tmp);
+	printk(KERN_CONT "(%s) ", tmp);
 }
 
 /*
@@ -4335,7 +4335,8 @@ void show_free_areas(unsigned int filter)
 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
 
 		show_node(zone);
-		printk("%s"
+		printk(KERN_CONT
+			"%s"
 			" free:%lukB"
 			" min:%lukB"
 			" low:%lukB"
@@ -4382,8 +4383,8 @@ void show_free_areas(unsigned int filter)
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
 		printk("lowmem_reserve[]:");
 		for (i = 0; i < MAX_NR_ZONES; i++)
-			printk(" %ld", zone->lowmem_reserve[i]);
-		printk("\n");
+			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
+		printk(KERN_CONT "\n");
 	}
 
 	for_each_populated_zone(zone) {
@@ -4394,7 +4395,7 @@ void show_free_areas(unsigned int filter)
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
 			continue;
 		show_node(zone);
-		printk("%s: ", zone->name);
+		printk(KERN_CONT "%s: ", zone->name);
 
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
@@ -4412,11 +4413,12 @@ void show_free_areas(unsigned int filter)
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
-			printk("%lu*%lukB ", nr[order], K(1UL) << order);
+			printk(KERN_CONT "%lu*%lukB ",
+			       nr[order], K(1UL) << order);
 			if (nr[order])
 				show_migration_types(types[order]);
 		}
-		printk("= %lukB\n", K(total));
+		printk(KERN_CONT "= %lukB\n", K(total));
 	}
 
 	hugetlb_show_meminfo();
@@ -4977,72 +4979,6 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
 }
 
 /*
- * Helper functions to size the waitqueue hash table.
- * Essentially these want to choose hash table sizes sufficiently
- * large so that collisions trying to wait on pages are rare.
- * But in fact, the number of active page waitqueues on typical
- * systems is ridiculously low, less than 200. So this is even
- * conservative, even though it seems large.
- *
- * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
- * waitqueues, i.e. the size of the waitq table given the number of pages.
- */
-#define PAGES_PER_WAITQUEUE	256
-
-#ifndef CONFIG_MEMORY_HOTPLUG
-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
-{
-	unsigned long size = 1;
-
-	pages /= PAGES_PER_WAITQUEUE;
-
-	while (size < pages)
-		size <<= 1;
-
-	/*
-	 * Once we have dozens or even hundreds of threads sleeping
-	 * on IO we've got bigger problems than wait queue collision.
-	 * Limit the size of the wait table to a reasonable size.
-	 */
-	size = min(size, 4096UL);
-
-	return max(size, 4UL);
-}
-#else
-/*
- * A zone's size might be changed by hot-add, so it is not possible to determine
- * a suitable size for its wait_table. So we use the maximum size now.
- *
- * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
- *
- *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
- *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
- *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
- *
- * The maximum entries are prepared when a zone's memory is (512K + 256) pages
- * or more by the traditional way. (See above). It equals:
- *
- *    i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
- *    ia64(16K page size)                 : = ( 8G + 4M)byte.
- *    powerpc (64K page size)             : = (32G +16M)byte.
- */
-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
-{
-	return 4096UL;
-}
-#endif
-
-/*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
- */
-static inline unsigned long wait_table_bits(unsigned long size)
-{
-	return ffz(~size);
-}
-
-/*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is
  * done. Non-atomic initialization, single-pass.
@@ -5304,49 +5240,6 @@ void __init setup_per_cpu_pageset(void)
 			alloc_percpu(struct per_cpu_nodestat);
 }
 
-static noinline __ref
-int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
-{
-	int i;
-	size_t alloc_size;
-
-	/*
-	 * The per-page waitqueue mechanism uses hashed waitqueues
-	 * per zone.
-	 */
-	zone->wait_table_hash_nr_entries =
-		 wait_table_hash_nr_entries(zone_size_pages);
-	zone->wait_table_bits =
-		 wait_table_bits(zone->wait_table_hash_nr_entries);
-	alloc_size = zone->wait_table_hash_nr_entries
-					* sizeof(wait_queue_head_t);
-
-	if (!slab_is_available()) {
-		zone->wait_table = (wait_queue_head_t *)
-			memblock_virt_alloc_node_nopanic(
-				alloc_size, zone->zone_pgdat->node_id);
-	} else {
-		/*
-		 * This case means that a zone whose size was 0 gets new memory
-		 * via memory hot-add.
-		 * But it may be the case that a new node was hot-added. In
-		 * this case vmalloc() will not be able to use this new node's
-		 * memory - this wait_table must be initialized to use this new
-		 * node itself as well.
-		 * To use this new node's memory, further consideration will be
-		 * necessary.
-		 */
-		zone->wait_table = vmalloc(alloc_size);
-	}
-	if (!zone->wait_table)
-		return -ENOMEM;
-
-	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
-		init_waitqueue_head(zone->wait_table + i);
-
-	return 0;
-}
-
 static __meminit void zone_pcp_init(struct zone *zone)
 {
 	/*
@@ -5367,10 +5260,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
 					unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
-	int ret;
-	ret = zone_wait_table_init(zone, size);
-	if (ret)
-		return ret;
+
 	pgdat->nr_zones = zone_idx(zone) + 1;
 
 	zone->zone_start_pfn = zone_start_pfn;
@@ -5382,6 +5272,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
 					zone_start_pfn, (zone_start_pfn + size));
 
 	zone_init_free_lists(zone);
+	zone->initialized = 1;
 
 	return 0;
 }
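The printk() changes in this file are about record boundaries, not content: once each printk() call is treated as its own log record, a fragment that continues the current line has to say so with KERN_CONT or it gets emitted as a separate record. A minimal sketch of the pattern, assuming nothing beyond standard printk():

	int order;

	printk(KERN_INFO "orders:");		/* opens the line */
	for (order = 0; order < 4; order++)
		printk(KERN_CONT " %d", order);	/* continues it */
	printk(KERN_CONT "\n");			/* terminates it */

The wait-table removal in the same file is independent: page wait queues move to a single static table elsewhere in the kernel, so the per-zone hash, its sizing helpers, and the hotplug bookkeeping for it all go away.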
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 07514d41ebcc..be8dc8d1edb9 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
 	ssize_t rc = 0;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
+	unsigned int flags = FOLL_REMOTE;
 
 	/* Work out address and page range required */
 	if (len == 0)
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
+	if (vm_write)
+		flags |= FOLL_WRITE;
+
 	while (!rc && nr_pages && iov_iter_count(iter)) {
 		int pages = min(nr_pages, max_pages_per_loop);
 		size_t bytes;
@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		 * current/current->mm
 		 */
 		pages = __get_user_pages_unlocked(task, mm, pa, pages,
-						  vm_write, 0, process_pages,
-						  FOLL_REMOTE);
+						  process_pages, flags);
 		if (pages <= 0)
 			return -EFAULT;
 
diff --git a/mm/slab.c b/mm/slab.c
index 090fb26b3a39..0b0550ca85b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -233,6 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
+	parent->num_slabs = 0;
 }
 
 #define MAKE_LIST(cachep, listp, slab, nodeid)		\
@@ -966,7 +967,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
 	 * guaranteed to be valid until irq is re-enabled, because it will be
 	 * freed after synchronize_sched().
 	 */
-	if (force_change)
+	if (old_shared && force_change)
 		synchronize_sched();
 
 fail:
@@ -1382,24 +1383,27 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 	for_each_kmem_cache_node(cachep, node, n) {
 		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
 		unsigned long active_slabs = 0, num_slabs = 0;
+		unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+		unsigned long num_slabs_full;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->slabs_full, lru) {
-			active_objs += cachep->num;
-			active_slabs++;
-		}
+		num_slabs = n->num_slabs;
 		list_for_each_entry(page, &n->slabs_partial, lru) {
 			active_objs += page->active;
-			active_slabs++;
+			num_slabs_partial++;
 		}
 		list_for_each_entry(page, &n->slabs_free, lru)
-			num_slabs++;
+			num_slabs_free++;
 
 		free_objects += n->free_objects;
 		spin_unlock_irqrestore(&n->list_lock, flags);
 
-		num_slabs += active_slabs;
 		num_objs = num_slabs * cachep->num;
+		active_slabs = num_slabs - num_slabs_free;
+		num_slabs_full = num_slabs -
+			(num_slabs_partial + num_slabs_free);
+		active_objs += (num_slabs_full * cachep->num);
+
 		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
 			node, active_slabs, num_slabs, active_objs, num_objs,
 			free_objects);
@@ -2314,6 +2318,7 @@ static int drain_freelist(struct kmem_cache *cache,
 
 		page = list_entry(p, struct page, lru);
 		list_del(&page->lru);
+		n->num_slabs--;
 		/*
 		 * Safe to drop the lock. The slab is no longer linked
 		 * to the cache.
@@ -2752,6 +2757,8 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 		list_add_tail(&page->lru, &(n->slabs_free));
 	else
 		fixup_slab_list(cachep, n, page, &list);
+
+	n->num_slabs++;
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
 	spin_unlock(&n->list_lock);
@@ -3443,6 +3450,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 
 		page = list_last_entry(&n->slabs_free, struct page, lru);
 		list_move(&page->lru, list);
+		n->num_slabs--;
 	}
 }
 
@@ -4099,6 +4107,8 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 	unsigned long num_objs;
 	unsigned long active_slabs = 0;
 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+	unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+	unsigned long num_slabs_full = 0;
 	const char *name;
 	char *error = NULL;
 	int node;
@@ -4111,33 +4121,34 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru) {
-			if (page->active != cachep->num && !error)
-				error = "slabs_full accounting error";
-			active_objs += cachep->num;
-			active_slabs++;
-		}
+		num_slabs += n->num_slabs;
+
 		list_for_each_entry(page, &n->slabs_partial, lru) {
 			if (page->active == cachep->num && !error)
 				error = "slabs_partial accounting error";
 			if (!page->active && !error)
 				error = "slabs_partial accounting error";
 			active_objs += page->active;
-			active_slabs++;
+			num_slabs_partial++;
 		}
+
 		list_for_each_entry(page, &n->slabs_free, lru) {
 			if (page->active && !error)
 				error = "slabs_free accounting error";
-			num_slabs++;
+			num_slabs_free++;
 		}
+
 		free_objects += n->free_objects;
 		if (n->shared)
 			shared_avail += n->shared->avail;
 
 		spin_unlock_irq(&n->list_lock);
 	}
-	num_slabs += active_slabs;
 	num_objs = num_slabs * cachep->num;
+	active_slabs = num_slabs - num_slabs_free;
+	num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
+	active_objs += (num_slabs_full * cachep->num);
+
 	if (num_objs - active_objs != free_objects && !error)
 		error = "free_objects accounting error";
 
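The slab hunks all serve one bookkeeping change: a per-node num_slabs counter is maintained at the three points where a slab enters or leaves the node (cache_grow_end(), drain_freelist(), free_block()), so the stats paths no longer have to walk slabs_full, which can be enormous on large machines. Since every slab sits on exactly one of the three lists, the full count falls out by arithmetic, roughly as the hunks above compute it:

	/* invariant: num_slabs == full + partial + free */
	num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
	active_slabs   = num_slabs - num_slabs_free;
	num_objs       = num_slabs * cachep->num;
	active_objs   += num_slabs_full * cachep->num;

The one thing necessarily lost is the per-page sanity check the old slabs_full walk performed (page->active != cachep->num), the usual price of O(1) accounting.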
diff --git a/mm/slab.h b/mm/slab.h
index 9653f2e2591a..bc05fdc3edce 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -432,6 +432,7 @@ struct kmem_cache_node {
 	struct list_head slabs_partial;	/* partial list first, better asm code */
 	struct list_head slabs_full;
 	struct list_head slabs_free;
+	unsigned long num_slabs;
 	unsigned long free_objects;
 	unsigned int free_limit;
 	unsigned int colour_next;	/* Per-node cache coloring */
diff --git a/mm/util.c b/mm/util.c
index 662cddf914af..1a41553db866 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -230,8 +230,10 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
+int vma_is_stack_for_current(struct vm_area_struct *vma)
 {
+	struct task_struct * __maybe_unused t = current;
+
 	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
@@ -283,7 +285,8 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
 int __weak get_user_pages_fast(unsigned long start,
 				int nr_pages, int write, struct page **pages)
 {
-	return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
+	return get_user_pages_unlocked(start, nr_pages, pages,
+				       write ? FOLL_WRITE : 0);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
@@ -623,7 +626,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 	if (len > buflen)
 		len = buflen;
 
-	res = access_process_vm(task, arg_start, buffer, len, 0);
+	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
 
 	/*
 	 * If the nul at the end of args has been overwritten, then
@@ -638,7 +641,8 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 			if (len > buflen - res)
 				len = buflen - res;
 			res += access_process_vm(task, env_start,
-						 buffer+res, len, 0);
+						 buffer+res, len,
+						 FOLL_FORCE);
 			res = strnlen(buffer, res);
 		}
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 744f926af442..76fda2268148 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3043,7 +3043,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 					    sc.gfp_mask,
 					    sc.reclaim_idx);
 
+	current->flags |= PF_MEMALLOC;
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	current->flags &= ~PF_MEMALLOC;
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
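The vmscan fix brackets the reclaim call with PF_MEMALLOC, the task flag that tells the page allocator this task is already reclaiming, so allocations made on the reclaim path cannot recurse into direct reclaim and deadlock. A re-entrancy-safe variant of the same guard, for a caller that might already be running with the flag set, would look roughly like:

	unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;

	current->flags |= PF_MEMALLOC;
	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
	if (!noreclaim_flag)
		current->flags &= ~PF_MEMALLOC;	/* restore only if we set it */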
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 00d2601407c5..1a7c9a79a53c 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
 	while (got < num_pages) {
 		rc = get_user_pages_unlocked(
 		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-		    num_pages - got, write_page, 0, pages + got);
+		    num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
 		if (rc < 0)
 			break;
 		BUG_ON(rc == 0);
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index f826e8739023..d942c7c2bc0a 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -41,7 +41,7 @@ config BIG_KEYS
41 bool "Large payload keys" 41 bool "Large payload keys"
42 depends on KEYS 42 depends on KEYS
43 depends on TMPFS 43 depends on TMPFS
44 select CRYPTO 44 depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
45 select CRYPTO_AES 45 select CRYPTO_AES
46 select CRYPTO_ECB 46 select CRYPTO_ECB
47 select CRYPTO_RNG 47 select CRYPTO_RNG
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index c0b3030b5634..835c1ab30d01 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -9,6 +9,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) "big_key: "fmt
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/file.h>
@@ -341,44 +342,48 @@ error:
  */
 static int __init big_key_init(void)
 {
-	return register_key_type(&key_type_big_key);
-}
-
-/*
- * Initialize big_key crypto and RNG algorithms
- */
-static int __init big_key_crypto_init(void)
-{
-	int ret = -EINVAL;
+	struct crypto_skcipher *cipher;
+	struct crypto_rng *rng;
+	int ret;
 
-	/* init RNG */
-	big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
-	if (IS_ERR(big_key_rng)) {
-		big_key_rng = NULL;
-		return -EFAULT;
+	rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+	if (IS_ERR(rng)) {
+		pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
+		return PTR_ERR(rng);
 	}
 
+	big_key_rng = rng;
+
 	/* seed RNG */
-	ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
-	if (ret)
-		goto error;
+	ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
+	if (ret) {
+		pr_err("Can't reset rng: %d\n", ret);
+		goto error_rng;
+	}
 
 	/* init block cipher */
-	big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
-						 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(big_key_skcipher)) {
-		big_key_skcipher = NULL;
-		ret = -EFAULT;
-		goto error;
+	cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(cipher)) {
+		ret = PTR_ERR(cipher);
+		pr_err("Can't alloc crypto: %d\n", ret);
+		goto error_rng;
+	}
+
+	big_key_skcipher = cipher;
+
+	ret = register_key_type(&key_type_big_key);
+	if (ret < 0) {
+		pr_err("Can't register type: %d\n", ret);
+		goto error_cipher;
 	}
 
 	return 0;
 
-error:
+error_cipher:
+	crypto_free_skcipher(big_key_skcipher);
+error_rng:
 	crypto_free_rng(big_key_rng);
-	big_key_rng = NULL;
 	return ret;
 }
 
-device_initcall(big_key_init);
-late_initcall(big_key_crypto_init);
+late_initcall(big_key_init);
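Merging big_key_crypto_init() into big_key_init() closes an ordering hole: the key type used to be registered at device_initcall() time while its RNG and cipher were only allocated at late_initcall(), leaving a window in which a big_key could be created against NULL crypto handles (and the old error paths returned -EFAULT rather than the real cause). The rewrite also lands on the conventional goto-unwind shape, one label per acquired resource, released in reverse order. The shape in isolation, as plain compilable C with stand-in resources (register_type() is a hypothetical placeholder for the final failable step):

	#include <stdio.h>

	static int register_type(void) { return 0; }	/* hypothetical */

	static int example_init(void)
	{
		FILE *rng, *cipher;

		rng = fopen("/dev/urandom", "r");
		if (!rng)
			return -1;

		cipher = fopen("/dev/null", "r+");
		if (!cipher)
			goto err_rng;

		if (register_type() < 0)	/* last step that can fail */
			goto err_cipher;

		return 0;

	err_cipher:
		fclose(cipher);
	err_rng:
		fclose(rng);
		return -1;
	}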
diff --git a/security/keys/proc.c b/security/keys/proc.c
index f0611a6368cd..b9f531c9e4fa 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
 	struct timespec now;
 	unsigned long timo;
 	key_ref_t key_ref, skey_ref;
-	char xbuf[12];
+	char xbuf[16];
 	int rc;
 
 	struct keyring_search_context ctx = {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 085057936287..09fd6108e421 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3557,7 +3557,7 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
 	} else if (!vma->vm_file &&
 		   ((vma->vm_start <= vma->vm_mm->start_stack &&
 		     vma->vm_end >= vma->vm_mm->start_stack) ||
-		    vma_is_stack_for_task(vma, current))) {
+		    vma_is_stack_for_current(vma))) {
 		rc = current_has_perm(current, PROCESS__EXECSTACK);
 	} else if (vma->vm_file && vma->anon_vma) {
 		/*
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index ade7c6cad172..682b73af7766 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
 	 * the execve().
 	 */
 	if (get_user_pages_remote(current, bprm->mm, pos, 1,
-				0, 1, &page, NULL) <= 0)
+				FOLL_FORCE, &page, NULL) <= 0)
 		return false;
 #else
 	page = bprm->page[pos / PAGE_SIZE];
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index dcc102813aef..37d9cfbc29f9 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -448,8 +448,8 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
 
 		ktime_get_ts64(&tm);
 		tm = timespec64_sub(tm, tmr->last_update);
-		cur_time.tv_nsec = tm.tv_nsec;
-		cur_time.tv_sec = tm.tv_sec;
+		cur_time.tv_nsec += tm.tv_nsec;
+		cur_time.tv_sec += tm.tv_sec;
 		snd_seq_sanity_real_time(&cur_time);
 	}
 	spin_unlock_irqrestore(&tmr->lock, flags);
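The seq_timer change fixes an accumulate-versus-assign bug: cur_time is seeded from the timer's latched time earlier in the function, and the delta since last_update must add to that base, not replace it. Worked through with illustrative numbers:

	/* base 10 s + 500000000 ns latched, 0.2 s elapsed since last tick */
	cur_time.tv_sec = 10;  cur_time.tv_nsec = 500000000;
	tm.tv_sec = 0;         tm.tv_nsec = 200000000;

	cur_time.tv_sec  += tm.tv_sec;	/* 10 */
	cur_time.tv_nsec += tm.tv_nsec;	/* 700000000, i.e. 10.7 s total */

With plain assignment the same call would have reported 0.2 s; snd_seq_sanity_real_time() then only has to normalise any nanosecond overflow.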
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index d17937b92331..7e3aa50b21f9 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -111,7 +111,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		return -EINVAL;
 
 	hm = kmalloc(sizeof(*hm), GFP_KERNEL);
-	hr = kmalloc(sizeof(*hr), GFP_KERNEL);
+	hr = kzalloc(sizeof(*hr), GFP_KERNEL);
 	if (!hm || !hr) {
 		err = -ENOMEM;
 		goto out;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c3469f756ec2..c64d986009a9 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -341,8 +341,7 @@ enum {
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
-	(AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
-	 AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+	(AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
 	 AZX_DCAPS_SNOOP_TYPE(NVIDIA))
 
 #define AZX_DCAPS_PRESET_CTHDA \
@@ -1716,6 +1715,10 @@ static int azx_first_init(struct azx *chip)
 		}
 	}
 
+	/* NVidia hardware normally only supports up to 40 bits of DMA */
+	if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
+		dma_bits = 40;
+
 	/* disable 64bit DMA address on some devices */
 	if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
 		dev_dbg(card->dev, "Disabling 64bit DMA\n");
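The hda_intel change stops masking Nvidia controllers down to 32-bit DMA wholesale (the removed AZX_DCAPS_NO_64BIT) and instead caps them at the 40 bits the hardware can actually address. For context, a bit-width cap like this typically reaches the DMA layer as a mask; a hedged sketch using the standard helper, not the driver's exact call (the chip/dma_bits names follow the hunk above, the 32-bit fallback mirrors common driver practice):

	u64 mask = DMA_BIT_MASK(dma_bits);	/* dma_bits == 40 here */

	if (dma_set_mask_and_coherent(&chip->pci->dev, mask))
		dma_set_mask_and_coherent(&chip->pci->dev, DMA_BIT_MASK(32));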
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b58e8c76346a..2f909dd8b7b8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5811,8 +5811,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 #define ALC295_STANDARD_PINS \
 	{0x12, 0xb7a60130}, \
 	{0x14, 0x90170110}, \
-	{0x17, 0x21014020}, \
-	{0x18, 0x21a19030}, \
 	{0x21, 0x04211020}
 
 #define ALC298_STANDARD_PINS \
@@ -5859,11 +5857,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x1b, 0x02011020},
 		{0x21, 0x0221101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x14, 0x90170110},
+		{0x1b, 0x01011020},
+		{0x21, 0x0221101f}),
+	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x14, 0x90170130},
 		{0x1b, 0x01014020},
 		{0x21, 0x0221103f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x14, 0x90170130},
+		{0x1b, 0x01011020},
+		{0x21, 0x0221103f}),
+	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x14, 0x90170130},
 		{0x1b, 0x02011020},
 		{0x21, 0x0221103f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -6039,7 +6045,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		ALC292_STANDARD_PINS,
 		{0x13, 0x90a60140}),
 	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC295_STANDARD_PINS),
+		ALC295_STANDARD_PINS,
+		{0x17, 0x21014020},
+		{0x18, 0x21a19030}),
+	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC295_STANDARD_PINS,
+		{0x17, 0x21014040},
+		{0x18, 0x21a19050}),
 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC298_STANDARD_PINS,
 		{0x17, 0x90170110}),
@@ -6613,6 +6625,7 @@ enum {
 	ALC891_FIXUP_HEADSET_MODE,
 	ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
 	ALC662_FIXUP_ACER_VERITON,
+	ALC892_FIXUP_ASROCK_MOBO,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -6889,6 +6902,16 @@ static const struct hda_fixup alc662_fixups[] = {
 			{ }
 		}
 	},
+	[ALC892_FIXUP_ASROCK_MOBO] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x15, 0x40f000f0 }, /* disabled */
+			{ 0x16, 0x40f000f0 }, /* disabled */
+			{ 0x18, 0x01014011 }, /* LO */
+			{ 0x1a, 0x01014012 }, /* LO */
+			{ }
+		}
+	},
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6926,6 +6949,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+	SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
 	SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
 	SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
 	SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index c60a776e815d..8a59d4782a0f 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
 AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
 AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
 
+/* Syntek STK1160 */
+{
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+		       USB_DEVICE_ID_MATCH_INT_CLASS |
+		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+	.idVendor = 0x05e1,
+	.idProduct = 0x0408,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+		.vendor_name = "Syntek",
+		.product_name = "STK1160",
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_AUDIO_ALIGN_TRANSFER
+	}
+},
+
 /* Digidesign Mbox */
 {
 	/* Thanks to Clemens Ladisch <clemens@ladisch.de> */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 1188bc849ee3..a39629206864 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -194,6 +194,8 @@
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index c0c0b265e88e..b63a31be1218 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -98,6 +98,15 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		*type = INSN_FP_SETUP;
 		break;
 
+	case 0x8d:
+		if (insn.rex_prefix.bytes &&
+		    insn.rex_prefix.bytes[0] == 0x48 &&
+		    insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
+		    insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
+			/* lea %(rsp), %rbp */
+			*type = INSN_FP_SETUP;
+		break;
+
 	case 0x90:
 		*type = INSN_NOP;
 		break;
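The new objtool case matches one exact instruction, lea (%rsp),%rbp, which compilers sometimes emit as frame-pointer setup in place of a mov. The three byte tests decode as follows (a cross-check worked out here, not taken from the patch):

	/* 48 8d 2c 24   =   lea (%rsp),%rbp
	 *
	 * 48	REX.W prefix:	64-bit operand size
	 * 8d	opcode:		LEA r64, m
	 * 2c	ModRM:		mod=00, reg=101 (rbp), rm=100 (SIB follows)
	 * 24	SIB:		scale=00, index=100 (none), base=100 (rsp)
	 */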
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 143b6cdd7f06..e8a1f699058a 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -97,6 +97,19 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
 	return next;
 }
 
+static bool gcov_enabled(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *sym;
+
+	list_for_each_entry(sec, &file->elf->sections, list)
+		list_for_each_entry(sym, &sec->symbol_list, list)
+			if (!strncmp(sym->name, "__gcov_.", 8))
+				return true;
+
+	return false;
+}
+
 #define for_each_insn(file, insn)					\
 	list_for_each_entry(insn, &file->insn_list, list)
 
@@ -713,6 +726,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
 				      struct instruction *insn)
 {
 	struct rela *text_rela, *rodata_rela;
+	struct instruction *orig_insn = insn;
 
 	text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
 	if (text_rela && text_rela->sym == file->rodata->sym) {
@@ -733,10 +747,16 @@ static struct rela *find_switch_table(struct objtool_file *file,
 
 	/* case 3 */
 	func_for_each_insn_continue_reverse(file, func, insn) {
-		if (insn->type == INSN_JUMP_UNCONDITIONAL ||
-		    insn->type == INSN_JUMP_DYNAMIC)
+		if (insn->type == INSN_JUMP_DYNAMIC)
 			break;
 
+		/* allow small jumps within the range */
+		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+		    insn->jump_dest &&
+		    (insn->jump_dest->offset <= insn->offset ||
+		     insn->jump_dest->offset > orig_insn->offset))
+			break;
+
 		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
 						    insn->len);
 		if (text_rela && text_rela->sym == file->rodata->sym)
@@ -1034,34 +1054,6 @@ static int validate_branch(struct objtool_file *file,
 	return 0;
 }
 
-static bool is_gcov_insn(struct instruction *insn)
-{
-	struct rela *rela;
-	struct section *sec;
-	struct symbol *sym;
-	unsigned long offset;
-
-	rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
-	if (!rela)
-		return false;
-
-	if (rela->sym->type != STT_SECTION)
-		return false;
-
-	sec = rela->sym->sec;
-	offset = rela->addend + insn->offset + insn->len - rela->offset;
-
-	list_for_each_entry(sym, &sec->symbol_list, list) {
-		if (sym->type != STT_OBJECT)
-			continue;
-
-		if (offset >= sym->offset && offset < sym->offset + sym->len)
-			return (!memcmp(sym->name, "__gcov0.", 8));
-	}
-
-	return false;
-}
-
 static bool is_kasan_insn(struct instruction *insn)
 {
 	return (insn->type == INSN_CALL &&
@@ -1083,9 +1075,6 @@ static bool ignore_unreachable_insn(struct symbol *func,
 	if (insn->type == INSN_NOP)
 		return true;
 
-	if (is_gcov_insn(insn))
-		return true;
-
 	/*
 	 * Check if this (or a subsequent) instruction is related to
 	 * CONFIG_UBSAN or CONFIG_KASAN.
@@ -1146,6 +1135,19 @@ static int validate_functions(struct objtool_file *file)
 			    ignore_unreachable_insn(func, insn))
 				continue;
 
+			/*
+			 * gcov produces a lot of unreachable
+			 * instructions. If we get an unreachable
+			 * warning and the file has gcov enabled, just
+			 * ignore it, and all other such warnings for
+			 * the file.
+			 */
+			if (!file->ignore_unreachables &&
+			    gcov_enabled(file)) {
+				file->ignore_unreachables = true;
+				continue;
+			}
+
 			WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
 			warnings++;
 		}
diff --git a/tools/perf/jvmti/Makefile b/tools/perf/jvmti/Makefile
index 5ce61a1bda9c..df14e6b67b63 100644
--- a/tools/perf/jvmti/Makefile
+++ b/tools/perf/jvmti/Makefile
@@ -36,7 +36,7 @@ SOLIBEXT=so
 # The following works at least on fedora 23, you may need the next
 # line for other distros.
 ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
-JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | cut -d ' ' -f 3)
+JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
 else
   ifneq (,$(wildcard /usr/sbin/alternatives))
     JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index fb8e42c7507a..4ffff7be9299 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -601,7 +601,8 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
 		u64 nr_entries;
 		hbt->timer(hbt->arg);
 
-		if (hist_browser__has_filter(browser))
+		if (hist_browser__has_filter(browser) ||
+		    symbol_conf.report_hierarchy)
 			hist_browser__update_nr_entries(browser);
 
 		nr_entries = hist_browser__nr_entries(browser);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 85dd0db0a127..2f3eded54b0c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1895,7 +1895,6 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
-	ph->env.nr_numa_nodes = nr;
 	nodes = zalloc(sizeof(*nodes) * nr);
 	if (!nodes)
 		return -ENOMEM;
@@ -1932,6 +1931,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
 
 		free(str);
 	}
+	ph->env.nr_numa_nodes = nr;
 	ph->env.numa_nodes = nodes;
 	return 0;
 
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 9f43fda2570f..660fca05bc93 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -136,8 +136,8 @@ do { \
 group		[^,{}/]*[{][^}]*[}][^,{}/]*
 event_pmu	[^,{}/]+[/][^/]*[/][^,{}/]*
 event		[^,{}/]+
-bpf_object	.*\.(o|bpf)
-bpf_source	.*\.c
+bpf_object	[^,{}]+\.(o|bpf)
+bpf_source	[^,{}]+\.c
 
 num_dec		[0-9]+
 num_hex		0x[a-fA-F0-9]+
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index db9668869f6f..8035cc1eb955 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
 	 * mm and might be done in another context, so we must
 	 * use FOLL_REMOTE.
 	 */
-	__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
+	__get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
+			FOLL_WRITE | FOLL_REMOTE);
 
 	kvm_async_page_present_sync(vcpu, apf);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 81dfc73d3df3..2907b7b78654 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1346,21 +1346,19 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
 static int get_user_page_nowait(unsigned long start, int write,
 				struct page **page)
 {
-	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+	int flags = FOLL_NOWAIT | FOLL_HWPOISON;
 
 	if (write)
 		flags |= FOLL_WRITE;
 
-	return __get_user_pages(current, current->mm, start, 1, flags, page,
-			NULL, NULL);
+	return get_user_pages(start, 1, flags, page, NULL);
 }
 
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
-	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
+	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
 
-	rc = __get_user_pages(current, current->mm, addr, 1,
-			      flags, NULL, NULL, NULL);
+	rc = get_user_pages(addr, 1, flags, NULL, NULL);
 	return rc == -EHWPOISON;
 }
 
@@ -1416,10 +1414,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 		down_read(&current->mm->mmap_sem);
 		npages = get_user_page_nowait(addr, write_fault, page);
 		up_read(&current->mm->mmap_sem);
-	} else
+	} else {
+		unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+		if (write_fault)
+			flags |= FOLL_WRITE;
+
 		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-						   write_fault, 0, page,
-						   FOLL_TOUCH|FOLL_HWPOISON);
+						   page, flags);
+	}
 	if (npages != 1)
 		return npages;
 
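Both KVM helpers drop FOLL_TOUCH and FOLL_GET from their local flag sets because they now go through get_user_pages(), whose wrapper layer (at least in this kernel) ORs in FOLL_TOUCH itself and takes a page reference whenever a pages array is supplied. The slow path's new else branch builds its flags the same explicit way; sketched on its own:

	unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;

	if (write_fault)
		flags |= FOLL_WRITE;	/* the fault wants a writable mapping */

	npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
					   page, flags);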